diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index f7d3affbaa..067862d96a 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -107,7 +107,7 @@ jobs: - name: Install packages run: sudo apt install -y ccache libarchive-dev libbsd-dev libbpf-dev libfdt-dev libibverbs-dev libipsec-mb-dev libisal-dev libjansson-dev - libnuma-dev libpcap-dev libssl-dev ninja-build pkg-config python3-pip + libnuma-dev libpcap-dev libssl-dev libvirt-dev ninja-build pkg-config python3-pip python3-pyelftools python3-setuptools python3-wheel zlib1g-dev - name: Install libabigail build dependencies if no cache is available if: env.ABI_CHECKS == 'true' && steps.libabigail-cache.outputs.cache-hit != 'true' @@ -196,8 +196,8 @@ jobs: if: steps.image_cache.outputs.cache-hit != 'true' run: docker exec -i dpdk dnf install -y ccache intel-ipsec-mb-devel isa-l-devel jansson-devel libarchive-devel libatomic libbsd-devel - libbpf-devel libfdt-devel libpcap-devel libxdp-devel ninja-build - numactl-devel openssl-devel python3-pip python3-pyelftools + libbpf-devel libfdt-devel libpcap-devel libvirt-devel libxdp-devel + ninja-build numactl-devel openssl-devel python3-pip python3-pyelftools python3-setuptools python3-wheel rdma-core-devel zlib-devel - name: Save image in cache if: steps.image_cache.outputs.cache-hit != 'true' @@ -271,8 +271,8 @@ jobs: - name: Install packages run: docker exec -i dpdk dnf install -y ccache intel-ipsec-mb-devel isa-l-devel jansson-devel libarchive-devel libatomic libbsd-devel - libbpf-devel libfdt-devel libpcap-devel libxdp-devel ninja-build - numactl-devel openssl-devel python3-pip python3-pyelftools + libbpf-devel libfdt-devel libpcap-devel libvirt-devel libxdp-devel + ninja-build numactl-devel openssl-devel python3-pip python3-pyelftools python3-setuptools python3-wheel rdma-core-devel zlib-devel ${{ matrix.config.compiler }} - name: Run setup diff --git a/.gitignore b/.gitignore index 01a47a7606..903fe9bc64 100644 --- a/.gitignore +++ b/.gitignore @@ -1,5 +1,10 @@ # ignore hidden files .* +# except the tracked ones and derivatives +!.ci/ +!.github/ +!.gitignore* +!.mailmap* # ignore generated documentation tables doc/guides/nics/overview_table.txt diff --git a/.mailmap b/.mailmap index 5290420258..4dbb8cf8a7 100644 --- a/.mailmap +++ b/.mailmap @@ -751,6 +751,7 @@ Július Milan Junfeng Guo Junjie Chen Junjie Wan +Junlong Wang Jun Qiu Jun W Zhou Junxiao Shi @@ -804,7 +805,7 @@ Kirill Rybalchenko Kishore Padmanabha Klaus Degner Kommula Shiva Shankar -Konstantin Ananyev +Konstantin Ananyev Krishna Murthy Krzysztof Galazka Krzysztof Kanas @@ -846,6 +847,7 @@ Li Feng Li Han Lihong Ma Lijian Zhang +Lijie Shan Lijuan Tu Lijun Ou Liming Sun @@ -869,6 +871,8 @@ Louis Peens Luca Boccassi Luca Vizzarro Luc Pelletier +Luka Jankovic +Lukas Sismis Lukasz Bartosik Lukasz Cieplicki Lukasz Czapnik @@ -1523,6 +1527,7 @@ Timmons C. 
Player Timothy McDaniel Timothy Miskell Timothy Redaelli +Tim Martin Tim Shearer Ting-Kai Ku Ting Xu diff --git a/MAINTAINERS b/MAINTAINERS index cd78bc7db1..f84ca3ea68 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -37,6 +37,7 @@ M: Ajit Khaparde T: git://dpdk.org/next/dpdk-next-net-brcm Next-net-intel Tree +M: Bruce Richardson T: git://dpdk.org/next/dpdk-next-net-intel Next-net-mrvl Tree @@ -112,6 +113,12 @@ F: license/ F: .editorconfig F: .mailmap +Linux kernel uAPI headers +M: Maxime Coquelin +F: devtools/linux-uapi.sh +F: doc/guides/contributing/linux_uapi.rst +F: kernel/linux/uapi/ + Build System M: Bruce Richardson F: Makefile @@ -186,6 +193,7 @@ F: app/test/test_threads.c F: app/test/test_version.c Logging +M: Stephen Hemminger F: lib/log/ F: doc/guides/prog_guide/log_lib.rst F: app/test/test_logs.c @@ -220,6 +228,13 @@ F: app/test/test_malloc_perf.c F: app/test/test_memory.c F: app/test/test_memzone.c +Lcore Variables +M: Mattias Rönnblom +F: lib/eal/include/rte_lcore_var.h +F: lib/eal/common/eal_common_lcore_var.c +F: app/test/test_lcore_var* +F: doc/guides/prog_guide/lcore_var.rst + Interrupt Subsystem M: Harman Kalra F: lib/eal/include/rte_epoll.h @@ -770,6 +785,8 @@ F: doc/guides/nics/features/e1000.ini F: doc/guides/nics/features/igb*.ini Intel ixgbe +M: Anatoly Burakov +M: Vladimir Medvedkin T: git://dpdk.org/next/dpdk-next-net-intel F: drivers/net/ixgbe/ F: doc/guides/nics/ixgbe.rst @@ -777,6 +794,8 @@ F: doc/guides/nics/intel_vf.rst F: doc/guides/nics/features/ixgbe*.ini Intel i40e +M: Ian Stokes +M: Bruce Richardson T: git://dpdk.org/next/dpdk-next-net-intel F: drivers/net/i40e/ F: doc/guides/nics/i40e.rst @@ -790,13 +809,16 @@ F: doc/guides/nics/fm10k.rst F: doc/guides/nics/features/fm10k*.ini Intel iavf -M: Jingjing Wu +M: Vladimir Medvedkin +M: Ian Stokes T: git://dpdk.org/next/dpdk-next-net-intel F: drivers/net/iavf/ F: drivers/common/iavf/ F: doc/guides/nics/features/iavf*.ini Intel ice +M: Bruce Richardson +M: Anatoly Burakov T: git://dpdk.org/next/dpdk-next-net-intel F: drivers/net/ice/ F: doc/guides/nics/ice.rst @@ -804,6 +826,7 @@ F: doc/guides/nics/features/ice.ini Intel idpf M: Jingjing Wu +M: Praveen Shetty T: git://dpdk.org/next/dpdk-next-net-intel F: drivers/net/idpf/ F: drivers/common/idpf/ @@ -811,6 +834,7 @@ F: doc/guides/nics/idpf.rst F: doc/guides/nics/features/idpf.ini Intel cpfl - EXPERIMENTAL +M: Praveen Shetty T: git://dpdk.org/next/dpdk-next-net-intel F: drivers/net/cpfl/ F: doc/guides/nics/cpfl.rst @@ -1042,6 +1066,13 @@ F: drivers/net/avp/ F: doc/guides/nics/avp.rst F: doc/guides/nics/features/avp.ini +ZTE zxdh - EXPERIMENTAL +M: Junlong Wang +M: Lijie Shan +F: drivers/net/zxdh/ +F: doc/guides/nics/zxdh.rst +F: doc/guides/nics/features/zxdh.ini + PCAP PMD F: drivers/net/pcap/ F: doc/guides/nics/pcap_ring.rst @@ -1393,7 +1424,7 @@ M: Pavan Nikhilesh F: drivers/event/octeontx/timvf_* Intel DLB2 -M: Abdullah Sevincer +M: Pravin Pathak F: drivers/event/dlb2/ F: doc/guides/eventdevs/dlb2.rst @@ -1503,6 +1534,12 @@ T: git://dpdk.org/next/dpdk-next-net-mrvl F: doc/guides/rawdevs/cnxk_gpio.rst F: drivers/raw/cnxk_gpio/ +Marvell CNXK RVU LF +M: Akhil Goyal +T: git://dpdk.org/next/dpdk-next-net-mrvl +F: drivers/raw/cnxk_rvu_lf/ +F: doc/guides/rawdevs/cnxk_rvu_lf.rst + NTB M: Jingjing Wu F: drivers/raw/ntb/ @@ -1744,6 +1781,7 @@ M: David Hunt M: Sivaprasad Tummala F: lib/power/ F: doc/guides/prog_guide/power_man.rst +F: drivers/power/ F: app/test/test_power* F: examples/l3fwd-power/ F: doc/guides/sample_app_ug/l3_forward_power_man.rst diff --git a/VERSION 
b/VERSION index 2f350d29ec..091d5c1b39 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -24.11.0-rc1 +24.11.0-rc2 diff --git a/app/dumpcap/main.c b/app/dumpcap/main.c index fcfaa19951..3d3c0dbc66 100644 --- a/app/dumpcap/main.c +++ b/app/dumpcap/main.c @@ -24,6 +24,7 @@ #include #include +#include #include #include #include @@ -95,7 +96,6 @@ struct interface { struct rte_bpf_prm *bpf_prm; char name[RTE_ETH_NAME_MAX_LEN]; - struct rte_rxtx_callback *rx_cb[RTE_MAX_QUEUES_PER_PORT]; const char *ifname; const char *ifdescr; }; @@ -677,7 +677,7 @@ static struct rte_ring *create_ring(void) /* Find next power of 2 >= size. */ size = ring_size; - log2 = sizeof(size) * 8 - __builtin_clzl(size - 1); + log2 = sizeof(size) * 8 - rte_clz64(size - 1); size = 1u << log2; if (size != ring_size) { @@ -800,9 +800,10 @@ static dumpcap_out_t create_output(void) free(os); TAILQ_FOREACH(intf, &interfaces, next) { - rte_pcapng_add_interface(ret.pcapng, intf->port, - intf->ifname, intf->ifdescr, - intf->opts.filter); + if (rte_pcapng_add_interface(ret.pcapng, intf->port, intf->ifname, + intf->ifdescr, intf->opts.filter) < 0) + rte_exit(EXIT_FAILURE, "rte_pcapng_add_interface %u failed\n", + intf->port); } } else { pcap_t *pcap; diff --git a/app/graph/ethdev.c b/app/graph/ethdev.c index b890efecb5..d343235dae 100644 --- a/app/graph/ethdev.c +++ b/app/graph/ethdev.c @@ -208,10 +208,22 @@ ethdev_show(const char *name) if (rc < 0) return rc; - rte_eth_dev_info_get(port_id, &info); - rte_eth_stats_get(port_id, &stats); - rte_eth_macaddr_get(port_id, &addr); - rte_eth_link_get(port_id, &link); + rc = rte_eth_dev_info_get(port_id, &info); + if (rc < 0) + return rc; + + rc = rte_eth_link_get(port_id, &link); + if (rc < 0) + return rc; + + rc = rte_eth_stats_get(port_id, &stats); + if (rc < 0) + return rc; + + rc = rte_eth_macaddr_get(port_id, &addr); + if (rc < 0) + return rc; + rte_eth_dev_get_mtu(port_id, &mtu); length = strlen(conn->msg_out); diff --git a/app/test-bbdev/test_bbdev_perf.c b/app/test-bbdev/test_bbdev_perf.c index 9be360b225..6ee1ca34b2 100644 --- a/app/test-bbdev/test_bbdev_perf.c +++ b/app/test-bbdev/test_bbdev_perf.c @@ -111,6 +111,8 @@ static uint32_t ldpc_cap_flags; /* FFT window width predefined on device and on vector. */ static int fft_window_width_dev; +bool dump_ops = true; + /* Represents tested active devices */ static struct active_device { const char *driver_name; @@ -3109,6 +3111,20 @@ run_test_case_on_device(test_case_function *test_case_func, uint8_t dev_id, /* Run test case function */ t_ret = test_case_func(ad, op_params); + if (dump_ops) { + /* Dump queue information in local file. */ + static FILE *fd; + fd = fopen("./dump_bbdev_queue_ops.txt", "w"); + if (fd == NULL) { + printf("Open dump file error.\n"); + return -1; + } + rte_bbdev_queue_ops_dump(ad->dev_id, ad->queue_ids[i], fd); + fclose(fd); + /* Run it once only. 
*/ + dump_ops = false; + } + /* Free active device resources and return */ free_buffers(ad, op_params); return t_ret; @@ -3519,6 +3535,10 @@ throughput_intr_lcore_ldpc_dec(void *arg) rte_atomic_store_explicit(&tp->nb_dequeued, 0, rte_memory_order_relaxed); } + TEST_ASSERT_SUCCESS(rte_bbdev_queue_intr_disable(tp->dev_id, queue_id), + "Failed to disable interrupts for dev: %u, queue_id: %u", + tp->dev_id, queue_id); + return TEST_SUCCESS; } @@ -3613,6 +3633,10 @@ throughput_intr_lcore_dec(void *arg) rte_atomic_store_explicit(&tp->nb_dequeued, 0, rte_memory_order_relaxed); } + TEST_ASSERT_SUCCESS(rte_bbdev_queue_intr_disable(tp->dev_id, queue_id), + "Failed to disable interrupts for dev: %u, queue_id: %u", + tp->dev_id, queue_id); + return TEST_SUCCESS; } @@ -3702,6 +3726,10 @@ throughput_intr_lcore_enc(void *arg) rte_atomic_store_explicit(&tp->nb_dequeued, 0, rte_memory_order_relaxed); } + TEST_ASSERT_SUCCESS(rte_bbdev_queue_intr_disable(tp->dev_id, queue_id), + "Failed to disable interrupts for dev: %u, queue_id: %u", + tp->dev_id, queue_id); + return TEST_SUCCESS; } @@ -3794,6 +3822,10 @@ throughput_intr_lcore_ldpc_enc(void *arg) rte_atomic_store_explicit(&tp->nb_dequeued, 0, rte_memory_order_relaxed); } + TEST_ASSERT_SUCCESS(rte_bbdev_queue_intr_disable(tp->dev_id, queue_id), + "Failed to disable interrupts for dev: %u, queue_id: %u", + tp->dev_id, queue_id); + return TEST_SUCCESS; } @@ -3885,6 +3917,10 @@ throughput_intr_lcore_fft(void *arg) rte_atomic_store_explicit(&tp->nb_dequeued, 0, rte_memory_order_relaxed); } + TEST_ASSERT_SUCCESS(rte_bbdev_queue_intr_disable(tp->dev_id, queue_id), + "Failed to disable interrupts for dev: %u, queue_id: %u", + tp->dev_id, queue_id); + return TEST_SUCCESS; } @@ -3970,6 +4006,10 @@ throughput_intr_lcore_mldts(void *arg) rte_atomic_store_explicit(&tp->nb_dequeued, 0, rte_memory_order_relaxed); } + TEST_ASSERT_SUCCESS(rte_bbdev_queue_intr_disable(tp->dev_id, queue_id), + "Failed to disable interrupts for dev: %u, queue_id: %u", + tp->dev_id, queue_id); + return TEST_SUCCESS; } diff --git a/app/test-eventdev/test_pipeline_common.c b/app/test-eventdev/test_pipeline_common.c index b111690b7c..204117ef7f 100644 --- a/app/test-eventdev/test_pipeline_common.c +++ b/app/test-eventdev/test_pipeline_common.c @@ -74,8 +74,6 @@ setup_pkt_udp_ip_headers(struct rte_ipv4_hdr *ip_hdr, struct rte_udp_hdr *udp_hdr, uint16_t pkt_data_len, uint8_t port, uint8_t flow) { - uint16_t *ptr16; - uint32_t ip_cksum; uint16_t pkt_len; /* @@ -104,28 +102,7 @@ setup_pkt_udp_ip_headers(struct rte_ipv4_hdr *ip_hdr, /* * Compute IP header checksum. */ - ptr16 = (unaligned_uint16_t *)ip_hdr; - ip_cksum = 0; - ip_cksum += ptr16[0]; - ip_cksum += ptr16[1]; - ip_cksum += ptr16[2]; - ip_cksum += ptr16[3]; - ip_cksum += ptr16[4]; - ip_cksum += ptr16[6]; - ip_cksum += ptr16[7]; - ip_cksum += ptr16[8]; - ip_cksum += ptr16[9]; - - /* - * Reduce 32 bit checksum to 16 bits and complement it. 
- */ - ip_cksum = ((ip_cksum & 0xFFFF0000) >> 16) + (ip_cksum & 0x0000FFFF); - if (ip_cksum > 65535) - ip_cksum -= 65535; - ip_cksum = (~ip_cksum) & 0x0000FFFF; - if (ip_cksum == 0) - ip_cksum = 0xFFFF; - ip_hdr->hdr_checksum = (uint16_t)ip_cksum; + ip_hdr->hdr_checksum = rte_ipv4_cksum_simple(ip_hdr); } static void diff --git a/app/test-pmd/hairpin.c b/app/test-pmd/hairpin.c new file mode 100644 index 0000000000..5e6b37974e --- /dev/null +++ b/app/test-pmd/hairpin.c @@ -0,0 +1,350 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2022 NVIDIA Corporation & Affiliates + */ + +#include +#include +#include +#include +#include + +#include + +#include "testpmd.h" + +/* Hairpin ports configuration mode. */ +uint32_t hairpin_mode; + +bool hairpin_multiport_mode; + +queueid_t nb_hairpinq; /**< Number of hairpin queues per port. */ + +static LIST_HEAD(, hairpin_map) hairpin_map_head = LIST_HEAD_INITIALIZER(); + +struct hairpin_map { + LIST_ENTRY(hairpin_map) entry; /**< List entry. */ + portid_t rx_port; /**< Hairpin Rx port ID. */ + portid_t tx_port; /**< Hairpin Tx port ID. */ + uint16_t rxq_head; /**< Hairpin Rx queue head. */ + uint16_t txq_head; /**< Hairpin Tx queue head. */ + uint16_t qnum; /**< Hairpin queues number. */ +}; + +void +hairpin_add_multiport_map(struct hairpin_map *map) +{ + LIST_INSERT_HEAD(&hairpin_map_head, map, entry); +} + +/* + * Get the allowed maximum number of hairpin queues. + * *pid return the port id which has minimal value of + * max_hairpin_queues in all ports. + */ +queueid_t +get_allowed_max_nb_hairpinq(portid_t *pid) +{ + queueid_t allowed_max_hairpinq = RTE_MAX_QUEUES_PER_PORT; + portid_t pi; + struct rte_eth_hairpin_cap cap; + + RTE_ETH_FOREACH_DEV(pi) { + if (rte_eth_dev_hairpin_capability_get(pi, &cap) != 0) { + *pid = pi; + return 0; + } + if (cap.max_nb_queues < allowed_max_hairpinq) { + allowed_max_hairpinq = cap.max_nb_queues; + *pid = pi; + } + } + return allowed_max_hairpinq; +} + +/* + * Check input hairpin is valid or not. + * If input hairpin is not greater than any of maximum number + * of hairpin queues of all ports, it is valid. 
+ * if valid, return 0, else return -1 + */ +int +check_nb_hairpinq(queueid_t hairpinq) +{ + queueid_t allowed_max_hairpinq; + portid_t pid = 0; + + allowed_max_hairpinq = get_allowed_max_nb_hairpinq(&pid); + if (hairpinq > allowed_max_hairpinq) { + fprintf(stderr, + "Fail: input hairpin (%u) can't be greater than max_hairpin_queues (%u) of port %u\n", + hairpinq, allowed_max_hairpinq, pid); + return -1; + } + return 0; +} + +#define HAIRPIN_MODE_RX_FORCE_MEMORY RTE_BIT32(8) +#define HAIRPIN_MODE_TX_FORCE_MEMORY RTE_BIT32(9) + +#define HAIRPIN_MODE_RX_LOCKED_MEMORY RTE_BIT32(12) +#define HAIRPIN_MODE_RX_RTE_MEMORY RTE_BIT32(13) + +#define HAIRPIN_MODE_TX_LOCKED_MEMORY RTE_BIT32(16) +#define HAIRPIN_MODE_TX_RTE_MEMORY RTE_BIT32(17) + +static int +port_config_hairpin_rxq(portid_t pi, uint16_t peer_tx_port, + queueid_t rxq_head, queueid_t txq_head, + uint16_t qcount, uint32_t manual_bind) +{ + int diag; + queueid_t i, qi; + struct rte_port *port = &ports[pi]; + struct rte_eth_hairpin_conf hairpin_conf = { + .peer_count = 1, + .peers[0].port = peer_tx_port, + .manual_bind = manual_bind, + .tx_explicit = !!(hairpin_mode & 0x10), + .force_memory = !!(hairpin_mode & HAIRPIN_MODE_RX_FORCE_MEMORY), + .use_locked_device_memory = + !!(hairpin_mode & HAIRPIN_MODE_RX_LOCKED_MEMORY), + .use_rte_memory = !!(hairpin_mode & HAIRPIN_MODE_RX_RTE_MEMORY), + }; + + for (qi = rxq_head, i = 0; qi < rxq_head + qcount; qi++, i++) { + hairpin_conf.peers[0].queue = i + txq_head; + diag = rte_eth_rx_hairpin_queue_setup(pi, qi, nb_rxd, &hairpin_conf); + if (diag == 0) + continue; + + /* Fail to setup rx queue, return */ + if (port->port_status == RTE_PORT_HANDLING) + port->port_status = RTE_PORT_STOPPED; + else + fprintf(stderr, + "Port %d can not be set back to stopped\n", pi); + fprintf(stderr, + "Port %u failed to configure hairpin on rxq %u.\n" + "Peer port: %u peer txq: %u\n", + pi, qi, peer_tx_port, i); + /* try to reconfigure queues next time */ + port->need_reconfig_queues = 1; + return -1; + } + return 0; +} + +static int +port_config_hairpin_txq(portid_t pi, uint16_t peer_rx_port, + queueid_t rxq_head, queueid_t txq_head, + uint16_t qcount, uint32_t manual_bind) +{ + int diag; + queueid_t i, qi; + struct rte_port *port = &ports[pi]; + struct rte_eth_hairpin_conf hairpin_conf = { + .peer_count = 1, + .peers[0].port = peer_rx_port, + .manual_bind = manual_bind, + .tx_explicit = !!(hairpin_mode & 0x10), + .force_memory = !!(hairpin_mode & HAIRPIN_MODE_TX_FORCE_MEMORY), + .use_locked_device_memory = + !!(hairpin_mode & HAIRPIN_MODE_TX_LOCKED_MEMORY), + .use_rte_memory = !!(hairpin_mode & HAIRPIN_MODE_TX_RTE_MEMORY), + }; + + for (qi = txq_head, i = 0; qi < txq_head + qcount; qi++, i++) { + hairpin_conf.peers[0].queue = i + rxq_head; + diag = rte_eth_tx_hairpin_queue_setup(pi, qi, nb_txd, &hairpin_conf); + if (diag == 0) + continue; + + /* Fail to setup rx queue, return */ + if (port->port_status == RTE_PORT_HANDLING) + port->port_status = RTE_PORT_STOPPED; + else + fprintf(stderr, + "Port %d can not be set back to stopped\n", pi); + fprintf(stderr, + "Port %d failed to configure hairpin on txq %u.\n" + "Peer port: %u peer rxq: %u\n", + pi, qi, peer_rx_port, i); + /* try to reconfigure queues next time */ + port->need_reconfig_queues = 1; + return -1; + } + return 0; +} + +static int +setup_legacy_hairpin_queues(portid_t pi, portid_t p_pi, uint16_t cnt_pi) +{ + int diag; + uint16_t peer_rx_port = pi; + uint16_t peer_tx_port = pi; + uint32_t manual = 1; + + if (!(hairpin_mode & 0xf)) { + peer_rx_port = pi; + 
peer_tx_port = pi; + manual = 0; + } else if (hairpin_mode & 0x1) { + peer_tx_port = rte_eth_find_next_owned_by(pi + 1, + RTE_ETH_DEV_NO_OWNER); + if (peer_tx_port >= RTE_MAX_ETHPORTS) + peer_tx_port = rte_eth_find_next_owned_by(0, + RTE_ETH_DEV_NO_OWNER); + if (p_pi != RTE_MAX_ETHPORTS) { + peer_rx_port = p_pi; + } else { + uint16_t next_pi; + + /* Last port will be the peer RX port of the first. */ + RTE_ETH_FOREACH_DEV(next_pi) + peer_rx_port = next_pi; + } + manual = 1; + } else if (hairpin_mode & 0x2) { + if (cnt_pi & 0x1) { + peer_rx_port = p_pi; + } else { + peer_rx_port = rte_eth_find_next_owned_by(pi + 1, + RTE_ETH_DEV_NO_OWNER); + if (peer_rx_port >= RTE_MAX_ETHPORTS) + peer_rx_port = pi; + } + peer_tx_port = peer_rx_port; + manual = 1; + } + diag = port_config_hairpin_txq(pi, peer_rx_port, nb_rxq, nb_txq, + nb_hairpinq, manual); + if (diag) + return diag; + diag = port_config_hairpin_rxq(pi, peer_tx_port, nb_rxq, nb_txq, + nb_hairpinq, manual); + if (diag) + return diag; + return 0; +} + +static int +setup_mapped_harpin_queues(portid_t pi) +{ + int ret = 0; + struct hairpin_map *map; + + LIST_FOREACH(map, &hairpin_map_head, entry) { + if (map->rx_port == pi) { + ret = port_config_hairpin_rxq(pi, map->tx_port, + map->rxq_head, + map->txq_head, + map->qnum, true); + if (ret) + return ret; + } + if (map->tx_port == pi) { + ret = port_config_hairpin_txq(pi, map->rx_port, + map->rxq_head, + map->txq_head, + map->qnum, true); + if (ret) + return ret; + } + } + return 0; +} + +/* Configure the Rx and Tx hairpin queues for the selected port. */ +int +setup_hairpin_queues(portid_t pi, portid_t p_pi, uint16_t cnt_pi) +{ + if (hairpin_multiport_mode) + return setup_mapped_harpin_queues(pi); + + return setup_legacy_hairpin_queues(pi, p_pi, cnt_pi); +} + +int +hairpin_bind(uint16_t cfg_pi, portid_t *pl, portid_t *peer_pl) +{ + uint16_t i; + portid_t pi; + int peer_pi; + int diag; + int j; + + /* bind all started hairpin ports */ + for (i = 0; i < cfg_pi; i++) { + pi = pl[i]; + /* bind current Tx to all peer Rx */ + peer_pi = rte_eth_hairpin_get_peer_ports(pi, peer_pl, + RTE_MAX_ETHPORTS, 1); + if (peer_pi < 0) + return peer_pi; + for (j = 0; j < peer_pi; j++) { + if (!port_is_started(peer_pl[j])) + continue; + diag = rte_eth_hairpin_bind(pi, peer_pl[j]); + if (diag < 0) { + fprintf(stderr, + "Error during binding hairpin Tx port %u to %u: %s\n", + pi, peer_pl[j], + rte_strerror(-diag)); + return -1; + } + } + /* bind all peer Tx to current Rx */ + peer_pi = rte_eth_hairpin_get_peer_ports(pi, peer_pl, + RTE_MAX_ETHPORTS, 0); + if (peer_pi < 0) + return peer_pi; + for (j = 0; j < peer_pi; j++) { + if (!port_is_started(peer_pl[j])) + continue; + diag = rte_eth_hairpin_bind(peer_pl[j], pi); + if (diag < 0) { + fprintf(stderr, + "Error during binding hairpin Tx port %u to %u: %s\n", + peer_pl[j], pi, + rte_strerror(-diag)); + return -1; + } + } + } + return 0; +} + +void +hairpin_map_usage(void) +{ + printf(" --hairpin-map=rxpi:rxq:txpi:txq:n: hairpin map.\n" + " rxpi - Rx port index.\n" + " rxq - Rx queue.\n" + " txpi - Tx port index.\n" + " txq - Tx queue.\n" + " n - hairpin queues number.\n"); +} + +int +parse_hairpin_map(const char *hpmap) +{ + /* + * Testpmd hairpin map format: + * + */ + int ret; + struct hairpin_map *map = calloc(1, sizeof(*map)); + + if (!map) + return -ENOMEM; + + ret = sscanf(hpmap, "%hu:%hu:%hu:%hu:%hu", + &map->rx_port, &map->rxq_head, + &map->tx_port, &map->txq_head, &map->qnum); + if (ret != 5) { + free(map); + return -EINVAL; + } + hairpin_add_multiport_map(map); + 
return 0; +} diff --git a/app/test-pmd/icmpecho.c b/app/test-pmd/icmpecho.c index 4ef23ae67a..c87b7a80df 100644 --- a/app/test-pmd/icmpecho.c +++ b/app/test-pmd/icmpecho.c @@ -241,27 +241,6 @@ ipv4_addr_dump(const char *what, uint32_t be_ipv4_addr) printf("%s", buf); } -static uint16_t -ipv4_hdr_cksum(struct rte_ipv4_hdr *ip_h) -{ - uint16_t *v16_h; - uint32_t ip_cksum; - - /* - * Compute the sum of successive 16-bit words of the IPv4 header, - * skipping the checksum field of the header. - */ - v16_h = (unaligned_uint16_t *) ip_h; - ip_cksum = v16_h[0] + v16_h[1] + v16_h[2] + v16_h[3] + - v16_h[4] + v16_h[6] + v16_h[7] + v16_h[8] + v16_h[9]; - - /* reduce 32 bit checksum to 16 bits and complement it */ - ip_cksum = (ip_cksum & 0xffff) + (ip_cksum >> 16); - ip_cksum = (ip_cksum & 0xffff) + (ip_cksum >> 16); - ip_cksum = (~ip_cksum) & 0x0000FFFF; - return (ip_cksum == 0) ? 0xFFFF : (uint16_t) ip_cksum; -} - #define is_multicast_ipv4_addr(ipv4_addr) \ (((rte_be_to_cpu_32((ipv4_addr)) >> 24) & 0x000000FF) == 0xE0) @@ -458,7 +437,7 @@ reply_to_icmp_echo_rqsts(struct fwd_stream *fs) ip_src = (ip_src & 0xFFFFFFFC) | 0x00000001; ip_h->src_addr = rte_cpu_to_be_32(ip_src); ip_h->dst_addr = ip_addr; - ip_h->hdr_checksum = ipv4_hdr_cksum(ip_h); + ip_h->hdr_checksum = rte_ipv4_cksum_simple(ip_h); } else { ip_h->src_addr = ip_h->dst_addr; ip_h->dst_addr = ip_addr; diff --git a/app/test-pmd/meson.build b/app/test-pmd/meson.build index 719f875be0..f1c36529b4 100644 --- a/app/test-pmd/meson.build +++ b/app/test-pmd/meson.build @@ -15,6 +15,7 @@ sources = files( 'config.c', 'csumonly.c', 'flowgen.c', + 'hairpin.c', 'icmpecho.c', 'ieee1588fwd.c', 'iofwd.c', diff --git a/app/test-pmd/parameters.c b/app/test-pmd/parameters.c index 22364e09ab..7b31b94542 100644 --- a/app/test-pmd/parameters.c +++ b/app/test-pmd/parameters.c @@ -143,6 +143,8 @@ enum { TESTPMD_OPT_HAIRPINQ_NUM, #define TESTPMD_OPT_HAIRPIN_MODE "hairpin-mode" TESTPMD_OPT_HAIRPIN_MODE_NUM, +#define TESTPMD_OPT_HAIRPIN_MAP "hairpin-map" + TESTPMD_OPT_HAIRPIN_MAP_NUM, #define TESTPMD_OPT_BURST "burst" TESTPMD_OPT_BURST_NUM, #define TESTPMD_OPT_FLOWGEN_CLONES "flowgen-clones" @@ -317,6 +319,7 @@ static const struct option long_options[] = { REQUIRED_ARG(TESTPMD_OPT_TXD), REQUIRED_ARG(TESTPMD_OPT_HAIRPINQ), REQUIRED_ARG(TESTPMD_OPT_HAIRPIN_MODE), + REQUIRED_ARG(TESTPMD_OPT_HAIRPIN_MAP), REQUIRED_ARG(TESTPMD_OPT_BURST), REQUIRED_ARG(TESTPMD_OPT_FLOWGEN_CLONES), REQUIRED_ARG(TESTPMD_OPT_FLOWGEN_FLOWS), @@ -542,6 +545,7 @@ usage(char* progname) printf(" --hairpin-mode=0xXX: bitmask set the hairpin port mode.\n" " 0x10 - explicit Tx rule, 0x02 - hairpin ports paired\n" " 0x01 - hairpin ports loop, 0x00 - hairpin port self\n"); + hairpin_map_usage(); } static int @@ -1317,6 +1321,12 @@ launch_args_parse(int argc, char** argv) hairpin_mode = (uint32_t)n; break; } + case TESTPMD_OPT_HAIRPIN_MAP_NUM: + hairpin_multiport_mode = true; + ret = parse_hairpin_map(optarg); + if (ret) + rte_exit(EXIT_FAILURE, "invalid hairpin map\n"); + break; case TESTPMD_OPT_BURST_NUM: n = atoi(optarg); if (n == 0) { diff --git a/app/test-pmd/testpmd.c b/app/test-pmd/testpmd.c index b1401136e4..f487769578 100644 --- a/app/test-pmd/testpmd.c +++ b/app/test-pmd/testpmd.c @@ -284,7 +284,6 @@ uint8_t dcb_config = 0; /* * Configurable number of RX/TX queues. */ -queueid_t nb_hairpinq; /**< Number of hairpin queues per port. */ queueid_t nb_rxq = 1; /**< Number of RX queues per port. */ queueid_t nb_txq = 1; /**< Number of TX queues per port. 
*/ @@ -431,9 +430,6 @@ bool setup_on_probe_event = true; /* Clear ptypes on port initialization. */ uint8_t clear_ptypes = true; -/* Hairpin ports configuration mode. */ -uint32_t hairpin_mode; - /* Pretty printing of ethdev events */ static const char * const eth_event_desc[] = { [RTE_ETH_EVENT_UNKNOWN] = "unknown", @@ -1555,54 +1551,6 @@ check_nb_txd(queueid_t txd) return 0; } - -/* - * Get the allowed maximum number of hairpin queues. - * *pid return the port id which has minimal value of - * max_hairpin_queues in all ports. - */ -queueid_t -get_allowed_max_nb_hairpinq(portid_t *pid) -{ - queueid_t allowed_max_hairpinq = RTE_MAX_QUEUES_PER_PORT; - portid_t pi; - struct rte_eth_hairpin_cap cap; - - RTE_ETH_FOREACH_DEV(pi) { - if (rte_eth_dev_hairpin_capability_get(pi, &cap) != 0) { - *pid = pi; - return 0; - } - if (cap.max_nb_queues < allowed_max_hairpinq) { - allowed_max_hairpinq = cap.max_nb_queues; - *pid = pi; - } - } - return allowed_max_hairpinq; -} - -/* - * Check input hairpin is valid or not. - * If input hairpin is not greater than any of maximum number - * of hairpin queues of all ports, it is valid. - * if valid, return 0, else return -1 - */ -int -check_nb_hairpinq(queueid_t hairpinq) -{ - queueid_t allowed_max_hairpinq; - portid_t pid = 0; - - allowed_max_hairpinq = get_allowed_max_nb_hairpinq(&pid); - if (hairpinq > allowed_max_hairpinq) { - fprintf(stderr, - "Fail: input hairpin (%u) can't be greater than max_hairpin_queues (%u) of port %u\n", - hairpinq, allowed_max_hairpinq, pid); - return -1; - } - return 0; -} - static int get_eth_overhead(struct rte_eth_dev_info *dev_info) { @@ -2684,126 +2632,6 @@ port_is_started(portid_t port_id) return 1; } -#define HAIRPIN_MODE_RX_FORCE_MEMORY RTE_BIT32(8) -#define HAIRPIN_MODE_TX_FORCE_MEMORY RTE_BIT32(9) - -#define HAIRPIN_MODE_RX_LOCKED_MEMORY RTE_BIT32(12) -#define HAIRPIN_MODE_RX_RTE_MEMORY RTE_BIT32(13) - -#define HAIRPIN_MODE_TX_LOCKED_MEMORY RTE_BIT32(16) -#define HAIRPIN_MODE_TX_RTE_MEMORY RTE_BIT32(17) - - -/* Configure the Rx and Tx hairpin queues for the selected port. */ -static int -setup_hairpin_queues(portid_t pi, portid_t p_pi, uint16_t cnt_pi) -{ - queueid_t qi; - struct rte_eth_hairpin_conf hairpin_conf = { - .peer_count = 1, - }; - int i; - int diag; - struct rte_port *port = &ports[pi]; - uint16_t peer_rx_port = pi; - uint16_t peer_tx_port = pi; - uint32_t manual = 1; - uint32_t tx_exp = hairpin_mode & 0x10; - uint32_t rx_force_memory = hairpin_mode & HAIRPIN_MODE_RX_FORCE_MEMORY; - uint32_t rx_locked_memory = hairpin_mode & HAIRPIN_MODE_RX_LOCKED_MEMORY; - uint32_t rx_rte_memory = hairpin_mode & HAIRPIN_MODE_RX_RTE_MEMORY; - uint32_t tx_force_memory = hairpin_mode & HAIRPIN_MODE_TX_FORCE_MEMORY; - uint32_t tx_locked_memory = hairpin_mode & HAIRPIN_MODE_TX_LOCKED_MEMORY; - uint32_t tx_rte_memory = hairpin_mode & HAIRPIN_MODE_TX_RTE_MEMORY; - - if (!(hairpin_mode & 0xf)) { - peer_rx_port = pi; - peer_tx_port = pi; - manual = 0; - } else if (hairpin_mode & 0x1) { - peer_tx_port = rte_eth_find_next_owned_by(pi + 1, - RTE_ETH_DEV_NO_OWNER); - if (peer_tx_port >= RTE_MAX_ETHPORTS) - peer_tx_port = rte_eth_find_next_owned_by(0, - RTE_ETH_DEV_NO_OWNER); - if (p_pi != RTE_MAX_ETHPORTS) { - peer_rx_port = p_pi; - } else { - uint16_t next_pi; - - /* Last port will be the peer RX port of the first. 
*/ - RTE_ETH_FOREACH_DEV(next_pi) - peer_rx_port = next_pi; - } - manual = 1; - } else if (hairpin_mode & 0x2) { - if (cnt_pi & 0x1) { - peer_rx_port = p_pi; - } else { - peer_rx_port = rte_eth_find_next_owned_by(pi + 1, - RTE_ETH_DEV_NO_OWNER); - if (peer_rx_port >= RTE_MAX_ETHPORTS) - peer_rx_port = pi; - } - peer_tx_port = peer_rx_port; - manual = 1; - } - - for (qi = nb_txq, i = 0; qi < nb_hairpinq + nb_txq; qi++) { - hairpin_conf.peers[0].port = peer_rx_port; - hairpin_conf.peers[0].queue = i + nb_rxq; - hairpin_conf.manual_bind = !!manual; - hairpin_conf.tx_explicit = !!tx_exp; - hairpin_conf.force_memory = !!tx_force_memory; - hairpin_conf.use_locked_device_memory = !!tx_locked_memory; - hairpin_conf.use_rte_memory = !!tx_rte_memory; - diag = rte_eth_tx_hairpin_queue_setup - (pi, qi, nb_txd, &hairpin_conf); - i++; - if (diag == 0) - continue; - - /* Fail to setup rx queue, return */ - if (port->port_status == RTE_PORT_HANDLING) - port->port_status = RTE_PORT_STOPPED; - else - fprintf(stderr, - "Port %d can not be set back to stopped\n", pi); - fprintf(stderr, "Fail to configure port %d hairpin queues\n", - pi); - /* try to reconfigure queues next time */ - port->need_reconfig_queues = 1; - return -1; - } - for (qi = nb_rxq, i = 0; qi < nb_hairpinq + nb_rxq; qi++) { - hairpin_conf.peers[0].port = peer_tx_port; - hairpin_conf.peers[0].queue = i + nb_txq; - hairpin_conf.manual_bind = !!manual; - hairpin_conf.tx_explicit = !!tx_exp; - hairpin_conf.force_memory = !!rx_force_memory; - hairpin_conf.use_locked_device_memory = !!rx_locked_memory; - hairpin_conf.use_rte_memory = !!rx_rte_memory; - diag = rte_eth_rx_hairpin_queue_setup - (pi, qi, nb_rxd, &hairpin_conf); - i++; - if (diag == 0) - continue; - - /* Fail to setup rx queue, return */ - if (port->port_status == RTE_PORT_HANDLING) - port->port_status = RTE_PORT_STOPPED; - else - fprintf(stderr, - "Port %d can not be set back to stopped\n", pi); - fprintf(stderr, "Fail to configure port %d hairpin queues\n", - pi); - /* try to reconfigure queues next time */ - port->need_reconfig_queues = 1; - return -1; - } - return 0; -} - /* Configure the Rx with optional split. 
*/ int rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id, @@ -3043,7 +2871,6 @@ start_port(portid_t pid) portid_t peer_pl[RTE_MAX_ETHPORTS]; uint16_t cnt_pi = 0; uint16_t cfg_pi = 0; - int peer_pi; queueid_t qi; struct rte_port *port; struct rte_eth_hairpin_cap cap; @@ -3304,47 +3131,9 @@ start_port(portid_t pid) fprintf(stderr, "Please stop the ports first\n"); if (hairpin_mode & 0xf) { - uint16_t i; - int j; - - /* bind all started hairpin ports */ - for (i = 0; i < cfg_pi; i++) { - pi = pl[i]; - /* bind current Tx to all peer Rx */ - peer_pi = rte_eth_hairpin_get_peer_ports(pi, peer_pl, - RTE_MAX_ETHPORTS, 1); - if (peer_pi < 0) - return peer_pi; - for (j = 0; j < peer_pi; j++) { - if (!port_is_started(peer_pl[j])) - continue; - diag = rte_eth_hairpin_bind(pi, peer_pl[j]); - if (diag < 0) { - fprintf(stderr, - "Error during binding hairpin Tx port %u to %u: %s\n", - pi, peer_pl[j], - rte_strerror(-diag)); - return -1; - } - } - /* bind all peer Tx to current Rx */ - peer_pi = rte_eth_hairpin_get_peer_ports(pi, peer_pl, - RTE_MAX_ETHPORTS, 0); - if (peer_pi < 0) - return peer_pi; - for (j = 0; j < peer_pi; j++) { - if (!port_is_started(peer_pl[j])) - continue; - diag = rte_eth_hairpin_bind(peer_pl[j], pi); - if (diag < 0) { - fprintf(stderr, - "Error during binding hairpin Tx port %u to %u: %s\n", - peer_pl[j], pi, - rte_strerror(-diag)); - return -1; - } - } - } + diag = hairpin_bind(cfg_pi, pl, peer_pl); + if (diag < 0) + return -1; } fill_xstats_display_info_for_port(pid); diff --git a/app/test-pmd/testpmd.h b/app/test-pmd/testpmd.h index 131ea53f84..314482e69c 100644 --- a/app/test-pmd/testpmd.h +++ b/app/test-pmd/testpmd.h @@ -126,6 +126,16 @@ enum noisy_fwd_mode { NOISY_FWD_MODE_MAX, }; +/** + * Command line arguments parser sets `hairpin_multiport_mode` to True + * if explicit hairpin map configuration mode was used. + */ +extern bool hairpin_multiport_mode; + +/** Hairpin maps list. */ +struct hairpin_map; +extern void hairpin_add_multiport_map(struct hairpin_map *map); + /** * The data structure associated with RX and TX packet burst statistics * that are recorded for each forwarding stream. @@ -1255,6 +1265,10 @@ extern int flow_parse(const char *src, void *result, unsigned int size, struct rte_flow_attr **attr, struct rte_flow_item **pattern, struct rte_flow_action **actions); +int setup_hairpin_queues(portid_t pi, portid_t p_pi, uint16_t cnt_pi); +int hairpin_bind(uint16_t cfg_pi, portid_t *pl, portid_t *peer_pl); +void hairpin_map_usage(void); +int parse_hairpin_map(const char *hpmap); uint64_t str_to_rsstypes(const char *str); const char *rsstypes_to_str(uint64_t rss_type); diff --git a/app/test-pmd/txonly.c b/app/test-pmd/txonly.c index c2b88764be..59d821a22d 100644 --- a/app/test-pmd/txonly.c +++ b/app/test-pmd/txonly.c @@ -106,8 +106,6 @@ setup_pkt_udp_ip_headers(struct rte_ipv4_hdr *ip_hdr, struct rte_udp_hdr *udp_hdr, uint16_t pkt_data_len) { - uint16_t *ptr16; - uint32_t ip_cksum; uint16_t pkt_len; /* @@ -136,25 +134,7 @@ setup_pkt_udp_ip_headers(struct rte_ipv4_hdr *ip_hdr, /* * Compute IP header checksum. */ - ptr16 = (unaligned_uint16_t*) ip_hdr; - ip_cksum = 0; - ip_cksum += ptr16[0]; ip_cksum += ptr16[1]; - ip_cksum += ptr16[2]; ip_cksum += ptr16[3]; - ip_cksum += ptr16[4]; - ip_cksum += ptr16[6]; ip_cksum += ptr16[7]; - ip_cksum += ptr16[8]; ip_cksum += ptr16[9]; - - /* - * Reduce 32 bit checksum to 16 bits and complement it. 
- */ - ip_cksum = ((ip_cksum & 0xFFFF0000) >> 16) + - (ip_cksum & 0x0000FFFF); - if (ip_cksum > 65535) - ip_cksum -= 65535; - ip_cksum = (~ip_cksum) & 0x0000FFFF; - if (ip_cksum == 0) - ip_cksum = 0xFFFF; - ip_hdr->hdr_checksum = (uint16_t) ip_cksum; + ip_hdr->hdr_checksum = rte_ipv4_cksum_simple(ip_hdr); } static inline void diff --git a/app/test/meson.build b/app/test/meson.build index 0f7e11969a..40f22a54d5 100644 --- a/app/test/meson.build +++ b/app/test/meson.build @@ -104,6 +104,8 @@ source_file_deps = { 'test_ipsec_sad.c': ['ipsec'], 'test_kvargs.c': ['kvargs'], 'test_latencystats.c': ['ethdev', 'latencystats', 'metrics'] + sample_packet_forward_deps, + 'test_lcore_var.c': [], + 'test_lcore_var_perf.c': [], 'test_lcores.c': [], 'test_link_bonding.c': ['ethdev', 'net_bond', 'net'] + packet_burst_generator_deps + virtual_pmd_deps, diff --git a/app/test/packet_burst_generator.c b/app/test/packet_burst_generator.c index c9ff5257f0..4c17737739 100644 --- a/app/test/packet_burst_generator.c +++ b/app/test/packet_burst_generator.c @@ -159,8 +159,6 @@ initialize_ipv4_header(struct rte_ipv4_hdr *ip_hdr, uint32_t src_addr, uint32_t dst_addr, uint16_t pkt_data_len) { uint16_t pkt_len; - unaligned_uint16_t *ptr16; - uint32_t ip_cksum; /* * Initialize IP header. @@ -177,27 +175,7 @@ initialize_ipv4_header(struct rte_ipv4_hdr *ip_hdr, uint32_t src_addr, ip_hdr->src_addr = rte_cpu_to_be_32(src_addr); ip_hdr->dst_addr = rte_cpu_to_be_32(dst_addr); - /* - * Compute IP header checksum. - */ - ptr16 = (unaligned_uint16_t *)ip_hdr; - ip_cksum = 0; - ip_cksum += ptr16[0]; ip_cksum += ptr16[1]; - ip_cksum += ptr16[2]; ip_cksum += ptr16[3]; - ip_cksum += ptr16[4]; - ip_cksum += ptr16[6]; ip_cksum += ptr16[7]; - ip_cksum += ptr16[8]; ip_cksum += ptr16[9]; - - /* - * Reduce 32 bit checksum to 16 bits and complement it. - */ - ip_cksum = ((ip_cksum & 0xFFFF0000) >> 16) + - (ip_cksum & 0x0000FFFF); - ip_cksum %= 65536; - ip_cksum = (~ip_cksum) & 0x0000FFFF; - if (ip_cksum == 0) - ip_cksum = 0xFFFF; - ip_hdr->hdr_checksum = (uint16_t) ip_cksum; + ip_hdr->hdr_checksum = rte_ipv4_cksum_simple(ip_hdr); return pkt_len; } @@ -207,8 +185,6 @@ initialize_ipv4_header_proto(struct rte_ipv4_hdr *ip_hdr, uint32_t src_addr, uint32_t dst_addr, uint16_t pkt_data_len, uint8_t proto) { uint16_t pkt_len; - unaligned_uint16_t *ptr16; - uint32_t ip_cksum; /* * Initialize IP header. @@ -224,28 +200,7 @@ initialize_ipv4_header_proto(struct rte_ipv4_hdr *ip_hdr, uint32_t src_addr, ip_hdr->total_length = rte_cpu_to_be_16(pkt_len); ip_hdr->src_addr = rte_cpu_to_be_32(src_addr); ip_hdr->dst_addr = rte_cpu_to_be_32(dst_addr); - - /* - * Compute IP header checksum. - */ - ptr16 = (unaligned_uint16_t *)ip_hdr; - ip_cksum = 0; - ip_cksum += ptr16[0]; ip_cksum += ptr16[1]; - ip_cksum += ptr16[2]; ip_cksum += ptr16[3]; - ip_cksum += ptr16[4]; - ip_cksum += ptr16[6]; ip_cksum += ptr16[7]; - ip_cksum += ptr16[8]; ip_cksum += ptr16[9]; - - /* - * Reduce 32 bit checksum to 16 bits and complement it. 
- */ - ip_cksum = ((ip_cksum & 0xFFFF0000) >> 16) + - (ip_cksum & 0x0000FFFF); - ip_cksum %= 65536; - ip_cksum = (~ip_cksum) & 0x0000FFFF; - if (ip_cksum == 0) - ip_cksum = 0xFFFF; - ip_hdr->hdr_checksum = (uint16_t) ip_cksum; + ip_hdr->hdr_checksum = rte_ipv4_cksum_simple(ip_hdr); return pkt_len; } diff --git a/app/test/test_bitops.c b/app/test/test_bitops.c index 681e984037..78a7df6bb1 100644 --- a/app/test/test_bitops.c +++ b/app/test/test_bitops.c @@ -13,6 +13,17 @@ #include #include "test.h" +static unsigned int +get_worker_lcore(void) +{ + unsigned int lcore_id = rte_get_next_lcore(-1, 1, 0); + + /* avoid checkers (like Coverity) false positives */ + RTE_VERIFY(lcore_id < RTE_MAX_LCORE); + + return lcore_id; +} + #define GEN_TEST_BIT_ACCESS(test_name, set_fun, clear_fun, assign_fun, flip_fun, test_fun, size, \ mod) \ static int \ @@ -158,7 +169,7 @@ test_bit_atomic_parallel_assign ## size(void) \ printf("Need multiple cores to run parallel test.\n"); \ return TEST_SKIPPED; \ } \ - worker_lcore_id = rte_get_next_lcore(-1, 1, 0); \ + worker_lcore_id = get_worker_lcore(); \ lmain.bit = rte_rand_max(size); \ do { \ lworker.bit = rte_rand_max(size); \ @@ -217,7 +228,7 @@ test_bit_atomic_parallel_test_and_modify ## size(void) \ printf("Need multiple cores to run parallel test.\n"); \ return TEST_SKIPPED; \ } \ - worker_lcore_id = rte_get_next_lcore(-1, 1, 0); \ + worker_lcore_id = get_worker_lcore(); \ int rc = rte_eal_remote_launch(run_parallel_test_and_modify ## size, &lworker, \ worker_lcore_id); \ TEST_ASSERT(rc == 0, "Worker thread launch failed"); \ @@ -266,7 +277,7 @@ test_bit_atomic_parallel_flip ## size(void) \ printf("Need multiple cores to run parallel test.\n"); \ return TEST_SKIPPED; \ } \ - worker_lcore_id = rte_get_next_lcore(-1, 1, 0); \ + worker_lcore_id = get_worker_lcore(); \ int rc = rte_eal_remote_launch(run_parallel_flip ## size, &lworker, worker_lcore_id); \ TEST_ASSERT(rc == 0, "Worker thread launch failed"); \ run_parallel_flip ## size(&lmain); \ diff --git a/app/test/test_cryptodev.c b/app/test/test_cryptodev.c index 25eef342b0..c647baeee1 100644 --- a/app/test/test_cryptodev.c +++ b/app/test/test_cryptodev.c @@ -2496,7 +2496,8 @@ test_queue_pair_descriptor_count(void) int qp_depth = 0; int i; - RTE_VERIFY(gbl_action_type != RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO); + if (gbl_action_type == RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO) + return TEST_SKIPPED; /* Verify if the queue pair depth API is supported by driver */ qp_depth = rte_cryptodev_qp_depth_used(ts_params->valid_devs[0], 0); @@ -15135,6 +15136,10 @@ test_enq_callback_setup(void) uint16_t qp_id = 0; int j = 0; + /* Skip test if synchronous API is used */ + if (gbl_action_type == RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO) + return TEST_SKIPPED; + /* Verify the crypto capabilities for which enqueue/dequeue is done. */ cap_idx.type = RTE_CRYPTO_SYM_XFORM_AUTH; cap_idx.algo.auth = RTE_CRYPTO_AUTH_NULL; @@ -15256,6 +15261,10 @@ test_deq_callback_setup(void) uint16_t qp_id = 0; int j = 0; + /* Skip test if synchronous API is used */ + if (gbl_action_type == RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO) + return TEST_SKIPPED; + /* Verify the crypto capabilities for which enqueue/dequeue is done. 
*/ cap_idx.type = RTE_CRYPTO_SYM_XFORM_AUTH; cap_idx.algo.auth = RTE_CRYPTO_AUTH_NULL; diff --git a/app/test/test_eal_flags.c b/app/test/test_eal_flags.c index 71d8dba731..d37d6b8627 100644 --- a/app/test/test_eal_flags.c +++ b/app/test/test_eal_flags.c @@ -985,12 +985,12 @@ test_misc_flags(void) /* With -v */ const char *argv2[] = {prgname, prefix, mp_flag, "-v"}; /* With valid --syslog */ - const char *argv3[] = {prgname, prefix, mp_flag, - "--syslog", "syslog"}; - /* With empty --syslog (should fail) */ + const char *argv3[] = {prgname, prefix, mp_flag, "--syslog=user"}; + /* With empty --syslog (now defaults) */ const char *argv4[] = {prgname, prefix, mp_flag, "--syslog"}; /* With invalid --syslog */ - const char *argv5[] = {prgname, prefix, mp_flag, "--syslog", "error"}; + const char *argv5[] = {prgname, prefix, mp_flag, "--syslog=invalid"}; + /* With no-sh-conf, also use no-huge to ensure this test runs on BSD */ const char *argv6[] = {prgname, "-m", DEFAULT_MEM_SIZE, no_shconf, nosh_prefix, no_huge}; @@ -1055,6 +1055,30 @@ test_misc_flags(void) const char * const argv22[] = {prgname, prefix, mp_flag, "--huge-worker-stack=512"}; + /* Try running with --log-timestamp */ + const char * const argv23[] = {prgname, prefix, mp_flag, + "--log-timestamp" }; + + /* Try running with --log-timestamp=iso */ + const char * const argv24[] = {prgname, prefix, mp_flag, + "--log-timestamp=iso" }; + + /* Try running with invalid timestamp */ + const char * const argv25[] = {prgname, prefix, mp_flag, + "--log-timestamp=invalid" }; + + /* Try running with --log-color */ + const char * const argv26[] = {prgname, prefix, mp_flag, + "--log-color" }; + + /* Try running with --log-color=never */ + const char * const argv27[] = {prgname, prefix, mp_flag, + "--log-color=never" }; + + /* Try running with --log-color=invalid */ + const char * const argv28[] = {prgname, prefix, mp_flag, + "--log-color=invalid" }; + /* run all tests also applicable to FreeBSD first */ if (launch_proc(argv0) == 0) { @@ -1080,15 +1104,15 @@ test_misc_flags(void) #endif if (launch_proc(argv3) != 0) { - printf("Error - process did not run ok with --syslog flag\n"); + printf("Error - process did not run ok with --syslog=user flag\n"); goto fail; } - if (launch_proc(argv4) == 0) { - printf("Error - process run ok with empty --syslog flag\n"); + if (launch_proc(argv4) != 0) { + printf("Error - process did not run ok with --syslog flag\n"); goto fail; } if (launch_proc(argv5) == 0) { - printf("Error - process run ok with invalid --syslog flag\n"); + printf("Error - process run ok with --syslog=invalid flag\n"); goto fail; } if (launch_proc(argv7) != 0) { @@ -1162,6 +1186,30 @@ test_misc_flags(void) printf("Error - process did not run ok with --huge-worker-stack=size parameter\n"); goto fail; } + if (launch_proc(argv23) != 0) { + printf("Error - process did not run ok with --log-timestamp parameter\n"); + goto fail; + } + if (launch_proc(argv24) != 0) { + printf("Error - process did not run ok with --log-timestamp=iso parameter\n"); + goto fail; + } + if (launch_proc(argv25) == 0) { + printf("Error - process did run ok with --log-timestamp=invalid parameter\n"); + goto fail; + } + if (launch_proc(argv26) != 0) { + printf("Error - process did not run ok with --log-color parameter\n"); + goto fail; + } + if (launch_proc(argv27) != 0) { + printf("Error - process did not run ok with --log-color=never parameter\n"); + goto fail; + } + if (launch_proc(argv28) == 0) { + printf("Error - process did run ok with --log-timestamp=invalid 
parameter\n"); + goto fail; + } rmdir(hugepath_dir3); rmdir(hugepath_dir2); diff --git a/app/test/test_eventdev.c b/app/test/test_eventdev.c index a9258d2abc..e97754bd47 100644 --- a/app/test/test_eventdev.c +++ b/app/test/test_eventdev.c @@ -1450,7 +1450,7 @@ static struct unit_test_suite eventdev_common_testsuite = { test_eventdev_start_stop), TEST_CASE_ST(eventdev_configure_setup, eventdev_stop_device, test_eventdev_profile_switch), - TEST_CASE_ST(eventdev_configure_setup, NULL, + TEST_CASE_ST(eventdev_configure_setup, eventdev_stop_device, test_eventdev_preschedule_configure), TEST_CASE_ST(eventdev_configure_setup, eventdev_stop_device, test_eventdev_preschedule_modify), @@ -1521,6 +1521,12 @@ test_eventdev_selftest_cn10k(void) return test_eventdev_selftest_impl("event_cn10k", ""); } +static int +test_eventdev_selftest_cn20k(void) +{ + return test_eventdev_selftest_impl("event_cn20k", ""); +} + #endif /* !RTE_EXEC_ENV_WINDOWS */ REGISTER_FAST_TEST(eventdev_common_autotest, true, true, test_eventdev_common); @@ -1532,5 +1538,6 @@ REGISTER_DRIVER_TEST(eventdev_selftest_dpaa2, test_eventdev_selftest_dpaa2); REGISTER_DRIVER_TEST(eventdev_selftest_dlb2, test_eventdev_selftest_dlb2); REGISTER_DRIVER_TEST(eventdev_selftest_cn9k, test_eventdev_selftest_cn9k); REGISTER_DRIVER_TEST(eventdev_selftest_cn10k, test_eventdev_selftest_cn10k); +REGISTER_DRIVER_TEST(eventdev_selftest_cn20k, test_eventdev_selftest_cn20k); #endif /* !RTE_EXEC_ENV_WINDOWS */ diff --git a/app/test/test_fib.c b/app/test/test_fib.c index 15035ee045..ecd3fb4297 100644 --- a/app/test/test_fib.c +++ b/app/test/test_fib.c @@ -400,7 +400,6 @@ test_invalid_rcu(void) config.max_routes = MAX_ROUTES; config.rib_ext_sz = 0; config.default_nh = def_nh; - config.type = RTE_FIB_DUMMY; fib = rte_fib_create(__func__, SOCKET_ID_ANY, &config); RTE_TEST_ASSERT(fib != NULL, "Failed to create FIB\n"); @@ -417,23 +416,33 @@ test_invalid_rcu(void) rcu_cfg.v = qsv; /* adding rcu to RTE_FIB_DUMMY FIB type */ + config.type = RTE_FIB_DUMMY; rcu_cfg.mode = RTE_FIB_QSBR_MODE_SYNC; status = rte_fib_rcu_qsbr_add(fib, &rcu_cfg); - RTE_TEST_ASSERT(status == -ENOTSUP, "rte_fib_rcu_qsbr_add returned wrong error status\n"); + RTE_TEST_ASSERT(status == -ENOTSUP, + "rte_fib_rcu_qsbr_add returned wrong error status when called with DUMMY type FIB\n"); rte_fib_free(fib); - /* Invalid QSBR mode */ config.type = RTE_FIB_DIR24_8; config.dir24_8.nh_sz = RTE_FIB_DIR24_8_4B; config.dir24_8.num_tbl8 = MAX_TBL8; fib = rte_fib_create(__func__, SOCKET_ID_ANY, &config); RTE_TEST_ASSERT(fib != NULL, "Failed to create FIB\n"); + + /* Call rte_fib_rcu_qsbr_add without fib or config */ + status = rte_fib_rcu_qsbr_add(NULL, &rcu_cfg); + RTE_TEST_ASSERT(status == -EINVAL, "RCU added without fib\n"); + status = rte_fib_rcu_qsbr_add(fib, NULL); + RTE_TEST_ASSERT(status == -EINVAL, "RCU added without config\n"); + + /* Invalid QSBR mode */ rcu_cfg.mode = 2; status = rte_fib_rcu_qsbr_add(fib, &rcu_cfg); - RTE_TEST_ASSERT(status != 0, "Failed to add RCU\n"); + RTE_TEST_ASSERT(status == -EINVAL, "RCU added with incorrect mode\n"); rcu_cfg.mode = RTE_FIB_QSBR_MODE_DQ; - /* Attach RCU QSBR to FIB */ + + /* Attach RCU QSBR to FIB to check for double attach */ status = rte_fib_rcu_qsbr_add(fib, &rcu_cfg); RTE_TEST_ASSERT(status == 0, "Can not attach RCU to FIB\n"); @@ -445,7 +454,7 @@ test_invalid_rcu(void) rcu_cfg.v = qsv2; rcu_cfg.mode = RTE_FIB_QSBR_MODE_SYNC; status = rte_fib_rcu_qsbr_add(fib, &rcu_cfg); - RTE_TEST_ASSERT(status != 0, "Secondary RCU was mistakenly attached\n"); + 
RTE_TEST_ASSERT(status == -EEXIST, "Secondary RCU was mistakenly attached\n"); rte_fib_free(fib); rte_free(qsv); diff --git a/app/test/test_lcore_var.c b/app/test/test_lcore_var.c new file mode 100644 index 0000000000..bcf785b321 --- /dev/null +++ b/app/test/test_lcore_var.c @@ -0,0 +1,414 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2024 Ericsson AB + */ + +#include +#include +#include + +#include +#include +#include + +#include "test.h" + +#define MIN_LCORES 2 + +RTE_LCORE_VAR_HANDLE(int, test_int); +RTE_LCORE_VAR_HANDLE(char, test_char); +RTE_LCORE_VAR_HANDLE(long, test_long_sized); +RTE_LCORE_VAR_HANDLE(short, test_short); +RTE_LCORE_VAR_HANDLE(long, test_long_sized_aligned); + +struct int_checker_state { + int old_value; + int new_value; + bool success; +}; + +static void +rand_blk(void *blk, size_t size) +{ + size_t i; + + for (i = 0; i < size; i++) + ((unsigned char *)blk)[i] = (unsigned char)rte_rand(); +} + +static bool +is_ptr_aligned(const void *ptr, size_t align) +{ + return ptr != NULL ? (uintptr_t)ptr % align == 0 : false; +} + +static int +check_int(void *arg) +{ + struct int_checker_state *state = arg; + + int *ptr = RTE_LCORE_VAR(test_int); + + bool naturally_aligned = is_ptr_aligned(ptr, sizeof(int)); + + bool equal = *(RTE_LCORE_VAR(test_int)) == state->old_value; + + state->success = equal && naturally_aligned; + + *ptr = state->new_value; + + return 0; +} + +RTE_LCORE_VAR_INIT(test_int); +RTE_LCORE_VAR_INIT(test_char); +RTE_LCORE_VAR_INIT_SIZE(test_long_sized, 32); +RTE_LCORE_VAR_INIT(test_short); +RTE_LCORE_VAR_INIT_SIZE_ALIGN(test_long_sized_aligned, sizeof(long), RTE_CACHE_LINE_SIZE); + +static int +test_int_lvar(void) +{ + unsigned int lcore_id; + + struct int_checker_state states[RTE_MAX_LCORE] = {}; + + RTE_LCORE_FOREACH_WORKER(lcore_id) { + struct int_checker_state *state = &states[lcore_id]; + + state->old_value = (int)rte_rand(); + state->new_value = (int)rte_rand(); + + *RTE_LCORE_VAR_LCORE(lcore_id, test_int) = state->old_value; + } + + RTE_LCORE_FOREACH_WORKER(lcore_id) + rte_eal_remote_launch(check_int, &states[lcore_id], lcore_id); + + rte_eal_mp_wait_lcore(); + + RTE_LCORE_FOREACH_WORKER(lcore_id) { + struct int_checker_state *state = &states[lcore_id]; + int value; + + TEST_ASSERT(state->success, + "Unexpected value encountered on lcore %d", lcore_id); + + value = *RTE_LCORE_VAR_LCORE(lcore_id, test_int); + TEST_ASSERT_EQUAL(state->new_value, value, + "Lcore %d failed to update int", lcore_id); + } + + /* take the opportunity to test the foreach macro */ + int *v; + unsigned int i = 0; + RTE_LCORE_VAR_FOREACH(lcore_id, v, test_int) { + TEST_ASSERT_EQUAL(i, lcore_id, + "Encountered lcore id %d while expecting %d during iteration", + lcore_id, i); + TEST_ASSERT_EQUAL(states[lcore_id].new_value, *v, + "Unexpected value on lcore %d during iteration", lcore_id); + i++; + } + + return TEST_SUCCESS; +} + +static int +test_sized_alignment(void) +{ + unsigned int lcore_id; + long *v; + + RTE_LCORE_VAR_FOREACH(lcore_id, v, test_long_sized) { + TEST_ASSERT(is_ptr_aligned(v, alignof(long)), "Type-derived alignment failed"); + } + + RTE_LCORE_VAR_FOREACH(lcore_id, v, test_long_sized_aligned) { + TEST_ASSERT(is_ptr_aligned(v, RTE_CACHE_LINE_SIZE), "Explicit alignment failed"); + } + + return TEST_SUCCESS; +} + +/* private, larger, struct */ +#define TEST_STRUCT_DATA_SIZE 1234 + +struct test_struct { + uint8_t data[TEST_STRUCT_DATA_SIZE]; +}; + +static RTE_LCORE_VAR_HANDLE(char, before_struct); +static RTE_LCORE_VAR_HANDLE(struct test_struct, 
test_struct); +static RTE_LCORE_VAR_HANDLE(char, after_struct); + +struct struct_checker_state { + struct test_struct old_value; + struct test_struct new_value; + bool success; +}; + +static int check_struct(void *arg) +{ + struct struct_checker_state *state = arg; + + struct test_struct *lcore_struct = RTE_LCORE_VAR(test_struct); + + bool properly_aligned = is_ptr_aligned(test_struct, alignof(struct test_struct)); + + bool equal = memcmp(lcore_struct->data, state->old_value.data, TEST_STRUCT_DATA_SIZE) == 0; + + state->success = equal && properly_aligned; + + memcpy(lcore_struct->data, state->new_value.data, TEST_STRUCT_DATA_SIZE); + + return 0; +} + +static int +test_struct_lvar(void) +{ + unsigned int lcore_id; + + RTE_LCORE_VAR_ALLOC(before_struct); + RTE_LCORE_VAR_ALLOC(test_struct); + RTE_LCORE_VAR_ALLOC(after_struct); + + struct struct_checker_state states[RTE_MAX_LCORE]; + + RTE_LCORE_FOREACH_WORKER(lcore_id) { + struct struct_checker_state *state = &states[lcore_id]; + + rand_blk(state->old_value.data, TEST_STRUCT_DATA_SIZE); + rand_blk(state->new_value.data, TEST_STRUCT_DATA_SIZE); + + memcpy(RTE_LCORE_VAR_LCORE(lcore_id, test_struct)->data, + state->old_value.data, TEST_STRUCT_DATA_SIZE); + } + + RTE_LCORE_FOREACH_WORKER(lcore_id) + rte_eal_remote_launch(check_struct, &states[lcore_id], lcore_id); + + rte_eal_mp_wait_lcore(); + + RTE_LCORE_FOREACH_WORKER(lcore_id) { + struct struct_checker_state *state = &states[lcore_id]; + struct test_struct *lstruct = RTE_LCORE_VAR_LCORE(lcore_id, test_struct); + + TEST_ASSERT(state->success, "Unexpected value encountered on lcore %d", lcore_id); + + bool equal = memcmp(lstruct->data, state->new_value.data, + TEST_STRUCT_DATA_SIZE) == 0; + + TEST_ASSERT(equal, "Lcore %d failed to update struct", lcore_id); + } + + RTE_LCORE_FOREACH_WORKER(lcore_id) { + char before = *RTE_LCORE_VAR_LCORE(lcore_id, before_struct); + char after = *RTE_LCORE_VAR_LCORE(lcore_id, after_struct); + + TEST_ASSERT_EQUAL(before, 0, + "Lcore variable before test struct was modified on lcore %d", + lcore_id); + TEST_ASSERT_EQUAL(after, 0, + "Lcore variable after test struct was modified on lcore %d", + lcore_id); + } + + return TEST_SUCCESS; +} + +#define TEST_ARRAY_SIZE 99 + +typedef uint16_t test_array_t[TEST_ARRAY_SIZE]; + +static void test_array_init_rand(test_array_t a) +{ + size_t i; + for (i = 0; i < TEST_ARRAY_SIZE; i++) + a[i] = (uint16_t)rte_rand(); +} + +static bool test_array_equal(test_array_t a, test_array_t b) +{ + size_t i; + for (i = 0; i < TEST_ARRAY_SIZE; i++) { + if (a[i] != b[i]) + return false; + } + return true; +} + +static void test_array_copy(test_array_t dst, const test_array_t src) +{ + size_t i; + for (i = 0; i < TEST_ARRAY_SIZE; i++) + dst[i] = src[i]; +} + +static RTE_LCORE_VAR_HANDLE(char, before_array); +static RTE_LCORE_VAR_HANDLE(test_array_t, test_array); +static RTE_LCORE_VAR_HANDLE(char, after_array); + +struct array_checker_state { + test_array_t old_value; + test_array_t new_value; + bool success; +}; + +static int check_array(void *arg) +{ + struct array_checker_state *state = arg; + + test_array_t *lcore_array = RTE_LCORE_VAR(test_array); + + bool properly_aligned = is_ptr_aligned(lcore_array, alignof(test_array_t)); + + bool equal = test_array_equal(*lcore_array, state->old_value); + + state->success = equal && properly_aligned; + + test_array_copy(*lcore_array, state->new_value); + + return 0; +} + +static int +test_array_lvar(void) +{ + unsigned int lcore_id; + + RTE_LCORE_VAR_ALLOC(before_array); + 
RTE_LCORE_VAR_ALLOC(test_array); + RTE_LCORE_VAR_ALLOC(after_array); + + struct array_checker_state states[RTE_MAX_LCORE]; + + RTE_LCORE_FOREACH_WORKER(lcore_id) { + struct array_checker_state *state = &states[lcore_id]; + + test_array_init_rand(state->new_value); + test_array_init_rand(state->old_value); + + test_array_copy(*RTE_LCORE_VAR_LCORE(lcore_id, test_array), state->old_value); + } + + RTE_LCORE_FOREACH_WORKER(lcore_id) + rte_eal_remote_launch(check_array, &states[lcore_id], lcore_id); + + rte_eal_mp_wait_lcore(); + + RTE_LCORE_FOREACH_WORKER(lcore_id) { + struct array_checker_state *state = &states[lcore_id]; + test_array_t *larray = RTE_LCORE_VAR_LCORE(lcore_id, test_array); + + TEST_ASSERT(state->success, "Unexpected value encountered on lcore %d", lcore_id); + + bool equal = test_array_equal(*larray, state->new_value); + + TEST_ASSERT(equal, "Lcore %d failed to update array", lcore_id); + } + + RTE_LCORE_FOREACH_WORKER(lcore_id) { + char before = *RTE_LCORE_VAR_LCORE(lcore_id, before_array); + char after = *RTE_LCORE_VAR_LCORE(lcore_id, after_array); + + TEST_ASSERT_EQUAL(before, 0, + "Lcore variable before test array was modified on lcore %d", + lcore_id); + TEST_ASSERT_EQUAL(after, 0, + "Lcore variable after test array was modified on lcore %d", + lcore_id); + } + + return TEST_SUCCESS; +} + +#define MANY_LVARS (2 * RTE_MAX_LCORE_VAR / sizeof(uint32_t)) + +static int +test_many_lvars(void) +{ + uint32_t **handlers = malloc(sizeof(uint32_t *) * MANY_LVARS); + unsigned int i; + + TEST_ASSERT(handlers != NULL, "Unable to allocate memory"); + + for (i = 0; i < MANY_LVARS; i++) { + unsigned int lcore_id; + + RTE_LCORE_VAR_ALLOC(handlers[i]); + + for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) { + uint32_t *v = RTE_LCORE_VAR_LCORE(lcore_id, handlers[i]); + *v = (uint32_t)(i * lcore_id); + } + } + + for (i = 0; i < MANY_LVARS; i++) { + unsigned int lcore_id; + + for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) { + uint32_t v = *RTE_LCORE_VAR_LCORE(lcore_id, handlers[i]); + TEST_ASSERT_EQUAL((uint32_t)(i * lcore_id), v, + "Unexpected lcore variable value on lcore %d", + lcore_id); + } + } + + free(handlers); + + return TEST_SUCCESS; +} + +static int +test_large_lvar(void) +{ + RTE_LCORE_VAR_HANDLE(unsigned char, large); + unsigned int lcore_id; + + RTE_LCORE_VAR_ALLOC_SIZE(large, RTE_MAX_LCORE_VAR); + + for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) { + unsigned char *ptr = RTE_LCORE_VAR_LCORE(lcore_id, large); + + memset(ptr, (unsigned char)lcore_id, RTE_MAX_LCORE_VAR); + } + + for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) { + unsigned char *ptr = RTE_LCORE_VAR_LCORE(lcore_id, large); + size_t i; + + for (i = 0; i < RTE_MAX_LCORE_VAR; i++) + TEST_ASSERT_EQUAL(ptr[i], (unsigned char)lcore_id, + "Large lcore variable value is corrupted on lcore %d.", + lcore_id); + } + + return TEST_SUCCESS; +} + +static struct unit_test_suite lcore_var_testsuite = { + .suite_name = "lcore variable autotest", + .unit_test_cases = { + TEST_CASE(test_int_lvar), + TEST_CASE(test_sized_alignment), + TEST_CASE(test_struct_lvar), + TEST_CASE(test_array_lvar), + TEST_CASE(test_many_lvars), + TEST_CASE(test_large_lvar), + TEST_CASES_END() + }, +}; + +static int test_lcore_var(void) +{ + if (rte_lcore_count() < MIN_LCORES) { + printf("Not enough cores for lcore_var_autotest; expecting at least %d.\n", + MIN_LCORES); + return TEST_SKIPPED; + } + + return unit_test_suite_runner(&lcore_var_testsuite); +} + +REGISTER_FAST_TEST(lcore_var_autotest, true, false, test_lcore_var); 
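Reading aid for the new lcore-variable unit tests above: a minimal usage sketch of the API they exercise (rte_lcore_var.h, added under lib/eal per the MAINTAINERS hunk). The program layout and the "counter" variable are illustrative assumptions, not part of the patch; the macros (RTE_LCORE_VAR_HANDLE, RTE_LCORE_VAR_ALLOC, RTE_LCORE_VAR, RTE_LCORE_VAR_FOREACH) and the remote-launch calls are those used in test_lcore_var.c.

/* Minimal sketch: one private "counter" instance per lcore, shared handle. */
#include <stdio.h>
#include <rte_eal.h>
#include <rte_lcore.h>
#include <rte_launch.h>
#include <rte_lcore_var.h>

/* Handle is shared; storage is per lcore (zero-initialized on allocation,
 * as the before_struct/after_struct checks in the tests above rely on). */
static RTE_LCORE_VAR_HANDLE(unsigned long, counter);

static int
worker(void *arg)
{
	(void)arg;
	/* RTE_LCORE_VAR() resolves to the calling lcore's own instance. */
	unsigned long *c = RTE_LCORE_VAR(counter);
	(*c)++;
	return 0;
}

int
main(int argc, char **argv)
{
	unsigned int lcore_id;
	unsigned long *c;
	unsigned long total = 0;

	if (rte_eal_init(argc, argv) < 0)
		return 1;

	/* Allocate the per-lcore storage behind the handle, once. */
	RTE_LCORE_VAR_ALLOC(counter);

	RTE_LCORE_FOREACH_WORKER(lcore_id)
		rte_eal_remote_launch(worker, NULL, lcore_id);
	rte_eal_mp_wait_lcore();

	/* Iterate every lcore's instance from the main lcore. */
	RTE_LCORE_VAR_FOREACH(lcore_id, c, counter)
		total += *c;

	printf("total updates: %lu\n", total);
	rte_eal_cleanup();
	return 0;
}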
diff --git a/app/test/test_lcore_var_perf.c b/app/test/test_lcore_var_perf.c new file mode 100644 index 0000000000..41e29bbd49 --- /dev/null +++ b/app/test/test_lcore_var_perf.c @@ -0,0 +1,252 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2024 Ericsson AB + */ + +#define MAX_MODS 1024 + +#include + +#include +#include +#include +#include +#include + +#include "test.h" + +struct mod_lcore_state { + uint64_t a; + uint64_t b; + uint64_t sum; +}; + +static void +mod_init(struct mod_lcore_state *state) +{ + state->a = rte_rand(); + state->b = rte_rand(); + state->sum = 0; +} + +static __rte_always_inline void +mod_update(volatile struct mod_lcore_state *state) +{ + state->sum += state->a * state->b; +} + +struct __rte_cache_aligned mod_lcore_state_aligned { + struct mod_lcore_state mod_state; + + RTE_CACHE_GUARD; +}; + +static struct mod_lcore_state_aligned +sarray_lcore_state[MAX_MODS][RTE_MAX_LCORE]; + +static void +sarray_init(void) +{ + unsigned int lcore_id = rte_lcore_id(); + int mod; + + for (mod = 0; mod < MAX_MODS; mod++) { + struct mod_lcore_state *mod_state = &sarray_lcore_state[mod][lcore_id].mod_state; + + mod_init(mod_state); + } +} + +static __rte_noinline void +sarray_update(unsigned int mod) +{ + unsigned int lcore_id = rte_lcore_id(); + struct mod_lcore_state *mod_state = &sarray_lcore_state[mod][lcore_id].mod_state; + + mod_update(mod_state); +} + +struct mod_lcore_state_lazy { + struct mod_lcore_state mod_state; + bool initialized; +}; + +/* + * Note: it's usually a bad idea have this much thread-local storage + * allocated in a real application, since it will incur a cost on + * thread creation and non-lcore thread memory usage. + */ +static RTE_DEFINE_PER_LCORE(struct mod_lcore_state_lazy, tls_lcore_state)[MAX_MODS]; + +static inline void +tls_init(struct mod_lcore_state_lazy *state) +{ + mod_init(&state->mod_state); + + state->initialized = true; +} + +static __rte_noinline void +tls_lazy_update(unsigned int mod) +{ + struct mod_lcore_state_lazy *state = + &RTE_PER_LCORE(tls_lcore_state[mod]); + + /* With thread-local storage, initialization must usually be lazy */ + if (!state->initialized) + tls_init(state); + + mod_update(&state->mod_state); +} + +static __rte_noinline void +tls_update(unsigned int mod) +{ + struct mod_lcore_state_lazy *state = + &RTE_PER_LCORE(tls_lcore_state[mod]); + + mod_update(&state->mod_state); +} + +RTE_LCORE_VAR_HANDLE(struct mod_lcore_state, lvar_lcore_state)[MAX_MODS]; + +static void +lvar_init(void) +{ + unsigned int mod; + + for (mod = 0; mod < MAX_MODS; mod++) { + RTE_LCORE_VAR_ALLOC(lvar_lcore_state[mod]); + + struct mod_lcore_state *state = RTE_LCORE_VAR(lvar_lcore_state[mod]); + + mod_init(state); + } +} + +static __rte_noinline void +lvar_update(unsigned int mod) +{ + struct mod_lcore_state *state = RTE_LCORE_VAR(lvar_lcore_state[mod]); + + mod_update(state); +} + +static void +shuffle(unsigned int *elems, size_t len) +{ + size_t i; + + for (i = len - 1; i > 0; i--) { + unsigned int other = rte_rand_max(i + 1); + + unsigned int tmp = elems[other]; + elems[other] = elems[i]; + elems[i] = tmp; + } +} + +#define ITERATIONS UINT64_C(10000000) + +static inline double +benchmark_access(const unsigned int *mods, unsigned int num_mods, + void (*init_fun)(void), void (*update_fun)(unsigned int)) +{ + unsigned int i; + double start; + double end; + double latency; + unsigned int num_mods_mask = num_mods - 1; + + RTE_VERIFY(rte_is_power_of_2(num_mods)); + + if (init_fun != NULL) + init_fun(); + + /* Warm up cache and make sure 
TLS variables are initialized */ + for (i = 0; i < num_mods; i++) + update_fun(i); + + start = rte_rdtsc(); + + for (i = 0; i < ITERATIONS; i++) + update_fun(mods[i & num_mods_mask]); + + end = rte_rdtsc(); + + latency = (end - start) / (double)ITERATIONS; + + return latency; +} + +static void +test_lcore_var_access_n(unsigned int num_mods) +{ + double sarray_latency; + double tls_latency; + double lazy_tls_latency; + double lvar_latency; + unsigned int mods[num_mods]; + unsigned int i; + + for (i = 0; i < num_mods; i++) + mods[i] = i; + + shuffle(mods, num_mods); + + sarray_latency = + benchmark_access(mods, num_mods, sarray_init, sarray_update); + + tls_latency = + benchmark_access(mods, num_mods, NULL, tls_update); + + lazy_tls_latency = + benchmark_access(mods, num_mods, NULL, tls_lazy_update); + + lvar_latency = + benchmark_access(mods, num_mods, lvar_init, lvar_update); + + printf("%17u %8.1f %14.1f %15.1f %10.1f\n", num_mods, sarray_latency, + tls_latency, lazy_tls_latency, lvar_latency); +} + +/* + * The potential performance benefit of lcore variables compared to + * the use of statically sized, lcore id-indexed arrays is not + * shorter latencies in a scenario with low cache pressure, but rather + * fewer cache misses in a real-world scenario, with extensive cache + * usage. These tests are a crude simulation of such, using dummy + * modules, each with a small, per-lcore state. Note however that + * these tests have very little non-lcore/thread local state, which is + * unrealistic. + */ + +static int +test_lcore_var_access(void) +{ + unsigned int num_mods = 1; + + printf("- Latencies [TSC cycles/update] -\n"); + printf("Number of Static Thread-local Thread-local Lcore\n"); + printf("Modules/Variables Array Storage Storage (Lazy) Variables\n"); + + for (num_mods = 1; num_mods <= MAX_MODS; num_mods *= 2) + test_lcore_var_access_n(num_mods); + + return TEST_SUCCESS; +} + +static struct unit_test_suite lcore_var_testsuite = { + .suite_name = "lcore variable perf autotest", + .unit_test_cases = { + TEST_CASE(test_lcore_var_access), + TEST_CASES_END() + }, +}; + +static int +test_lcore_var_perf(void) +{ + return unit_test_suite_runner(&lcore_var_testsuite); +} + +REGISTER_PERF_TEST(lcore_var_perf_autotest, test_lcore_var_perf); diff --git a/app/test/test_link_bonding_rssconf.c b/app/test/test_link_bonding_rssconf.c index 3c9c824335..2cb689b1de 100644 --- a/app/test/test_link_bonding_rssconf.c +++ b/app/test/test_link_bonding_rssconf.c @@ -616,7 +616,6 @@ test_setup(void) mac_addr.addr_bytes[5] = 0x10 + port->port_id; rte_eth_dev_default_mac_addr_set(port->port_id, &mac_addr); - rte_eth_dev_info_get(port->port_id, &port->dev_info); retval = rte_eth_dev_info_get(port->port_id, &port->dev_info); TEST_ASSERT((retval == 0), "Error during getting device (port %u) info: %s\n", diff --git a/app/test/test_power.c b/app/test/test_power.c index 403adc22d6..38507411bd 100644 --- a/app/test/test_power.c +++ b/app/test/test_power.c @@ -22,87 +22,7 @@ test_power(void) #else -#include - -static int -check_function_ptrs(void) -{ - enum power_management_env env = rte_power_get_env(); - - const bool not_null_expected = !(env == PM_ENV_NOT_SET); - - const char *inject_not_string1 = not_null_expected ? " not" : ""; - const char *inject_not_string2 = not_null_expected ? 
"" : " not"; - - if ((rte_power_freqs == NULL) == not_null_expected) { - printf("rte_power_freqs should%s be NULL, environment has%s been " - "initialised\n", inject_not_string1, - inject_not_string2); - return -1; - } - if ((rte_power_get_freq == NULL) == not_null_expected) { - printf("rte_power_get_freq should%s be NULL, environment has%s been " - "initialised\n", inject_not_string1, - inject_not_string2); - return -1; - } - if ((rte_power_set_freq == NULL) == not_null_expected) { - printf("rte_power_set_freq should%s be NULL, environment has%s been " - "initialised\n", inject_not_string1, - inject_not_string2); - return -1; - } - if ((rte_power_freq_up == NULL) == not_null_expected) { - printf("rte_power_freq_up should%s be NULL, environment has%s been " - "initialised\n", inject_not_string1, - inject_not_string2); - return -1; - } - if ((rte_power_freq_down == NULL) == not_null_expected) { - printf("rte_power_freq_down should%s be NULL, environment has%s been " - "initialised\n", inject_not_string1, - inject_not_string2); - return -1; - } - if ((rte_power_freq_max == NULL) == not_null_expected) { - printf("rte_power_freq_max should%s be NULL, environment has%s been " - "initialised\n", inject_not_string1, - inject_not_string2); - return -1; - } - if ((rte_power_freq_min == NULL) == not_null_expected) { - printf("rte_power_freq_min should%s be NULL, environment has%s been " - "initialised\n", inject_not_string1, - inject_not_string2); - return -1; - } - if ((rte_power_turbo_status == NULL) == not_null_expected) { - printf("rte_power_turbo_status should%s be NULL, environment has%s been " - "initialised\n", inject_not_string1, - inject_not_string2); - return -1; - } - if ((rte_power_freq_enable_turbo == NULL) == not_null_expected) { - printf("rte_power_freq_enable_turbo should%s be NULL, environment has%s been " - "initialised\n", inject_not_string1, - inject_not_string2); - return -1; - } - if ((rte_power_freq_disable_turbo == NULL) == not_null_expected) { - printf("rte_power_freq_disable_turbo should%s be NULL, environment has%s been " - "initialised\n", inject_not_string1, - inject_not_string2); - return -1; - } - if ((rte_power_get_capabilities == NULL) == not_null_expected) { - printf("rte_power_get_capabilities should%s be NULL, environment has%s been " - "initialised\n", inject_not_string1, - inject_not_string2); - return -1; - } - - return 0; -} +#include static int test_power(void) @@ -124,10 +44,6 @@ test_power(void) return -1; } - /* Verify that function pointers are NULL */ - if (check_function_ptrs() < 0) - goto fail_all; - rte_power_unset_env(); /* Perform tests for valid environments.*/ @@ -154,22 +70,11 @@ test_power(void) return -1; } - /* Verify that function pointers are NOT NULL */ - if (check_function_ptrs() < 0) - goto fail_all; - rte_power_unset_env(); - /* Verify that function pointers are NULL */ - if (check_function_ptrs() < 0) - goto fail_all; - } return 0; -fail_all: - rte_power_unset_env(); - return -1; } #endif diff --git a/app/test/test_power_cpufreq.c b/app/test/test_power_cpufreq.c index edbd34424e..0331b37fe0 100644 --- a/app/test/test_power_cpufreq.c +++ b/app/test/test_power_cpufreq.c @@ -30,7 +30,7 @@ test_power_caps(void) } #else -#include +#include #define TEST_POWER_LCORE_ID 2U #define TEST_POWER_LCORE_INVALID ((unsigned)RTE_MAX_LCORE) @@ -534,58 +534,6 @@ test_power_cpufreq(void) goto fail_all; } - /* verify that function pointers are not NULL */ - if (rte_power_freqs == NULL) { - printf("rte_power_freqs should not be NULL, environment has not 
been " - "initialised\n"); - goto fail_all; - } - if (rte_power_get_freq == NULL) { - printf("rte_power_get_freq should not be NULL, environment has not " - "been initialised\n"); - goto fail_all; - } - if (rte_power_set_freq == NULL) { - printf("rte_power_set_freq should not be NULL, environment has not " - "been initialised\n"); - goto fail_all; - } - if (rte_power_freq_up == NULL) { - printf("rte_power_freq_up should not be NULL, environment has not " - "been initialised\n"); - goto fail_all; - } - if (rte_power_freq_down == NULL) { - printf("rte_power_freq_down should not be NULL, environment has not " - "been initialised\n"); - goto fail_all; - } - if (rte_power_freq_max == NULL) { - printf("rte_power_freq_max should not be NULL, environment has not " - "been initialised\n"); - goto fail_all; - } - if (rte_power_freq_min == NULL) { - printf("rte_power_freq_min should not be NULL, environment has not " - "been initialised\n"); - goto fail_all; - } - if (rte_power_turbo_status == NULL) { - printf("rte_power_turbo_status should not be NULL, environment has not " - "been initialised\n"); - goto fail_all; - } - if (rte_power_freq_enable_turbo == NULL) { - printf("rte_power_freq_enable_turbo should not be NULL, environment has not " - "been initialised\n"); - goto fail_all; - } - if (rte_power_freq_disable_turbo == NULL) { - printf("rte_power_freq_disable_turbo should not be NULL, environment has not " - "been initialised\n"); - goto fail_all; - } - ret = rte_power_exit(TEST_POWER_LCORE_ID); if (ret < 0) { printf("Cannot exit power management for lcore %u\n", diff --git a/app/test/test_power_kvm_vm.c b/app/test/test_power_kvm_vm.c index 464e06002e..1c72ba5a4e 100644 --- a/app/test/test_power_kvm_vm.c +++ b/app/test/test_power_kvm_vm.c @@ -20,7 +20,7 @@ test_power_kvm_vm(void) } #else -#include +#include #define TEST_POWER_VM_LCORE_ID 0U #define TEST_POWER_VM_LCORE_OUT_OF_BOUNDS (RTE_MAX_LCORE+1) @@ -47,42 +47,6 @@ test_power_kvm_vm(void) return -1; } - /* verify that function pointers are not NULL */ - if (rte_power_freqs == NULL) { - printf("rte_power_freqs should not be NULL, environment has not been " - "initialised\n"); - return -1; - } - if (rte_power_get_freq == NULL) { - printf("rte_power_get_freq should not be NULL, environment has not " - "been initialised\n"); - return -1; - } - if (rte_power_set_freq == NULL) { - printf("rte_power_set_freq should not be NULL, environment has not " - "been initialised\n"); - return -1; - } - if (rte_power_freq_up == NULL) { - printf("rte_power_freq_up should not be NULL, environment has not " - "been initialised\n"); - return -1; - } - if (rte_power_freq_down == NULL) { - printf("rte_power_freq_down should not be NULL, environment has not " - "been initialised\n"); - return -1; - } - if (rte_power_freq_max == NULL) { - printf("rte_power_freq_max should not be NULL, environment has not " - "been initialised\n"); - return -1; - } - if (rte_power_freq_min == NULL) { - printf("rte_power_freq_min should not be NULL, environment has not " - "been initialised\n"); - return -1; - } /* Test initialisation of an out of bounds lcore */ ret = rte_power_init(TEST_POWER_VM_LCORE_OUT_OF_BOUNDS); if (ret != -1) { diff --git a/app/test/test_reassembly_perf.c b/app/test/test_reassembly_perf.c index 245ab3fa7d..69cf029468 100644 --- a/app/test/test_reassembly_perf.c +++ b/app/test/test_reassembly_perf.c @@ -136,9 +136,7 @@ ipv4_frag_fill_data(struct rte_mbuf **mbuf, uint8_t nb_frags, uint32_t flow_id, for (i = 0; i < nb_frags; i++) { struct rte_mbuf *frag = mbuf[i]; 
uint16_t frag_offset = 0; - uint32_t ip_cksum; uint16_t pkt_len; - uint16_t *ptr16; frag_offset = i * (frag_len / 8); @@ -189,32 +187,7 @@ ipv4_frag_fill_data(struct rte_mbuf **mbuf, uint8_t nb_frags, uint32_t flow_id, ip_hdr->src_addr = rte_cpu_to_be_32(IP_SRC_ADDR(flow_id)); ip_hdr->dst_addr = rte_cpu_to_be_32(IP_DST_ADDR(flow_id)); - /* - * Compute IP header checksum. - */ - ptr16 = (unaligned_uint16_t *)ip_hdr; - ip_cksum = 0; - ip_cksum += ptr16[0]; - ip_cksum += ptr16[1]; - ip_cksum += ptr16[2]; - ip_cksum += ptr16[3]; - ip_cksum += ptr16[4]; - ip_cksum += ptr16[6]; - ip_cksum += ptr16[7]; - ip_cksum += ptr16[8]; - ip_cksum += ptr16[9]; - - /* - * Reduce 32 bit checksum to 16 bits and complement it. - */ - ip_cksum = ((ip_cksum & 0xFFFF0000) >> 16) + - (ip_cksum & 0x0000FFFF); - if (ip_cksum > 65535) - ip_cksum -= 65535; - ip_cksum = (~ip_cksum) & 0x0000FFFF; - if (ip_cksum == 0) - ip_cksum = 0xFFFF; - ip_hdr->hdr_checksum = (uint16_t)ip_cksum; + ip_hdr->hdr_checksum = (uint16_t)rte_ipv4_cksum_simple(ip_hdr); frag->data_len = sizeof(struct rte_ether_hdr) + pkt_len; frag->pkt_len = frag->data_len; diff --git a/app/test/test_thash.c b/app/test/test_thash.c index 0ad6943cf8..b9c6e9118e 100644 --- a/app/test/test_thash.c +++ b/app/test/test_thash.c @@ -7,6 +7,7 @@ #include #include #include +#include #include "test.h" @@ -923,6 +924,112 @@ test_adjust_tuple_mult_reta(void) return TEST_SUCCESS; } +#define RETA_SZ_LOG 11 +#define RSS_KEY_SZ 40 +#define RETA_SZ (1 << RETA_SZ_LOG) +#define NB_HASH_ITER RETA_SZ +#define NB_TEST_ITER 10 + +static inline void +run_hash_calc_loop(uint8_t *key, union rte_thash_tuple *tuple, + unsigned int *rss_reta_hits) +{ + uint32_t rss_hash; + int i; + + for (i = 0; i < NB_HASH_ITER; i++) { + /* variable part starts from the most significant bit */ + tuple->v4.dport = (i << (sizeof(tuple->v4.dport) * CHAR_BIT - + RETA_SZ_LOG)); + /* + * swap sport and dport on LE arch since rte_softrss() + * works with host byte order uint32_t values + */ + tuple->v4.dport = rte_cpu_to_be_16(tuple->v4.dport); + tuple->v4.sctp_tag = rte_be_to_cpu_32(tuple->v4.sctp_tag); + rss_hash = rte_softrss((uint32_t *)tuple, + RTE_THASH_V4_L4_LEN, key); + /* unroll swap, required only for sport */ + tuple->v4.sctp_tag = rte_cpu_to_be_32(tuple->v4.sctp_tag); + rss_reta_hits[rss_hash & (RETA_SZ - 1)]++; + } +} + +static int +hash_calc_iteration(unsigned int *min_before, unsigned int *max_before, + unsigned int *min_after, unsigned int *max_after, + unsigned int *min_default, unsigned int *max_default) +{ + uint8_t key[RSS_KEY_SZ] = {0}; + union rte_thash_tuple tuple; + unsigned int rss_reta_hits_before_adjust[RETA_SZ] = {0}; + unsigned int rss_reta_hits_after_adjust[RETA_SZ] = {0}; + unsigned int rss_reta_hits_default_key[RETA_SZ] = {0}; + int i; + + for (i = 0; i < RSS_KEY_SZ; i++) + key[i] = rte_rand(); + + tuple.v4.src_addr = rte_rand(); + tuple.v4.dst_addr = rte_rand(); + tuple.v4.sport = rte_rand(); + + run_hash_calc_loop(key, &tuple, rss_reta_hits_before_adjust); + + int ret = rte_thash_gen_key(key, RSS_KEY_SZ, RETA_SZ_LOG, + offsetof(union rte_thash_tuple, v4.dport)*CHAR_BIT, + RETA_SZ_LOG); + + if (ret) { + printf("Can't generate key\n"); + return -1; + } + + run_hash_calc_loop(key, &tuple, rss_reta_hits_after_adjust); + + run_hash_calc_loop(default_rss_key, &tuple, rss_reta_hits_default_key); + + for (i = 0; i < RETA_SZ; i++) { + *min_before = RTE_MIN(*min_before, rss_reta_hits_before_adjust[i]); + *max_before = RTE_MAX(*max_before, rss_reta_hits_before_adjust[i]); + *min_after = 
RTE_MIN(*min_after, rss_reta_hits_after_adjust[i]); + *max_after = RTE_MAX(*max_after, rss_reta_hits_after_adjust[i]); + *min_default = RTE_MIN(*min_default, rss_reta_hits_default_key[i]); + *max_default = RTE_MAX(*max_default, rss_reta_hits_default_key[i]); + } + + return 0; +} + +static int +test_keygen(void) +{ + int i, ret; + unsigned int min_before = UINT32_MAX; + unsigned int min_after = UINT32_MAX; + unsigned int min_default = UINT32_MAX; + unsigned int max_before = 0; + unsigned int max_after = 0; + unsigned int max_default = 0; + + for (i = 0; i < NB_TEST_ITER; i++) { + /* calculates the worst distribution for each key */ + ret = hash_calc_iteration(&min_before, &max_before, &min_after, + &max_after, &min_default, &max_default); + if (ret) + return ret; + } + + printf("RSS before key adjustment: min=%d, max=%d\n", + min_before, max_before); + printf("RSS after key adjustment: min=%d, max=%d\n", + min_after, max_after); + printf("RSS default key: min=%d, max=%d\n", + min_default, max_default); + + return TEST_SUCCESS; +} + static struct unit_test_suite thash_tests = { .suite_name = "thash autotest", .setup = NULL, @@ -944,6 +1051,7 @@ static struct unit_test_suite thash_tests = { TEST_CASE(test_predictable_rss_multirange), TEST_CASE(test_adjust_tuple), TEST_CASE(test_adjust_tuple_mult_reta), + TEST_CASE(test_keygen), TEST_CASES_END() } }; diff --git a/config/arm/meson.build b/config/arm/meson.build index 55be7c8711..1e66434b6b 100644 --- a/config/arm/meson.build +++ b/config/arm/meson.build @@ -460,9 +460,6 @@ soc_cn9k = { 'implementer': '0x43', 'part_number': '0xb2', 'numa': false, - 'flags': [ - ['RTE_IOVA_IN_MBUF', 0] - ] } soc_cn10k = { @@ -472,7 +469,6 @@ soc_cn10k = { ['RTE_MAX_LCORE', 24], ['RTE_MAX_NUMA_NODES', 1], ['RTE_MEMPOOL_ALIGN', 128], - ['RTE_IOVA_IN_MBUF', 0] ], 'part_number': '0xd49', 'extra_march_features': ['crypto'], @@ -488,7 +484,9 @@ soc_dpaa = { ['RTE_MACHINE', '"dpaa"'], ['RTE_LIBRTE_DPAA2_USE_PHYS_IOVA', false], ['RTE_MAX_LCORE', 16], - ['RTE_MAX_NUMA_NODES', 1] + ['RTE_MAX_NUMA_NODES', 1], + ['RTE_DMA_DPAA_ERRATA_ERR050757', true], + ['RTE_DMA_DPAA_ERRATA_ERR050265', true], ], 'numa': false } @@ -784,9 +782,8 @@ else # native build # The script returns ['Implementer', 'Variant', 'Architecture', # 'Primary Part number', 'Revision'] - detect_vendor = find_program(join_paths(meson.current_source_dir(), - 'armv8_machine.py')) - cmd = run_command(detect_vendor.path(), check: false) + detect_vendor = py3 + files('armv8_machine.py') + cmd = run_command(detect_vendor, check: false) if cmd.returncode() == 0 cmd_output = cmd.stdout().to_lower().strip().split(' ') implementer_id = cmd_output[0] diff --git a/config/meson.build b/config/meson.build index 8dae811378..6aaad6d8a4 100644 --- a/config/meson.build +++ b/config/meson.build @@ -95,8 +95,11 @@ eal_pmd_path = join_paths(get_option('prefix'), driver_install_path) # e.g. ixgbe depends on librte_bus_pci. This means that the bus drivers need # to be in the library path, so symlink the drivers from the main lib directory. if not is_windows - meson.add_install_script('../buildtools/symlink-drivers-solibs.sh', - get_option('libdir'), pmd_subdir_opt) + # skip symlink-drivers-solibs.sh execution on no sub directory + if pmd_subdir_opt != '' and pmd_subdir_opt != '.' 
+ meson.add_install_script('../buildtools/symlink-drivers-solibs.sh', + get_option('libdir'), pmd_subdir_opt) + endif else meson.add_install_script(py3, files('../buildtools/symlink-drivers-solibs.py'), @@ -325,7 +328,6 @@ warning_flags = [ '-Wwrite-strings', # globally disabled warnings - '-Wno-address-of-packed-member', '-Wno-packed-not-aligned', '-Wno-missing-field-initializers', ] diff --git a/config/rte_config.h b/config/rte_config.h index fd6f8a2f1a..3734db6bdc 100644 --- a/config/rte_config.h +++ b/config/rte_config.h @@ -41,6 +41,7 @@ /* EAL defines */ #define RTE_CACHE_GUARD_LINES 1 #define RTE_MAX_HEAPS 32 +#define RTE_MAX_LCORE_VAR 131072 #define RTE_MAX_MEMSEG_LISTS 128 #define RTE_MAX_MEMSEG_PER_LIST 8192 #define RTE_MAX_MEM_MB_PER_LIST 32768 diff --git a/config/x86/meson.build b/config/x86/meson.build index 265580a555..5455bb0210 100644 --- a/config/x86/meson.build +++ b/config/x86/meson.build @@ -96,6 +96,7 @@ dpdk_conf.set('RTE_CACHE_LINE_SIZE', 64) dpdk_conf.set('RTE_MAX_LCORE', 128) epyc_zen_cores = { + '__znver5__':768, '__znver4__':512, '__znver3__':256, '__znver2__':256, diff --git a/devtools/check-forbidden-tokens.awk b/devtools/check-forbidden-tokens.awk index 59b1121090..28d32fc086 100755 --- a/devtools/check-forbidden-tokens.awk +++ b/devtools/check-forbidden-tokens.awk @@ -10,6 +10,7 @@ BEGIN { split(FOLDERS,deny_folders," "); split(EXPRESSIONS,deny_expr," "); + split(SKIP_FILES,skip_files," "); in_file=0; in_comment=0; count=0; @@ -56,14 +57,22 @@ BEGIN { } count = 0 for (i in deny_folders) { - re = "^\\+\\+\\+ b/" deny_folders[i]; - if ($0 ~ re) { - # Check only if the files are not part of SKIP_FILES - if (!(length(SKIP_FILES) && ($re ~ SKIP_FILES))) { - in_file = 1 - last_file = $0 + if (!($0 ~ "^\\+\\+\\+ b/" deny_folders[i])) { + continue + } + skip = 0 + for (j in skip_files) { + if (!($0 ~ "^\\+\\+\\+ b/" skip_files[j])) { + continue } + skip = 1 + break + } + if (skip == 0) { + in_file = 1 + last_file = $0 } + break } } END { diff --git a/devtools/check-spdx-tag.sh b/devtools/check-spdx-tag.sh index 1ffccdaecc..b983268b1e 100755 --- a/devtools/check-spdx-tag.sh +++ b/devtools/check-spdx-tag.sh @@ -24,6 +24,7 @@ check_spdx() { ':^README' ':^MAINTAINERS' ':^VERSION' ':^ABI_VERSION' \ ':^*/Kbuild' ':^*/README*' \ ':^license/' ':^config/' ':^buildtools/' ':^*/poetry.lock' \ + ':^kernel/linux/uapi/.gitignore' ':^kernel/linux/uapi/version' \ ':^*.cocci' ':^*.abignore' \ ':^*.map' ':^*.ini' ':^*.data' ':^*.json' ':^*.cfg' ':^*.txt' \ ':^*.svg' ':^*.png' \ diff --git a/devtools/checkpatches.sh b/devtools/checkpatches.sh index d860f19045..4a8591be22 100755 --- a/devtools/checkpatches.sh +++ b/devtools/checkpatches.sh @@ -177,11 +177,12 @@ check_forbidden_additions() { # -f $(dirname $(readlink -f $0))/check-forbidden-tokens.awk \ "$1" || res=1 - # forbid use of non abstracted bit count operations + # forbid use of compiler __builtin_* awk -v FOLDERS="lib drivers app examples" \ - -v EXPRESSIONS='\\<__builtin_(clz|ctz|ffs|popcount)(ll)?\\>' \ + -v SKIP_FILES='lib/eal/ drivers/.*/base/ drivers/.*osdep.h$' \ + -v EXPRESSIONS='\\<__builtin_' \ -v RET_ON_FAIL=1 \ - -v MESSAGE='Using __builtin helpers for bit count operations' \ + -v MESSAGE='Using __builtin helpers, prefer EAL macros' \ -f $(dirname $(readlink -f $0))/check-forbidden-tokens.awk \ "$1" || res=1 diff --git a/devtools/linux-uapi.sh b/devtools/linux-uapi.sh new file mode 100755 index 0000000000..ccd8b6816e --- /dev/null +++ b/devtools/linux-uapi.sh @@ -0,0 +1,171 @@ +#!/bin/sh -e +# 
SPDX-License-Identifier: BSD-3-Clause +# Copyright (c) 2024 Red Hat, Inc. + +# +# Import and check Linux Kernel uAPI headers +# + +base_url="https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/plain/include/uapi/" +base_path="kernel/linux/uapi/" +version="" +file="" +check_headers=0 +errors=0 + +print_usage() +{ + echo "Usage: $(basename $0) [-h] [-i FILE] [-u VERSION] [-c]" + echo "-i FILE import Linux header file. E.g. linux/vfio.h" + echo "-u VERSION update imported list of Linux headers to a given version. E.g. v6.10" + echo "-c check headers are valid" +} + +version_older_than() { + printf '%s\n%s' "$1" "$2" | sort -C -V +} + +download_header() +{ + local header=$1 + local path=$2 + + local url="${base_url}${header}?h=${version}" + + if ! curl -s -f --create-dirs -o $path $url; then + echo "Failed to download $url" + return 1 + fi + + return 0 +} + +update_headers() +{ + local header + local url + local path + + echo "Updating to $version" + for filename in $(find $base_path -name "*.h" -type f); do + header=${filename#$base_path} + download_header $header $filename || return 1 + done + + return 0 +} + +import_header() +{ + local include + local import + local header=$1 + + local path="${base_path}${header}" + + download_header $header $path || return 1 + + for include in $(sed -ne 's/^#include <\(.*\)>$/\1/p' $path); do + if [ ! -f "${base_path}${include}" ]; then + read -p "Import $include (y/n): " import && [ "$import" = 'y' ] || continue + echo "Importing $include for $path" + import_header "$include" || return 1 + fi + done + + return 0 +} + +fixup_includes() +{ + local path=$1 + + sed -i "s|^#include ||g" $path + sed -i "s|\<__user[[:space:]]||" $path + + # Prepend include path with "uapi/" if the header is imported + for include in $(sed -ne 's/^#include <\(.*\)>$/\1/p' $path); do + if [ -f "${base_path}${include}" ]; then + sed -i "s|${include}|uapi/${include}|g" $path + fi + done +} + +check_header() { + echo -n "Checking $1... " + + if ! diff -q $1 $2 >/dev/null; then + echo "KO" + diff -u $1 $2 + return 1 + else + echo "OK" + fi + + return 0 +} + +while getopts i:u:ch opt ; do + case ${opt} in + i ) file=$OPTARG ;; + u ) version=$OPTARG ;; + c ) check_headers=1 ;; + h ) print_usage ; exit 0 ;; + ? ) print_usage ; exit 1 ;; + esac +done + +shift $(($OPTIND - 1)) +if [ $# -ne 0 ]; then + print_usage + exit 1 +fi + +cd $(dirname $0)/.. 
+ +current_version=$(< ${base_path}/version) + +if [ -n "${version}" ]; then + if version_older_than "$version" "$current_version"; then + echo "Headers already up to date ($current_version >= $version)" + version=$current_version + else + update_headers || exit 1 + fi +else + echo "Version not specified, using current version ($current_version)" + version=$current_version +fi + +if [ -n "${file}" ]; then + import_header $file || exit 1 +fi + +for filename in $(find $base_path -name "*.h" -type f); do + fixup_includes $filename || exit 1 +done + +echo $version > ${base_path}/version + +if [ $check_headers -eq 0 ]; then + exit 0 +fi + +tmpheader="$(mktemp -t dpdk.checkuapi.XXXXXX)" +trap "rm -f '$tmpheader'" INT + +echo "Checking imported headers for version ${version}" + +for filename in $(find $base_path -name "*.h" -type f); do + header=${filename#$base_path} + download_header $header $tmpheader || exit 1 + fixup_includes $tmpheader || exit 1 + check_header $filename $tmpheader || errors=$((errors+1)) +done + +echo "$errors error(s) found" + +rm -f $tmpheader +trap - INT + +exit $errors diff --git a/devtools/parse-flow-support.sh b/devtools/parse-flow-support.sh index 9811c7881c..44b4cecafd 100755 --- a/devtools/parse-flow-support.sh +++ b/devtools/parse-flow-support.sh @@ -33,6 +33,13 @@ exclude() # tr '\n' '|' | sed 's,.$,\n,') exceptions='RTE_FLOW_ACTION_TYPE_SHARED' grep -vE "$filter" | grep -vE $exceptions;; + dpaa2) + filter=$(sed -n "/$1/{N;/Skip this/P;}" \ + $dir/dpaa2_flow.c | + grep -wo "$1[[:alnum:]_]*" | sort -u | + tr '\n' '|' | sed 's,.$,\n,') + [ "$1" = 'RTE_FLOW_ITEM_TYPE_' -a -z "$filter" ] && cat || + grep -vE "$filter";; *) cat esac } diff --git a/devtools/words-case.txt b/devtools/words-case.txt index 2257ce6201..bc35c160ba 100644 --- a/devtools/words-case.txt +++ b/devtools/words-case.txt @@ -104,6 +104,7 @@ TPID TSO TTL Tx +uAPI UDM UDP ULP diff --git a/doc/api/doxy-api-index.md b/doc/api/doxy-api-index.md index 266c8b90dc..f0193502bc 100644 --- a/doc/api/doxy-api-index.md +++ b/doc/api/doxy-api-index.md @@ -57,12 +57,14 @@ The public API headers are grouped by topics: [mlx5](@ref rte_pmd_mlx5.h), [dpaa2_mempool](@ref rte_dpaa2_mempool.h), [dpaa2_cmdif](@ref rte_pmd_dpaa2_cmdif.h), - [dpaa2_qdma](@ref rte_pmd_dpaa2_qdma.h), + [dpaax_qdma](@ref rte_pmd_dpaax_qdma.h), [crypto_scheduler](@ref rte_cryptodev_scheduler.h), [dlb2](@ref rte_pmd_dlb2.h), [ifpga](@ref rte_pmd_ifpga.h) - **memory**: + [per-lcore](@ref rte_per_lcore.h), + [lcore variables](@ref rte_lcore_var.h), [memseg](@ref rte_memory.h), [memzone](@ref rte_memzone.h), [mempool](@ref rte_mempool.h), @@ -99,10 +101,9 @@ The public API headers are grouped by topics: [interrupts](@ref rte_interrupts.h), [launch](@ref rte_launch.h), [lcore](@ref rte_lcore.h), - [per-lcore](@ref rte_per_lcore.h), [service cores](@ref rte_service.h), [keepalive](@ref rte_keepalive.h), - [power/freq](@ref rte_power.h), + [power/freq](@ref rte_power_cpufreq.h), [power/uncore](@ref rte_power_uncore.h), [PMD power](@ref rte_power_pmd_mgmt.h) diff --git a/doc/api/doxy-api.conf.in b/doc/api/doxy-api.conf.in index c94f02d411..d23352d300 100644 --- a/doc/api/doxy-api.conf.in +++ b/doc/api/doxy-api.conf.in @@ -6,9 +6,9 @@ PROJECT_NUMBER = @VERSION@ USE_MDFILE_AS_MAINPAGE = @TOPDIR@/doc/api/doxy-api-index.md INPUT = @TOPDIR@/doc/api/doxy-api-index.md \ @TOPDIR@/drivers/bus/vdev \ + @TOPDIR@/drivers/common/dpaax \ @TOPDIR@/drivers/crypto/cnxk \ @TOPDIR@/drivers/crypto/scheduler \ - @TOPDIR@/drivers/dma/dpaa2 \ @TOPDIR@/drivers/event/dlb2 \ 
 @TOPDIR@/drivers/event/cnxk \
 @TOPDIR@/drivers/mempool/cnxk \
diff --git a/doc/guides/contributing/index.rst b/doc/guides/contributing/index.rst
index 7fc6511361..7dc89b9afc 100644
--- a/doc/guides/contributing/index.rst
+++ b/doc/guides/contributing/index.rst
@@ -17,6 +17,7 @@ Contributor's Guidelines
     new_library
     new_driver
     patches
+    linux_uapi
     vulnerability
     stable
     cheatsheet
diff --git a/doc/guides/contributing/linux_uapi.rst b/doc/guides/contributing/linux_uapi.rst
new file mode 100644
index 0000000000..79bedb478e
--- /dev/null
+++ b/doc/guides/contributing/linux_uapi.rst
@@ -0,0 +1,70 @@
+.. SPDX-License-Identifier: BSD-3-Clause
+   Copyright(c) 2024 Red Hat, Inc.
+
+Linux uAPI header files
+=======================
+
+Rationale
+---------
+
+The system a DPDK library or driver is built on does not necessarily run the
+same Kernel version as the system that will run it.
+Importing Linux Kernel uAPI headers makes it possible to build features that
+are not yet supported by the build system.
+
+For example, the build system may run upstream Kernel v5.19 while we would
+like to build a VDUSE application that uses the VDUSE_IOTLB_GET_INFO ioctl()
+introduced in Linux Kernel v6.0.
+
+The `Linux Kernel licence exception regarding syscalls
+`_
+enables importing unmodified Linux Kernel uAPI header files.
+
+Importing or updating an uAPI header file
+-----------------------------------------
+
+In order to ensure the imported uAPI headers are both unmodified and taken
+from a released version of the Linux Kernel, a helper script is provided and
+MUST be used.
+Below is an example importing the ``linux/vduse.h`` file from Linux ``v6.10``:
+
+.. code-block:: console
+
+   devtools/linux-uapi.sh -i linux/vduse.h -u v6.10
+
+Once imported, the header files should be committed without any other change.
+Note that all the imported headers will be updated to the requested version.
+
+Updating imported headers to a newer released version should only be done on
+an as-needed basis; it can be achieved using the same script:
+
+.. code-block:: console
+
+   devtools/linux-uapi.sh -u v6.10
+
+The commit message should reflect why updating the headers is necessary.
+
+Once committed, users can check that the headers are valid by running the
+same Linux uAPI script with the check option:
+
+.. code-block:: console
+
+   devtools/linux-uapi.sh -c
+
+Note that all the linux-uapi.sh script options can be combined. For example:
+
+.. code-block:: console
+
+   devtools/linux-uapi.sh -i linux/virtio_net.h -u v6.10 -c
+
+Header inclusion into library or driver
+---------------------------------------
+
+A library or driver that makes use of imported uAPI headers needs to
+explicitly include the header file with the ``uapi/`` prefix in its C files.
+For example, to include the VDUSE uAPI:
+
+..
code-block:: c + + #include + diff --git a/doc/guides/cryptodevs/aesni_gcm.rst b/doc/guides/cryptodevs/aesni_gcm.rst index 3af1486553..7592d33da2 100644 --- a/doc/guides/cryptodevs/aesni_gcm.rst +++ b/doc/guides/cryptodevs/aesni_gcm.rst @@ -74,7 +74,8 @@ and the external crypto libraries supported by them: DPDK version Crypto library version ============= ================================ 20.11 - 21.08 Multi-buffer library 0.53 - 1.3 - 21.11+ Multi-buffer library 1.0 - 1.5 + 21.11 - 24.07 Multi-buffer library 1.0 - 1.5 + 24.11+ Multi-buffer library 1.4 - 1.5 ============= ================================ Initialization diff --git a/doc/guides/cryptodevs/aesni_mb.rst b/doc/guides/cryptodevs/aesni_mb.rst index ca930be1bd..16d82147b2 100644 --- a/doc/guides/cryptodevs/aesni_mb.rst +++ b/doc/guides/cryptodevs/aesni_mb.rst @@ -137,7 +137,8 @@ and the Multi-Buffer library version supported by them: DPDK version Multi-buffer library version ============== ============================ 20.11 - 21.08 0.53 - 1.3 - 21.11+ 1.0 - 1.5 + 21.11 - 24.07 1.0 - 1.5 + 24.11+ 1.4 - 1.5 ============== ============================ Initialization diff --git a/doc/guides/cryptodevs/chacha20_poly1305.rst b/doc/guides/cryptodevs/chacha20_poly1305.rst index 44cff85918..b5a980b247 100644 --- a/doc/guides/cryptodevs/chacha20_poly1305.rst +++ b/doc/guides/cryptodevs/chacha20_poly1305.rst @@ -66,7 +66,8 @@ and the external crypto libraries supported by them: ============= ================================ DPDK version Crypto library version ============= ================================ - 21.11+ Multi-buffer library 1.0-1.5 + 21.11 - 24.07 Multi-buffer library 1.0 - 1.5 + 24.11+ Multi-buffer library 1.4 - 1.5 ============= ================================ Initialization diff --git a/doc/guides/cryptodevs/kasumi.rst b/doc/guides/cryptodevs/kasumi.rst index 4070f025e1..b57f18b56f 100644 --- a/doc/guides/cryptodevs/kasumi.rst +++ b/doc/guides/cryptodevs/kasumi.rst @@ -80,7 +80,8 @@ and the external crypto libraries supported by them: DPDK version Crypto library version ============= ================================ 20.02 - 21.08 Multi-buffer library 0.53 - 1.3 - 21.11+ Multi-buffer library 1.0 - 1.5 + 21.11 - 24.07 Multi-buffer library 1.0 - 1.5 + 24.11+ Multi-buffer library 1.4 - 1.5 ============= ================================ Initialization diff --git a/doc/guides/cryptodevs/snow3g.rst b/doc/guides/cryptodevs/snow3g.rst index 6eb8229fb5..fb4e0448ac 100644 --- a/doc/guides/cryptodevs/snow3g.rst +++ b/doc/guides/cryptodevs/snow3g.rst @@ -89,7 +89,8 @@ and the external crypto libraries supported by them: DPDK version Crypto library version ============= ================================ 20.02 - 21.08 Multi-buffer library 0.53 - 1.3 - 21.11+ Multi-buffer library 1.0 - 1.5 + 21.11 - 24.07 Multi-buffer library 1.0 - 1.5 + 24.11+ Multi-buffer library 1.4 - 1.5 ============= ================================ Initialization diff --git a/doc/guides/cryptodevs/zuc.rst b/doc/guides/cryptodevs/zuc.rst index 29fe6279aa..4615562246 100644 --- a/doc/guides/cryptodevs/zuc.rst +++ b/doc/guides/cryptodevs/zuc.rst @@ -88,7 +88,8 @@ and the external crypto libraries supported by them: DPDK version Crypto library version ============= ================================ 20.02 - 21.08 Multi-buffer library 0.53 - 1.3 - 21.11+ Multi-buffer library 1.0 - 1.5 + 21.11 - 24.07 Multi-buffer library 1.0 - 1.5 + 24.11+ Multi-buffer library 1.4 - 1.5 ============= ================================ Initialization diff --git a/doc/guides/dmadevs/dpaa.rst 
b/doc/guides/dmadevs/dpaa.rst index f99bfc6087..e6565bab83 100644 --- a/doc/guides/dmadevs/dpaa.rst +++ b/doc/guides/dmadevs/dpaa.rst @@ -42,6 +42,11 @@ Compilation For builds using ``meson`` and ``ninja``, the driver will be built when the target platform is dpaa-based. No additional compilation steps are necessary. +``RTE_DMA_DPAA_ERRATA_ERR050757`` + Enable software workaround for Errata-A050757 +``RTE_DMA_DPAA_ERRATA_ERR050265`` + Enable software workaround for Errata-A050265 + Initialization -------------- @@ -66,3 +71,10 @@ Platform Requirement DPAA DMA driver for DPDK can only work on NXP SoCs as listed in the `Supported DPAA SoCs`_. + +Device Arguments +---------------- + +``dpaa_dma_err_check=1`` + Check DMA errors at driver level. + Usage example: ``dpaa_bus:dpaa_qdma-1,dpaa_dma_err_check=1`` diff --git a/doc/guides/dmadevs/dpaa2.rst b/doc/guides/dmadevs/dpaa2.rst index d2c26231e2..646860843f 100644 --- a/doc/guides/dmadevs/dpaa2.rst +++ b/doc/guides/dmadevs/dpaa2.rst @@ -73,3 +73,18 @@ Platform Requirement DPAA2 drivers for DPDK can only work on NXP SoCs as listed in the ``Supported DPAA2 SoCs``. + +Device Arguments +---------------- + +``fle_pre_populate=1`` + Pre-populate all DMA descriptors with pre-initialized values. + Usage example: ``fslmc:dpdmai.1,fle_pre_populate=1`` + +``desc_debug=1`` + Enable descriptor debugs. + Usage example: ``fslmc:dpdmai.1,desc_debug=1`` + +``short_fd=1`` + Enable short FDs. + Usage example: ``fslmc:dpdmai.1,short_fd=1`` diff --git a/doc/guides/eventdevs/cnxk.rst b/doc/guides/eventdevs/cnxk.rst index e21846f4e0..44771ecbde 100644 --- a/doc/guides/eventdevs/cnxk.rst +++ b/doc/guides/eventdevs/cnxk.rst @@ -16,6 +16,7 @@ Supported OCTEON cnxk SoCs - CN9XX - CN10XX +- CN20XX Features -------- @@ -36,7 +37,7 @@ Features of the OCTEON cnxk SSO PMD are: DRAM - HW accelerated dequeue timeout support to enable power management - HW managed event timers support through TIM, with high precision and - time granularity of 2.5us on CN9K and 1us on CN10K. + time granularity of 2.5us on CN9K and 1us on CN10K/CN20K. - Up to 256 TIM rings a.k.a event timer adapters. - Up to 8 rings traversed in parallel. - HW managed packets enqueued from ethdev to eventdev exposed through event eth @@ -45,8 +46,8 @@ Features of the OCTEON cnxk SSO PMD are: - Lockfree Tx from event eth Tx adapter using ``RTE_ETH_TX_OFFLOAD_MT_LOCKFREE`` capability while maintaining receive packet order. - Full Rx/Tx offload support defined through ethdev queue configuration. -- HW managed event vectorization on CN10K for packets enqueued from ethdev to - eventdev configurable per each Rx queue in Rx adapter. +- HW managed event vectorization on CN10K/CN20K for packets enqueued from ethdev + to eventdev configurable per each Rx queue in Rx adapter. - Event vector transmission via Tx adapter. - Up to 2 event link profiles. @@ -93,9 +94,9 @@ Runtime Config Options -a 0002:0e:00.0,qos=[1-50-50] -- ``CN10K WQE stashing support`` +- ``CN10K/CN20K WQE stashing support`` - CN10K supports stashing the scheduled WQE carried by `rte_event` to the + CN10K/CN20K supports stashing the scheduled WQE carried by `rte_event` to the cores L2 Dcache. The number of cache lines to be stashed and the offset is configurable per HWGRP i.e. event queue. 
The dictionary format is as follows `[Qx|stash_offset|stash_length]` here the stash offset can be @@ -188,8 +189,8 @@ Runtime Config Options -a 0002:0e:00.0,tim_eclk_freq=122880000-1000000000-0 -Power Saving on CN10K ---------------------- +Power Saving on CN10K/CN20K +--------------------------- ARM cores can additionally use WFE when polling for transactions on SSO bus to save power i.e., in the event dequeue call ARM core can enter WFE and exit diff --git a/doc/guides/freebsd_gsg/freebsd_eal_parameters.rst b/doc/guides/freebsd_gsg/freebsd_eal_parameters.rst index fba467a2ce..9270d9fa3b 100644 --- a/doc/guides/freebsd_gsg/freebsd_eal_parameters.rst +++ b/doc/guides/freebsd_gsg/freebsd_eal_parameters.rst @@ -18,3 +18,30 @@ FreeBSD-specific EAL parameters ------------------------------- There are currently no FreeBSD-specific EAL command-line parameters available. + +Other options +~~~~~~~~~~~~~ + +* ``--syslog `` + + Set syslog facility. Valid syslog facilities are:: + + auth + cron + daemon + ftp + kern + lpr + mail + news + syslog + user + uucp + local0 + local1 + local2 + local3 + local4 + local5 + local6 + local7 diff --git a/doc/guides/nics/bnxt.rst b/doc/guides/nics/bnxt.rst index 6db880d632..e4d80a756d 100644 --- a/doc/guides/nics/bnxt.rst +++ b/doc/guides/nics/bnxt.rst @@ -538,10 +538,11 @@ Time Synchronization ~~~~~~~~~~~~~~~~~~~~ System operators may run a PTP (Precision Time Protocol) client application to -synchronize the time on the NIC (and optionally, on the system) to a PTP master. +synchronize the time on the NIC (and optionally, on the system) to a PTP time transmitter. -The BNXT PMD supports a PTP client application to communicate with a PTP master -clock using DPDK IEEE1588 APIs. Note that the PTP client application needs to +The BNXT PMD supports a PTP client application to communicate with a PTP time transmitter +using DPDK IEEE1588 APIs. +Note that the PTP client application needs to run on PF and vector mode needs to be disabled. .. code-block:: console diff --git a/doc/guides/nics/cpfl.rst b/doc/guides/nics/cpfl.rst index 69eabf5616..154201e745 100644 --- a/doc/guides/nics/cpfl.rst +++ b/doc/guides/nics/cpfl.rst @@ -37,6 +37,8 @@ Here is the suggested matching list which has been tested and verified. +------------+------------------+ | 24.07 | 1.4 | +------------+------------------+ + | 24.11 | 1.6 | + +------------+------------------+ Configuration diff --git a/doc/guides/nics/ena.rst b/doc/guides/nics/ena.rst index a40f09be09..a34575dc9b 100644 --- a/doc/guides/nics/ena.rst +++ b/doc/guides/nics/ena.rst @@ -120,10 +120,6 @@ Runtime Configuration 3 - Enforce large LLQ policy. - * **normal_llq_hdr** (default 0) - - Enforce normal LLQ policy. - * **miss_txc_to** (default 5) Number of seconds after which the Tx packet will be considered missing. 
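Since the bnxt section above refers to the DPDK IEEE1588 APIs without showing them, a brief hedged sketch of the timesync calls a PTP client typically makes is given here; the port id, flags handling and error handling are illustrative only:

.. code-block:: c

    #include <time.h>

    #include <rte_ethdev.h>
    #include <rte_mbuf.h>

    /* Enable IEEE1588/802.1AS timestamping on the port. */
    static int
    ptp_port_init(uint16_t port_id)
    {
        return rte_eth_timesync_enable(port_id);
    }

    /* Read the RX timestamp of a received PTP event packet. */
    static int
    ptp_read_rx_timestamp(uint16_t port_id, const struct rte_mbuf *m,
                          struct timespec *ts)
    {
        if ((m->ol_flags & RTE_MBUF_F_RX_IEEE1588_TMST) == 0)
            return -1;

        /* m->timesync holds the timestamp register index on NICs with
         * multiple RX timestamp registers. */
        return rte_eth_timesync_read_rx_timestamp(port_id, ts, m->timesync);
    }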
diff --git a/doc/guides/nics/features/default.ini b/doc/guides/nics/features/default.ini index 1e9a156a2a..e50514d750 100644 --- a/doc/guides/nics/features/default.ini +++ b/doc/guides/nics/features/default.ini @@ -170,6 +170,7 @@ indirect_list = ipv6_ext_push = ipv6_ext_remove = jump = +jump_to_table_index = mac_swap = mark = meter = diff --git a/doc/guides/nics/features/dpaa2.ini b/doc/guides/nics/features/dpaa2.ini index f02da463d9..5f9c587847 100644 --- a/doc/guides/nics/features/dpaa2.ini +++ b/doc/guides/nics/features/dpaa2.ini @@ -32,17 +32,21 @@ ARMv8 = Y Usage doc = Y [rte_flow items] +ah = Y +ecpri = Y +esp = Y eth = P gre = Y +gtp = Y icmp = Y ipv4 = Y ipv6 = Y -meta = Y raw = Y sctp = Y tcp = Y udp = Y vlan = P +vxlan = Y [rte_flow actions] drop = Y diff --git a/doc/guides/nics/features/hns3.ini b/doc/guides/nics/features/hns3.ini index 8b623d3077..5326d20e72 100644 --- a/doc/guides/nics/features/hns3.ini +++ b/doc/guides/nics/features/hns3.ini @@ -59,10 +59,11 @@ icmp = Y ipv4 = Y ipv6 = Y nvgre = Y +ptype = P sctp = Y tcp = Y udp = Y -vlan = P +vlan = Y vxlan = Y vxlan_gpe = Y diff --git a/doc/guides/nics/features/mlx5.ini b/doc/guides/nics/features/mlx5.ini index 056e04275b..264254839f 100644 --- a/doc/guides/nics/features/mlx5.ini +++ b/doc/guides/nics/features/mlx5.ini @@ -114,6 +114,7 @@ indirect_list = Y ipv6_ext_push = Y ipv6_ext_remove = Y jump = Y +jump_to_table_index = Y mark = Y meter = Y meter_mark = Y diff --git a/doc/guides/nics/features/nfp.ini b/doc/guides/nics/features/nfp.ini index c3c282b288..124663ae00 100644 --- a/doc/guides/nics/features/nfp.ini +++ b/doc/guides/nics/features/nfp.ini @@ -25,6 +25,9 @@ L4 checksum offload = Y Packet type parsing = Y Basic stats = Y Stats per queue = Y +EEPROM dump = Y +Module EEPROM dump = Y +LED = Y Linux = Y Multiprocess aware = Y x86-64 = Y diff --git a/doc/guides/nics/features/ntnic.ini b/doc/guides/nics/features/ntnic.ini index 8b9b87bdfe..1bf9bd76db 100644 --- a/doc/guides/nics/features/ntnic.ini +++ b/doc/guides/nics/features/ntnic.ini @@ -10,5 +10,39 @@ Link status = Y Queue start/stop = Y Unicast MAC filter = Y Multicast MAC filter = Y +RSS hash = Y +RSS key update = Y +Basic stats = Y +Extended stats = Y +MTU update = Y Linux = Y x86-64 = Y + +[rte_flow items] +any = Y +eth = Y +gtp = Y +gtp_psc = Y +icmp = Y +icmp6 = Y +ipv4 = Y +ipv6 = Y +port_id = Y +sctp = Y +tcp = Y +udp = Y +vlan = Y + +[rte_flow actions] +age = Y +drop = Y +jump = Y +mark = Y +meter = Y +modify_field = Y +passthru = Y +port_id = Y +queue = Y +raw_decap = Y +raw_encap = Y +rss = Y diff --git a/doc/guides/nics/features/zxdh.ini b/doc/guides/nics/features/zxdh.ini new file mode 100644 index 0000000000..05c8091ed7 --- /dev/null +++ b/doc/guides/nics/features/zxdh.ini @@ -0,0 +1,9 @@ +; +; Supported features of the 'zxdh' network poll mode driver. +; +; Refer to default.ini for the full list of available PMD features. +; +[Features] +Linux = Y +x86-64 = Y +ARMv8 = Y diff --git a/doc/guides/nics/hns3.rst b/doc/guides/nics/hns3.rst index ba193ee766..a93b300895 100644 --- a/doc/guides/nics/hns3.rst +++ b/doc/guides/nics/hns3.rst @@ -183,6 +183,26 @@ Runtime Configuration -a 0000:7d:00.0,fdir_vlan_match_mode=nostrict +- ``fdir_tuple_config`` (default ``none``) + + Used to customize the flow director tuples. Current supported options are follows: + ``+outvlan-insmac``: means disable inner src mac tuple, and enable outer vlan tuple. + ``+outvlan-indmac``: means disable inner dst mac tuple, and enable outer vlan tuple. 
+ ``+outvlan-insip``: means disable inner src ip tuple, and enable outer vlan tuple. + ``+outvlan-indip``: means disable inner dst ip tuple, and enable outer vlan tuple. + ``+outvlan-sctptag``: means disable sctp tag tuple, and enable outer vlan tuple. + ``+outvlan-tunvni``: means disable tunnel vni tuple, and enable outer vlan tuple. + +- ``fdir_index_config`` (default ``hash``) + + Used to select flow director index strategy, + the flow director index is the index position in the hardware flow director table. + Lower index denotes higher priority + (it means when a packet matches multiple indexes, the smaller index wins). + Current supported options are as follows: + ``hash``: The driver generates a flow index based on the hash of the rte_flow key. + ``priority``: the driver uses the rte_flow priority field as the flow director index. + Driver compilation and testing ------------------------------ @@ -312,6 +332,9 @@ Generic flow API configuration for hardware which will affect other rules. The rule just setting input tuple is completely independent. + In addition, if the rule priority level is set, no error is reported, + but the rule priority level does not take effect. + Run ``testpmd``: .. code-block:: console diff --git a/doc/guides/nics/ice.rst b/doc/guides/nics/ice.rst index 91b5a9b461..eac1f2627b 100644 --- a/doc/guides/nics/ice.rst +++ b/doc/guides/nics/ice.rst @@ -138,6 +138,26 @@ Runtime Configuration -a 80:00.0,default-mac-disable=1 +- ``DDP Package File`` + + Rather than have the driver search for the DDP package to load, + or to override what package is used, + the ``ddp_pkg_file`` option can be used to provide the path to a specific package file. + For example:: + + -a 80:00.0,ddp_pkg_file=/path/to/ice-version.pkg + +- ``Traffic Management Scheduling Levels`` + + The DPDK Traffic Management (rte_tm) APIs can be used to configure the Tx scheduler on the NIC. + From 24.11 release, all available hardware layers are available to software. + Earlier versions of DPDK only supported 3 levels in the scheduling hierarchy. + To help with backward compatibility the ``tm_sched_levels`` parameter + can be used to limit the scheduler levels to the provided value. + The provided value must be between 3 and 8. + If the value provided is greater than the number of levels provided by the HW, + SW will use the hardware maximum value. + - ``Protocol extraction for per queue`` Configure the RX queues to do protocol extraction into mbuf for protocol @@ -289,6 +309,21 @@ Runtime Configuration As a trade-off, this configuration may cause the packet processing performance degradation due to the PCI bandwidth limitation. +- ``Tx Scheduler Topology Download`` + + The default Tx scheduler topology exposed by the NIC, + generally a 9-level topology of which 8 levels are SW configurable, + may be updated by a new topology loaded from a DDP package file. + The ``ddp_load_sched_topo`` option can be used to specify that the scheduler topology, + if any, in the DDP package file being used should be loaded into the NIC. + For example:: + + -a 0000:88:00.0,ddp_load_sched_topo=1 + + or:: + + -a 0000:88:00.0,ddp_pkg_file=/path/to/pkg.file,ddp_load_sched_topo=1 + - ``Tx diagnostics`` (default ``not enabled``) Set the ``devargs`` parameter ``mbuf_check`` to enable Tx diagnostics. 
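The ice traffic-management devargs above, and the Tx scheduler hierarchy description in the following hunk, both assume familiarity with the generic ``rte_tm`` API. A minimal hedged sketch of building and committing a two-level hierarchy is shown below; the node ids, weights and port id are hypothetical, and real drivers typically require additional node parameters (shaper profiles, SP priorities, etc.):

.. code-block:: c

    #include <stdint.h>
    #include <string.h>

    #include <rte_tm.h>

    #define ROOT_NODE_ID 100 /* hypothetical; leaf node ids map to Tx queue ids */

    static int
    setup_tm_hierarchy(uint16_t port_id)
    {
        struct rte_tm_node_params np;
        struct rte_tm_error error;
        int ret;

        /* Bare-minimum node parameters: no shaper attached. */
        memset(&np, 0, sizeof(np));
        np.shaper_profile_id = RTE_TM_SHAPER_PROFILE_ID_NONE;

        /* Root node at level 0, no parent. */
        ret = rte_tm_node_add(port_id, ROOT_NODE_ID, RTE_TM_NODE_ID_NULL,
                              0, 1, 0, &np, &error);
        if (ret != 0)
            return ret;

        /* One leaf (queue) node, child of the root, at level 1. */
        ret = rte_tm_node_add(port_id, 0, ROOT_NODE_ID,
                              0, 1, 1, &np, &error);
        if (ret != 0)
            return ret;

        /* Commit the hierarchy, clearing it on failure. */
        return rte_tm_hierarchy_commit(port_id, 1, &error);
    }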
@@ -423,21 +458,27 @@ Traffic Management Support ~~~~~~~~~~~~~~~~~~~~~~~~~~ The ice PMD provides support for the Traffic Management API (RTE_TM), -allow users to offload a 3-layers Tx scheduler on the E810 NIC: - -- ``Port Layer`` - - This is the root layer, support peak bandwidth configuration, - max to 32 children. - -- ``Queue Group Layer`` - - The middle layer, support peak / committed bandwidth, weight, priority configurations, - max to 8 children. - -- ``Queue Layer`` - - The leaf layer, support peak / committed bandwidth, weight, priority configurations. +enabling users to configure and manage the traffic shaping and scheduling of transmitted packets. +By default, all available transmit scheduler layers are available for configuration, +allowing up to 2000 queues to be configured in a hierarchy of up to 8 levels. +The number of levels in the hierarchy can be adjusted via driver parameters: + +* the default 9-level topology (8 levels usable) can be replaced by a new topology downloaded from a DDP file, + using the driver parameter ``ddp_load_sched_topo=1``. + Using this mechanism, if the number of levels is reduced, + the possible fan-out of child-nodes from each level may be increased. + The default topology is a 9-level tree with a fan-out of 8 at each level. + Released DDP package files contain a 5-level hierarchy (4-levels usable), + with increased fan-out at the lower 3 levels + e.g. 64 at levels 2 and 3, and 256 or more at the leaf-node level. + +* the number of levels can be reduced + by setting the driver parameter ``tm_sched_levels`` to a lower value. + This scheme will reduce in software the number of editable levels, + but will not affect the fan-out from each level. + +For more details on how to configure a Tx scheduling hierarchy, +please refer to the ``rte_tm`` `API documentation `_. Additional Options ++++++++++++++++++ diff --git a/doc/guides/nics/index.rst b/doc/guides/nics/index.rst index c14bc7988a..8e371ac4a5 100644 --- a/doc/guides/nics/index.rst +++ b/doc/guides/nics/index.rst @@ -69,3 +69,4 @@ Network Interface Controller Drivers vhost virtio vmxnet3 + zxdh diff --git a/doc/guides/nics/ixgbe.rst b/doc/guides/nics/ixgbe.rst index 14573b542e..c5c6a6c34b 100644 --- a/doc/guides/nics/ixgbe.rst +++ b/doc/guides/nics/ixgbe.rst @@ -76,7 +76,7 @@ Scattered packets are not supported in this mode. If an incoming packet is greater than the maximum acceptable length of one "mbuf" data size (by default, the size is 2 KB), vPMD for RX would be disabled. -By default, IXGBE_MAX_RING_DESC is set to 4096 and RTE_PMD_IXGBE_RX_MAX_BURST is set to 32. +By default, IXGBE_MAX_RING_DESC is set to 8192 and RTE_PMD_IXGBE_RX_MAX_BURST is set to 32. Windows Prerequisites and Pre-conditions ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ diff --git a/doc/guides/nics/mlx5.rst b/doc/guides/nics/mlx5.rst index 1dccdaad50..b1d6863f36 100644 --- a/doc/guides/nics/mlx5.rst +++ b/doc/guides/nics/mlx5.rst @@ -2360,6 +2360,12 @@ Steps to enable Tx datapath tracing: The parameter of the script is the trace data folder. + The optional parameter ``-a`` forces to dump incomplete bursts. + + The optional parameter ``-v [level]`` forces to dump raw records data + for the specified level and below. + Level 0 dumps bursts, level 1 dumps WQEs, level 2 dumps mbufs. + .. 
code-block:: console mlx5_trace.py /var/log/rte-2023-01-23-AM-11-52-39 diff --git a/doc/guides/nics/ntnic.rst b/doc/guides/nics/ntnic.rst index 2c160ae592..f186822504 100644 --- a/doc/guides/nics/ntnic.rst +++ b/doc/guides/nics/ntnic.rst @@ -40,6 +40,37 @@ Features - Unicast MAC filter - Multicast MAC filter - Promiscuous mode (Enable only. The device always run promiscuous mode) +- Flow API support. +- Support for multiple rte_flow groups. +- Multiple TX and RX queues. +- Scattered and gather for TX and RX. +- Jumbo frame support. +- Traffic mirroring. +- VLAN filtering. +- Packet modification: NAT, TTL decrement, DSCP tagging +- Tunnel types: GTP. +- Encapsulation and decapsulation of GTP data. +- RX VLAN stripping via raw decap. +- TX VLAN insertion via raw encap. +- CAM and TCAM based matching. +- Exact match of 140 million flows and policies. +- Tunnel HW offload: Packet type, inner/outer RSS, IP and UDP checksum verification. +- RSS hash +- RSS key update +- RSS based on VLAN or 5-tuple. +- RSS using different combinations of fields: L3 only, L4 only or both, + and source only, destination only or both. +- Several RSS hash keys, one for each flow type. +- Default RSS operation with no hash key specification. +- Port and queue statistics. +- RMON statistics in extended stats. +- Link state information. +- Flow statistics +- Flow aging support +- Flow metering, including meter policy API. +- Flow update. Update of the action list for specific flow +- Asynchronous flow support +- MTU update Limitations ~~~~~~~~~~~ @@ -118,3 +149,33 @@ FILTER To enable logging on all levels use wildcard in the following way:: --log-level=pmd.net.ntnic.*,8 + +Flow Scanner +------------ + +Flow Scanner is DPDK mechanism that constantly and periodically scans +the flow tables to check for aged-out flows. +When flow timeout is reached, +i.e. no packets were matched by the flow within timeout period, +``RTE_ETH_EVENT_FLOW_AGED`` event is reported, and flow is marked as aged-out. + +Therefore, flow scanner functionality is closely connected to the flows' age action. + +There are list of characteristics that age timeout action has: + +- functions only in group > 0; +- flow timeout is specified in seconds; +- flow scanner checks flows age timeout once in 1-480 seconds, + therefore, flows may not age-out immediately, + depending on how big are intervals of flow scanner mechanism checks; +- aging counters can display maximum of **n - 1** aged flows + when aging counters are set to **n**; +- overall 15 different timeouts can be specified for the flows at the same time + (note that this limit is combined for all actions, therefore, + 15 different actions can be created at the same time, + maximum limit of 15 can be reached only across different groups - + when 5 flows with different timeouts are created per one group, + otherwise the limit within one group is 14 distinct flows); +- after flow is aged-out it's not automatically deleted; +- aged-out flow can be updated with ``flow update`` command, + and its aged-out status will be reverted; diff --git a/doc/guides/nics/zxdh.rst b/doc/guides/nics/zxdh.rst new file mode 100644 index 0000000000..2144753d75 --- /dev/null +++ b/doc/guides/nics/zxdh.rst @@ -0,0 +1,34 @@ +.. SPDX-License-Identifier: BSD-3-Clause + Copyright(c) 2024 ZTE Corporation. + +ZXDH Poll Mode Driver +===================== + +The ZXDH PMD (**librte_net_zxdh**) provides poll mode driver support +for 25/100 Gbps ZXDH NX Series Ethernet Controller +based on the ZTE Ethernet Controller E310/E312. 
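Relating back to the ntnic flow aging behaviour described above (``RTE_ETH_EVENT_FLOW_AGED`` reporting, with aged-out flows not being deleted automatically), the following hedged sketch shows how an application might collect aged-out flows; in practice the retrieval is often deferred to the thread that owns the flow objects rather than performed in the event callback itself:

.. code-block:: c

    #include <rte_ethdev.h>
    #include <rte_flow.h>

    /* Hypothetical callback for RTE_ETH_EVENT_FLOW_AGED. */
    static int
    aged_flow_cb(uint16_t port_id, enum rte_eth_event_type event,
                 void *cb_arg, void *ret_param)
    {
        void *contexts[32]; /* AGE action contexts of aged-out flows */
        struct rte_flow_error error;
        int nb;

        RTE_SET_USED(event);
        RTE_SET_USED(cb_arg);
        RTE_SET_USED(ret_param);

        do {
            nb = rte_flow_get_aged_flows(port_id, contexts,
                                         RTE_DIM(contexts), &error);
            if (nb < 0)
                return nb;
            /* destroy, update or account the aged-out flows here */
        } while (nb == (int)RTE_DIM(contexts));

        return 0;
    }

    /* Registration, e.g. during port setup:
     * rte_eth_dev_callback_register(port_id, RTE_ETH_EVENT_FLOW_AGED,
     *                               aged_flow_cb, NULL);
     */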
+ +- Learn about ZXDH NX Series Ethernet Controller NICs using + ``_. + + +Features +-------- + +Features of the ZXDH PMD are: + +- Multi arch support: x86_64, ARMv8. + + +Driver compilation and testing +------------------------------ + +Refer to the document :ref:`compiling and testing a PMD for a NIC ` +for details. + + +Limitations or Known issues +--------------------------- + +Datapath and some operations are not supported and will be provided later. +X86-32, Power8, ARMv7, RISC-V, Windows and BSD are not supported yet. diff --git a/doc/guides/platform/cnxk.rst b/doc/guides/platform/cnxk.rst index 0e61bc91d9..4aa900dd63 100644 --- a/doc/guides/platform/cnxk.rst +++ b/doc/guides/platform/cnxk.rst @@ -78,6 +78,8 @@ DPDK subsystem. +---+-----+--------------------------------------------------------------+ | 13| ML | rte_mldev | +---+-----+--------------------------------------------------------------+ + | 14| RVU | rte_rawdev | + +---+-----+--------------------------------------------------------------+ PF0 is called the administrative / admin function (AF) and has exclusive privileges to provision RVU functional block's LFs to each of the PF/VF. @@ -175,6 +177,9 @@ This section lists dataplane H/W block(s) available in cnxk SoC. #. **ML Device Driver** See :doc:`../mldevs/cnxk` for Machine Learning device driver information. +#. **RVU LF Driver** + See :doc:`../rawdevs/cnxk_rvu_lf` for RVU LF driver information. + Procedure to Setup Platform --------------------------- @@ -587,8 +592,9 @@ Compile DPDK DPDK may be compiled either natively on OCTEON CN9K/CN10K platform or cross-compiled on an x86 based platform. -Meson build option ``enable_iova_as_pa`` is disabled on CNXK platforms. -So only PMDs supporting this option are enabled on CNXK platform builds. +The Meson build option ``enable_iova_as_pa`` should be set to false +because on CNXK platforms, IOVA is same as the virtual address. +Disabling the iova field in the mbuf frees it up to be used as a dynamic field. Native Compilation ~~~~~~~~~~~~~~~~~~ @@ -599,14 +605,14 @@ CN9K: .. code-block:: console - meson setup -Dplatform=cn9k build + meson setup -Dplatform=cn9k -Denable_iova_as_pa=false build ninja -C build CN10K: .. code-block:: console - meson setup -Dplatform=cn10k build + meson setup -Dplatform=cn10k -Denable_iova_as_pa=false build ninja -C build Cross Compilation @@ -618,14 +624,14 @@ CN9K: .. code-block:: console - meson setup build --cross-file config/arm/arm64_cn9k_linux_gcc + meson setup -Denable_iova_as_pa=false build --cross-file config/arm/arm64_cn9k_linux_gcc ninja -C build CN10K: .. code-block:: console - meson setup build --cross-file config/arm/arm64_cn10k_linux_gcc + meson setup -Denable_iova_as_pa=false build --cross-file config/arm/arm64_cn10k_linux_gcc ninja -C build .. note:: diff --git a/doc/guides/platform/dpaa2.rst b/doc/guides/platform/dpaa2.rst index 2b0d93a976..c9ec21334f 100644 --- a/doc/guides/platform/dpaa2.rst +++ b/doc/guides/platform/dpaa2.rst @@ -105,8 +105,8 @@ separately: Currently supported by DPDK: -- NXP SDK **LSDK 19.09++**. -- MC Firmware version **10.18.0** and higher. +- NXP SDK **LSDK 21.08++**. +- MC Firmware version **10.37.0** and higher. - Supported architectures: **arm64 LE**. 
- Follow the DPDK :ref:`Getting Started Guide for Linux ` diff --git a/doc/guides/prog_guide/env_abstraction_layer.rst b/doc/guides/prog_guide/env_abstraction_layer.rst index b9fac1839d..04214a05b2 100644 --- a/doc/guides/prog_guide/env_abstraction_layer.rst +++ b/doc/guides/prog_guide/env_abstraction_layer.rst @@ -429,12 +429,45 @@ with them once they're registered. Per-lcore and Shared Variables ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -.. note:: - - lcore refers to a logical execution unit of the processor, sometimes called a hardware *thread*. - -Shared variables are the default behavior. -Per-lcore variables are implemented using *Thread Local Storage* (TLS) to provide per-thread local storage. +By default, static variables, memory blocks allocated on the DPDK heap, +and other types of memory are shared by all DPDK threads. + +An application, a DPDK library, or a PMD may opt to keep per-thread state. + +Per-thread data can be maintained using either :doc:`lcore variables `, +*thread-local storage (TLS)* (see ``rte_per_lcore.h``), +or a static array of ``RTE_MAX_LCORE`` elements, indexed by ``rte_lcore_id()``. +These methods allow per-lcore data to be largely internal to the module +and not directly exposed in its API. +Another approach is to explicitly handle per-thread aspects in the API +(e.g., the ports in the eventdev API). + +Lcore variables are suitable for small objects that are statically allocated +at the time of module or application initialization. +An lcore variable takes on one value for each lcore ID-equipped thread +(i.e., for both EAL threads and registered non-EAL threads, +in total ``RTE_MAX_LCORE`` instances). +The lifetime of lcore variables is independent of the owning threads +and can, therefore, be initialized before the threads are created. + +Variables with thread-local storage are allocated when the thread is created +and exist until the thread terminates. +These are applicable for every thread in the process. +Only very small objects should be allocated in TLS, +as large TLS objects can significantly slow down thread creation +and may unnecessarily increase the memory footprint of applications +that extensively use unregistered threads. + +A common but now largely obsolete DPDK pattern is to use a static array +sized according to the maximum number of lcore ID-equipped threads +(i.e., with ``RTE_MAX_LCORE`` elements). +To avoid *false sharing*, each element must be both cache-aligned +and include an ``RTE_CACHE_GUARD``. +This extensive use of padding causes internal fragmentation (i.e., unused space) +and reduces cache hit rates. + +For more discussions on per-lcore state, +refer to the :doc:`lcore variables documentation `. Logs ~~~~ @@ -851,9 +884,9 @@ Signal Safety Other functions are not signal safe because they use one or more library routines that are not themselves signal safe. For example, calling ``rte_panic()`` is not safe in a signal handler - because it uses ``rte_log()`` and ``rte_log()`` calls the - ``syslog()`` library function which is in the list of - signal safe functions in + because it uses ``rte_log()`` and ``rte_log()`` may call ``vfprintf()`` or + ``syslog()`` library functions which are not in the list of + signal safe functions `Signal-Safety manual page `_. 
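+
+As an illustration of staying within the async-signal-safe set,
+a common pattern is to only set a flag in the signal handler
+and defer the actual logging to a normal execution context.
+A minimal sketch, assuming a hypothetical flag and handler name:
+
+.. code-block:: c
+
+   #include <signal.h>
+
+   #include <rte_log.h>
+
+   /* Only an async-signal-safe store happens in the handler. */
+   static volatile sig_atomic_t got_sigusr1;
+
+   static void
+   sigusr1_handler(int signum)
+   {
+       (void)signum;
+       got_sigusr1 = 1;
+   }
+
+   /* Called from an lcore main loop, i.e. outside of signal context. */
+   static void
+   poll_deferred_events(void)
+   {
+       if (got_sigusr1) {
+           got_sigusr1 = 0;
+           RTE_LOG(INFO, USER1, "SIGUSR1 received\n");
+       }
+   }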
The set of functions that are expected to be async-signal-safe in DPDK diff --git a/doc/guides/prog_guide/ethdev/flow_offload.rst b/doc/guides/prog_guide/ethdev/flow_offload.rst index 2d6187ed11..d0fb20896d 100644 --- a/doc/guides/prog_guide/ethdev/flow_offload.rst +++ b/doc/guides/prog_guide/ethdev/flow_offload.rst @@ -1820,6 +1820,31 @@ flows to loop between groups. | ``group`` | Group to redirect packets to | +-----------+------------------------------+ +Action: ``JUMP_TO_TABLE_INDEX`` +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Redirects packets to a particular index in a flow table. + +Bypassing a hierarchy of groups, +this action redirects the matched flow to the specified index +in the particular template table on the device. + +If a matched flow is redirected to a non-existing template table +or the table which doesn't contain a rule at the specified index, +then the behavior is undefined and the resulting behavior is up to driver. + +.. _table_rte_flow_action_jump_to_table_index: + +.. table:: JUMP_TO_TABLE_INDEX + + +-----------+-------------------------------------------+ + | Field | Value | + +===========+===========================================+ + | ``table`` | Template table to redirect packets to | + +-----------+-------------------------------------------+ + | ``index`` | Index in the table to redirect packets to | + +-----------+-------------------------------------------+ + Action: ``MARK`` ^^^^^^^^^^^^^^^^ diff --git a/doc/guides/prog_guide/img/lcore_var_mem_layout.svg b/doc/guides/prog_guide/img/lcore_var_mem_layout.svg new file mode 100644 index 0000000000..c4b286316c --- /dev/null +++ b/doc/guides/prog_guide/img/lcore_var_mem_layout.svg @@ -0,0 +1,310 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 0 + + 1 + + 2 + + 3 + + 4 + + 5 + + 6 + + 7 + + 0 + + int a + + char b + + <padding> + + 8 + + long c + + 16 + + long d + + 24 + + <unallocated> + + 32 + + 40 + + 48 + + 56 + + 64 + + int a + + char b + + <padding> + + 72 + + long c + + 80 + + long d + + 88 + + <unallocated> + + 96 + + 104 + + 112 + + 120 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + struct x_lcore + + + + + + + + + + + + lcore id 0 + + + + + + + + + + + + struct y_lcore + + + + + + #define RTE_MAX_LCORE 2#define RTE_MAX_LCORE_VAR 64 + + + + + + + + + + + + lcore id 1 + + + + + + + + + + + + struct x_lcore + + + + + + + + + + + + struct y_lcore + + + + + + + + + + + + struct lcore_var_buffer.data + + + + + + Handle pointers:x_lcores y_lcores + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/doc/guides/prog_guide/img/static_array_mem_layout.svg b/doc/guides/prog_guide/img/static_array_mem_layout.svg new file mode 100644 index 0000000000..87aa5b26f5 --- /dev/null +++ b/doc/guides/prog_guide/img/static_array_mem_layout.svg @@ -0,0 +1,278 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 0 + + 1 + + 2 + + 3 + + 4 + + 5 + + 6 + + 7 + + 0 + + int a + + char b + + <padding> + + 8 + + __rte_cache_aligned <padding> + + 16 + + 24 + + 32 + + 40 + + 48 + + 56 + + 64 + + RTE_CACHE_GUARD <padding> + + 72 + + 80 + + 88 + + 96 + + 104 + + 112 + + 120 + + 128 + + int a + + char b + + <padding> + + 136 + + __rte_cache_aligned <padding> + + 144 + + 152 + + 160 + + 168 + + 176 + + 184 
+ + 192 + + RTE_CACHE_GUARD <padding> + + 200 + + 208 + + 216 + + 224 + + 232 + + 240 + + 248 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + struct x_lcorelcore id 0 + + + + + + + + + + + + struct x_lcore x_lcores[RTE_MAX_LCORE] + + + + + + + + + + + + struct x_lcorelcore id 1 + + + + \ No newline at end of file diff --git a/doc/guides/prog_guide/index.rst b/doc/guides/prog_guide/index.rst index 7eb1a98d88..c0b5f54c26 100644 --- a/doc/guides/prog_guide/index.rst +++ b/doc/guides/prog_guide/index.rst @@ -24,6 +24,7 @@ Memory Management :maxdepth: 1 :numbered: + lcore_var mempool_lib mbuf_lib multi_proc_support diff --git a/doc/guides/prog_guide/lcore_var.rst b/doc/guides/prog_guide/lcore_var.rst new file mode 100644 index 0000000000..b492ad12c6 --- /dev/null +++ b/doc/guides/prog_guide/lcore_var.rst @@ -0,0 +1,545 @@ +.. SPDX-License-Identifier: BSD-3-Clause + Copyright(c) 2024 Ericsson AB + +Lcore Variables +=============== + +The ``rte_lcore_var.h`` API provides a mechanism to allocate and +access per-lcore id variables in a space- and cycle-efficient manner. + + +Lcore Variables API +------------------- + +A per-lcore id variable (or lcore variable for short) +holds a unique value for each EAL thread and registered non-EAL thread. +Thus, there is one distinct value for each past, current and future +lcore id-equipped thread, with a total of ``RTE_MAX_LCORE`` instances. + +The value of the lcore variable for one lcore id is independent of the +values associated with other lcore ids within the same variable. + +For detailed information on the lcore variables API, +please refer to the ``rte_lcore_var.h`` API documentation. + + +Lcore Variable Handle +~~~~~~~~~~~~~~~~~~~~~ + +To allocate and access an lcore variable's values, a *handle* is used. +The handle is represented by an opaque pointer, +only to be dereferenced using the appropriate ```` macros. + +The handle is a pointer to the value's type +(e.g., for an ``uint32_t`` lcore variable, the handle is a ``uint32_t *``). + +The reason the handle is typed (i.e., it's not a void pointer or an integer) +is to enable type checking when accessing values of the lcore variable. + +A handle may be passed between modules and threads +just like any other pointer. + +A valid (i.e., allocated) handle never has the value NULL. +Thus, a handle set to NULL may be used +to signify that allocation has not yet been done. + + +Lcore Variable Allocation +~~~~~~~~~~~~~~~~~~~~~~~~~ + +An lcore variable is created in two steps: + +1. Define an lcore variable handle by using ``RTE_LCORE_VAR_HANDLE``. +2. Allocate lcore variable storage and initialize the handle + by using ``RTE_LCORE_VAR_ALLOC`` or ``RTE_LCORE_VAR_INIT``. + Allocation generally occurs at the time of module initialization, + but may be done at any time. + +The lifetime of an lcore variable is not tied to the thread that created it. + +Each lcore variable has ``RTE_MAX_LCORE`` values, +one for each possible lcore id. +All of an lcore variable's values may be accessed +from the moment the lcore variable is created, +throughout the lifetime of the EAL (i.e., until ``rte_eal_cleanup()``). + +Lcore variables do not need to be freed and cannot be freed. + + +Access +~~~~~~ + +The value of any lcore variable for any lcore id +may be accessed from any thread (including unregistered threads), +but it should only be *frequently* read from or written to by the *owner*. 
+A thread is considered the owner of a particular lcore variable value instance +if it has the lcore id associated with that instance. + +Non-owner accesses results in *false sharing*. +As long as non-owner accesses are rare, +they will have only a very slight effect on performance. +This property of lcore variables memory organization is intentional. +See the implementation section for more information. + +Values of the same lcore variable, +associated with different lcore ids may be frequently read or written +by their respective owners without risking false sharing. + +An appropriate synchronization mechanism, +such as atomic load and stores, +should be employed to prevent data races between the owning thread +and any other thread accessing the same value instance. + +The value of the lcore variable for a particular lcore id +is accessed via ``RTE_LCORE_VAR_LCORE``. + +A common pattern is for an EAL thread or a registered non-EAL thread +to access its own lcore variable value. +For this purpose, a shorthand exists as ``RTE_LCORE_VAR``. + +``RTE_LCORE_VAR_FOREACH`` may be used to iterate +over all values of a particular lcore variable. + +The handle, defined by ``RTE_LCORE_VAR_HANDLE``, +is a pointer of the same type as the value, +but it must be treated as an opaque identifier +and cannot be directly dereferenced. + +Lcore variable handles and value pointers may be freely passed +between different threads. + + +Storage +~~~~~~~ + +An lcore variable's values may be of a primitive type like ``int``, +but is typically a ``struct``. + +The lcore variable handle introduces a per-variable +(not per-value/per-lcore id) overhead of ``sizeof(void *)`` bytes, +so there are some memory footprint gains to be made by organizing +all per-lcore id data for a particular module as one lcore variable +(e.g., as a struct). + +An application may define an lcore variable handle +without ever allocating the lcore variable. + +The size of an lcore variable's value cannot exceed +the DPDK build-time constant ``RTE_MAX_LCORE_VAR``. +An lcore variable's size is the size of one of its value instance, +not the aggregate of all its ``RTE_MAX_LCORE`` instances. + +Lcore variables should generally *not* be ``__rte_cache_aligned`` +and need *not* include a ``RTE_CACHE_GUARD`` field, +since these constructs are designed to avoid false sharing. +With lcore variables, false sharing is largely avoided by other means. +In the case of an lcore variable instance, +the thread most recently accessing nearby data structures +should almost always be the lcore variable's owner. +Adding padding (e.g., with ``RTE_CACHE_GUARD``) +will increase the effective memory working set size, +potentially reducing performance. + +Lcore variable values are initialized to zero by default. + +Lcore variables are not stored in huge page memory. + + +Example +~~~~~~~ + +Below is an example of the use of an lcore variable: + +.. 
code-block:: c + + struct foo_lcore_state { + int a; + long b; + }; + + static RTE_LCORE_VAR_HANDLE(struct foo_lcore_state, lcore_states); + + long foo_get_a_plus_b(void) + { + const struct foo_lcore_state *state = RTE_LCORE_VAR(lcore_states); + + return state->a + state->b; + } + + RTE_INIT(rte_foo_init) + { + RTE_LCORE_VAR_ALLOC(lcore_states); + + unsigned int lcore_id; + struct foo_lcore_state *state; + RTE_LCORE_VAR_FOREACH(lcore_id, state, lcore_states) { + /* initialize state */ + } + + /* other initialization */ + } + + +Implementation +-------------- + +This section gives an overview of the implementation of lcore variables, +and some background to its design. + + +Lcore Variable Buffers +~~~~~~~~~~~~~~~~~~~~~~ + +Lcore variable values are kept in a set of ``lcore_var_buffer`` structs. + +.. literalinclude:: ../../../lib/eal/common/eal_common_lcore_var.c + :language: c + :start-after: base unit + :end-before: last allocated unit + +An lcore var buffer stores at a minimum one, but usually many, lcore variables. + +The value instances for all lcore ids are stored in the same buffer. +However, each lcore id has its own slice of the ``data`` array. +Such a slice is ``RTE_MAX_LCORE_VAR`` bytes in size. + +In this way, the values associated with a particular lcore id +are grouped spatially close (in memory). +No padding is required to prevent false sharing. + +.. literalinclude:: ../../../lib/eal/common/eal_common_lcore_var.c + :language: c + :start-after: last allocated unit + :end-before: >8 end of documented variables + +The implementation maintains a current ``lcore_var_buffer`` and an ``offset``, +where the latter tracks how many bytes of this current buffer has been allocated. + +The ``offset`` is progressively incremented +(by the size of the just-allocated lcore variable), +as lcore variables are being allocated. + +If the allocation of a variable would result in an ``offset`` larger +than ``RTE_MAX_LCORE_VAR`` (i.e., the slice size), the buffer is full. +In that case, new buffer is allocated off the heap, and the ``offset`` is reset. + +The lcore var buffers are arranged in a link list, +to allow freeing them at the point of ``rte_eal_cleanup()``. + +The lcore variable buffers are allocated off the regular C heap. +There are a number of reasons for not using ```` +and huge pages for lcore variables: + +- The libc heap is available at any time, + including early in the DPDK initialization. +- The amount of data kept in lcore variables is projected to be small, + and thus is unlikely to induce translate lookaside buffer (TLB) misses. +- The last (and potentially only) lcore buffer in the chain + will likely only partially be in use. + Huge pages of the sort used by DPDK are always resident in memory, + and their use would result in a significant amount of memory going to waste. + An example: ~256 kB worth of lcore variables are allocated + by DPDK libraries, PMDs and the application. + ``RTE_MAX_LCORE_VAR`` is set to 1 MB and ``RTE_MAX_LCORE`` to 128. + With 4 kB OS pages, only the first ~64 pages of each of the 128 per-lcore id slices + in the (only) ``lcore_var_buffer`` will actually be resident (paged in). + Here, demand paging saves ~98 MB of memory. + +.. note:: + + Not residing in huge pages, lcore variables cannot be accessed from secondary processes. + +Heap allocation failures are treated as fatal. +The reason for this unorthodox design is that a majority of the allocations +are deemed to happen at initialization. 
+An early heap allocation failure for a fixed amount of data is a situation +not unlike one where there is not enough memory available for static variables +(i.e., the BSS or data sections). + +Provided these assumptions hold true, it's deemed acceptable +to leave the application out of handling memory allocation failures. + +The upside of this approach is that no error handling code is required +on the API user side. + + +Lcore Variable Handles +~~~~~~~~~~~~~~~~~~~~~~ + +Upon lcore variable allocation, the lcore variables API returns +an opaque *handle* in the form of a pointer. +The value of the pointer is ``buffer->data + offset``. + +Translating a handle base pointer to a pointer to a value +associated with a particular lcore id is straightforward: + +.. literalinclude:: ../../../lib/eal/include/rte_lcore_var.h + :language: c + :start-after: access function 8< + :end-before: >8 end of access function + +``RTE_MAX_LCORE_VAR`` is a public macro to allow the compiler +to optimize the ``lcore_id * RTE_MAX_LCORE_VAR`` expression, +and replace the multiplication with a less expensive arithmetic operation. + +To maintain type safety, the ``RTE_LCORE_VAR*()`` macros should be used, +instead of directly invoking ``rte_lcore_var_lcore()``. +The macros return a pointer of the same type as the handle +(i.e., a pointer to the value's type). + + +Memory Layout +~~~~~~~~~~~~~ + +This section describes how lcore variables are organized in memory. + +As an illustration, two example modules are used, +``rte_x`` and ``rte_y``, both maintaining per-lcore id state +as a part of their implementation. + +Two different methods will be used to maintain such state - +lcore variables and, to serve as a reference, lcore id-indexed static arrays. + +Certain parameters are scaled down to make graphical depictions more practical. + +For the purpose of this exercise, a ``RTE_MAX_LCORE`` of 2 is assumed. +In a real-world configuration, the maximum number of +EAL threads and registered threads will be much greater (e.g., 128). + +The lcore variables example assumes a ``RTE_MAX_LCORE_VAR`` of 64. +In a real-world configuration (as controlled by ``rte_config.h``), +the value of this compile-time constant will be much greater (e.g., 1048576). + +The per-lcore id state is also smaller than what most real-world modules would have. + +Lcore Variables Example +^^^^^^^^^^^^^^^^^^^^^^^ + +When lcore variables are used, the parts of ``rte_x`` and ``rte_y`` +that deal with the declaration and allocation of per-lcore id data +may look something like below. + +.. code-block:: c + + /* -- Lcore variables -- */ + + /* rte_x.c */ + + struct x_lcore + { + int a; + char b; + }; + + static RTE_LCORE_VAR_HANDLE(struct x_lcore, x_lcores); + RTE_LCORE_VAR_INIT(x_lcores); + + /../ + + /* rte_y.c */ + + struct y_lcore + { + long c; + long d; + }; + + static RTE_LCORE_VAR_HANDLE(struct y_lcore, y_lcores); + RTE_LCORE_VAR_INIT(y_lcores); + + /../ + +The resulting memory layout will look something like the following: + +.. figure:: img/lcore_var_mem_layout.* + +The above figure assumes that ``x_lcores`` is allocated prior to ``y_lcores``. +``RTE_LCORE_VAR_INIT()`` relies constructors, run prior to ``main()`` in an undefined order. + +The use of lcore variables ensures that per-lcore id data is kept in close proximity, +within a designated region of memory. +This proximity enhances data locality and can improve performance. 
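+
+Access to this per-lcore id state then goes through the handles.
+A minimal sketch, assuming a hypothetical accessor function in ``rte_x``:
+
+.. code-block:: c
+
+   /* rte_x.c */
+
+   int
+   x_get_a(void)
+   {
+       const struct x_lcore *lcore = RTE_LCORE_VAR(x_lcores);
+
+       return lcore->a;
+   }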
+ +Lcore Id Index Static Array Example +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Below is an example of the struct declarations, +declarations and the resulting organization in memory +in case an lcore id indexed static array of cache-line aligned, +RTE_CACHE_GUARDed structs are used to maintain per-lcore id state. + +This is a common pattern in DPDK, which lcore variables attempts to replace. + +.. code-block:: c + + /* -- Cache-aligned static arrays -- */ + + /* rte_x.c */ + + struct __rte_cache_aligned x_lcore + { + int a; + char b; + RTE_CACHE_GUARD; + }; + + static struct x_lcore x_lcores[RTE_MAX_LCORE]; + + /../ + + /* rte_y.c */ + + struct __rte_cache_aligned y_lcore + { + long c; + long d; + RTE_CACHE_GUARD; + }; + + static struct y_lcore y_lcores[RTE_MAX_LCORE]; + + /../ + +In this approach, accessing the state for a particular lcore id is merely +a matter retrieving the lcore id and looking up the correct struct instance. + +.. code-block:: c + + struct x_lcore *my_lcore_state = &x_lcores[rte_lcore_id()]; + +The address "0" at the top of the left-most column in the figure +represent the base address for the ``x_lcores`` array +(in the BSS segment in memory). + +The figure only includes the memory layout for the ``rte_x`` example module. +``rte_y`` would look very similar, with ``y_lcores`` being located +at some other address in the BSS section. + +.. figure:: img/static_array_mem_layout.* + +The static array approach results in the per-lcore id +being organized around modules, not lcore ids. +To avoid false sharing, an extensive use of padding is employed, +causing cache fragmentation. + +Because the padding is interspersed with the data, +demand paging is unlikely to reduce the actual resident DRAM memory footprint. +This is because the padding is smaller +than a typical operating system memory page (usually 4 kB). + + +Performance +~~~~~~~~~~~ + +One of the goals of lcore variables is to improve performance. +This is achieved by packing often-used data in fewer cache lines, +and thus reducing fragmentation in CPU caches +and thus somewhat improving the effective cache size and cache hit rates. + +The application-level gains depends much on how much data is kept in lcore variables, +and how often it is accessed, +and how much pressure the application asserts on the CPU caches +(i.e., how much other memory it accesses). + +The ``lcore_var_perf_autotest`` is an attempt at exploring +the performance benefits (or drawbacks) of lcore variables +compared to its alternatives. +Being a micro benchmark, it needs to be taken with a grain of salt. + +Generally, one shouldn't expect more than some very modest gains in performance +after a switch from lcore id indexed arrays to lcore variables. + +An additional benefit of the use of lcore variables is that it avoids +certain tricky issues related to CPU core hardware prefetching +(e.g., next-N-lines prefetching) that may cause false sharing +even when data used by two cores do not reside on the same cache line. +Hardware prefetch behavior is generally not publicly documented +and varies across CPU vendors, CPU generations and BIOS (or similar) configurations. +For applications aiming to be portable, this may cause issues. +Often, CPU hardware prefetch-induced issues are non-existent, +except some particular circumstances, where their adverse effects may be significant. + + +Alternatives +------------ + +Lcore Id Indexed Static Arrays +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Lcore variables are designed to replace a pattern exemplified below: + +.. 
code-block:: c + + struct __rte_cache_aligned foo_lcore_state { + int a; + long b; + RTE_CACHE_GUARD; + }; + + static struct foo_lcore_state lcore_states[RTE_MAX_LCORE]; + +This scheme is simple and effective, but has one drawback: +the data is organized so that objects related to all lcores for a particular module +are kept close in memory. +At a bare minimum, this requires sizing data structures +(e.g., using ``__rte_cache_aligned``) to an even number of cache lines +and ensuring that allocation of such objects +are cache line aligned to avoid false sharing. +With CPU hardware prefetching and memory loads resulting from speculative execution +(functions which seemingly are getting more eager faster +than they are getting more intelligent), +one or more "guard" cache lines may be required +to separate one lcore's data from another's and prevent false sharing. + +Lcore variables offer the advantage of working with, +rather than against, the CPU's assumptions. +A next-line hardware prefetcher, for example, may function as intended +(i.e., to the benefit, not detriment, of system performance). + + +Thread Local Storage +~~~~~~~~~~~~~~~~~~~~ + +An alternative to ``rte_lcore_var.h`` is the ``rte_per_lcore.h`` API, +which makes use of thread-local storage +(TLS, e.g., GCC ``__thread`` or C11 ``_Thread_local``). + +There are a number of differences between using TLS +and the use of lcore variables. + +The lifecycle of a thread-local variable instance is tied to that of the thread. +The data cannot be accessed before the thread has been created, +nor after it has terminated. +As a result, thread-local variables must be initialized in a "lazy" manner +(e.g., at the point of thread creation). +Lcore variables may be accessed immediately after having been allocated +(which may occur before any thread beyond the main thread is running). + +A thread-local variable is duplicated across all threads in the process, +including unregistered non-EAL threads (i.e., "regular" threads). +For DPDK applications heavily relying on multi-threading +(in conjunction to DPDK's "one thread per core" pattern), +either by having many concurrent threads or creating/destroying threads at a high rate, +an excessive use of thread-local variables may cause inefficiencies +(e.g., increased thread creation overhead due to thread-local storage initialization +or increased memory footprint). +Lcore variables *only* exist for threads with an lcore id. + +Whether data in thread-local storage can be shared between threads +(i.e., whether a pointer to a thread-local variable can be passed to +and successfully dereferenced by a non-owning thread) +depends on the specifics of the TLS implementation. +With GCC ``__thread`` and GCC ``_Thread_local``, +data sharing between threads is supported. +In the C11 standard, accessing another thread's ``_Thread_local`` object +is implementation-defined. +Lcore variable instances may be accessed reliably by any thread. + +Lcore variables also relies on TLS to retrieve the thread's lcore id. +However, the rest of the per-thread data is not kept in TLS. + +From a memory layout perspective, TLS is similar to lcore variables, +and thus per-thread data structure need not be padded. + +In case the above-mentioned drawbacks of the use of TLS is of no significance +to a particular application, TLS is a good alternative to lcore variables. 
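+
+For reference, a minimal sketch of the TLS-based alternative discussed above,
+using the ``rte_per_lcore.h`` macros
+(the module and field names are hypothetical):
+
+.. code-block:: c
+
+   #include <rte_per_lcore.h>
+
+   struct foo_lcore_state {
+       int a;
+       long b;
+   };
+
+   /* One thread-local instance per thread, including unregistered threads. */
+   static RTE_DEFINE_PER_LCORE(struct foo_lcore_state, foo_state);
+
+   long
+   foo_get_a_plus_b(void)
+   {
+       const struct foo_lcore_state *state = &RTE_PER_LCORE(foo_state);
+
+       return state->a + state->b;
+   }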
diff --git a/doc/guides/prog_guide/log_lib.rst b/doc/guides/prog_guide/log_lib.rst index ff9d1b54a2..3e888b8965 100644 --- a/doc/guides/prog_guide/log_lib.rst +++ b/doc/guides/prog_guide/log_lib.rst @@ -5,9 +5,9 @@ Log Library =========== The DPDK Log library provides the logging functionality for other DPDK libraries and drivers. -By default, in a Linux application, logs are sent to syslog and also to the console. -On FreeBSD and Windows applications, logs are sent only to the console. -However, the log function can be overridden by the user to use a different logging mechanism. +By default, logs are sent only to standard error output of the process. +The syslog EAL option can be used to redirect to the stystem logger on Linux and FreeBSD. +In addition, the log can be redirected to a different stdio file stream. Log Levels ---------- @@ -59,6 +59,7 @@ For example:: Within an application, the same result can be got using the ``rte_log_set_level_pattern()`` or ``rte_log_set_level_regex()`` APIs. + Using Logging APIs to Generate Log Messages ------------------------------------------- @@ -110,3 +111,54 @@ Throughout the cfgfile library, all logging calls are therefore of the form: CFG_LOG(ERR, "invalid comment characters %c", params->comment_character); + +Log timestamp +~~~~~~~~~~~~~ + +An optional timestamp can be added before each message by adding the ``--log-timestamp`` option. +For example:: + + /path/to/app --log-level=lib.*:debug --log-timestamp + +Multiple alternative timestamp formats are available: + +.. csv-table:: Log time stamp format + :header: "Format", "Description", "Example" + :widths: 6, 30, 32 + + "ctime", "Unix ctime", "``[Wed Mar 20 07:26:12 2024]``" + "delta", "Offset since last", "``[< 3.162373>]``" + "reltime", "Seconds since last or time if minute changed", "``[ +3.001791]`` or ``[Mar20 07:26:12]``" + "iso", "ISO-8601", "``[2024-03-20T07:26:12−07:00]``" + +To prefix all console messages with ISO format time the syntax is:: + + /path/to/app --log-timestamp=iso + +.. note:: + + Timestamp option has no effect if using syslog + because the ``syslog()`` service already does timestamping internally. + + +Color output +~~~~~~~~~~~~ + +The log library will highlight important messages. +This is controlled by the ``--log-color`` option. +The optional argument describes when color is enabled: + +:never: Do not enable color. This is the default behavior. + +:auto: Enable color only when printing to a terminal. + This is the same as ``--log-color`` with no argument. + +:always: Always print color. + +For example to enable color in logs if using terminal:: + + /path/to/app --log-color + +.. note:: + + Color output is never used for syslog or systemd journal logging. diff --git a/doc/guides/prog_guide/power_man.rst b/doc/guides/prog_guide/power_man.rst index f6674efe2d..22e6e4fe1d 100644 --- a/doc/guides/prog_guide/power_man.rst +++ b/doc/guides/prog_guide/power_man.rst @@ -107,6 +107,28 @@ User Cases The power management mechanism is used to save power when performing L3 forwarding. +PM QoS +------ + +The ``/sys/devices/system/cpu/cpuX/power/pm_qos_resume_latency_us`` +sysfs interface is used to set and get the resume latency limit +on the cpuX for userspace. +Each cpuidle governor in Linux selects which idle state to enter +based on this CPU resume latency in their idle task. + +The deeper the idle state, the lower the power consumption, +but the longer the resume time. 
+Some services are latency sensitive and very except the low resume time, +like interrupt packet receiving mode. + +Applications can set and get the CPU resume latency with +``rte_power_qos_set_cpu_resume_latency()`` +and ``rte_power_qos_get_cpu_resume_latency()`` respectively. +Applications can set a strict resume latency (zero value) +to lower the resume latency and get better performance +(instead, the power consumption of platform may increase). + + Ethernet PMD Power Management API --------------------------------- @@ -191,8 +213,8 @@ API Overview for Ethernet PMD Power Management * **Set Scaling Max Freq**: Set the maximum frequency (kHz) to be used in Frequency Scaling mode. -Intel Uncore API ----------------- +Uncore API +---------- Abstract ~~~~~~~~ @@ -203,6 +225,9 @@ to achieve high performance: L3 cache, on-die memory controller, etc. Significant power savings can be achieved by reducing the uncore frequency to its lowest value. +Intel Uncore +~~~~~~~~~~~~ + The Linux kernel provides the driver "intel-uncore-frequency" to control the uncore frequency limits for x86 platform. The driver is available from kernel version 5.6 and above. @@ -211,10 +236,23 @@ which was added in 5.6. This manipulates the context of MSR 0x620, which sets min/max of the uncore for the SKU. -API Overview for Intel Uncore -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +AMD EPYC Uncore +~~~~~~~~~~~~~~~ + +On AMD EPYC platforms, the Host System Management Port (HSMP) kernel module +facilitates user-level access to HSMP mailboxes, +which are implemented by the firmware in the System Management Unit (SMU). +The AMD HSMP driver is available starting from kernel version 5.18. +Please ensure that ``CONFIG_AMD_HSMP`` is enabled in your kernel configuration. + +Additionally, the EPYC System Management Interface In-band Library for Linux +offers essential API, enabling user-space software +to effectively manage system functions. + +Uncore API Overview +~~~~~~~~~~~~~~~~~~~ -Overview of each function in the Intel Uncore API, +Overview of each function in the Uncore API, with explanation of what they do. Each function should not be called in the fast path. diff --git a/doc/guides/rawdevs/cnxk_rvu_lf.rst b/doc/guides/rawdevs/cnxk_rvu_lf.rst new file mode 100644 index 0000000000..a1090d8a41 --- /dev/null +++ b/doc/guides/rawdevs/cnxk_rvu_lf.rst @@ -0,0 +1,113 @@ +.. SPDX-License-Identifier: BSD-3-Clause + Copyright(c) 2024 Marvell. + +Marvell CNXK RVU LF Driver +========================== + +CNXK product families can have a use case to allow RVU PF and RVU VF +driver to communicate using mailboxes +and also get notified of any interrupt that may occur on the device. +Hence, a new raw device driver is added for such RVU LF devices. +These devices can map to a RVU PF or a RVU VF +which can send mailboxes to each other. + +Features +-------- + +The RVU LF device implements following features in the rawdev API: + +- Get PF FUNC of associated NPA and SSO devices. +- Get PF FUNC for RVU LF device. +- Register/unregister interrupt handlers. +- Register/unregister mailbox callbacks for the other side to process mailboxes. +- Set mailbox message ID range to be used by the driver. +- Process mailbox messages. +- Get BAR Addresses for the out-of-tree drivers to configure registers. + +Limitations +----------- + +In multi-process mode user-space application must ensure +no resources sharing takes place. +Otherwise, user-space application should ensure synchronization. 
+ +Device Setup +------------ + +The RVU LF devices will need to be bound to a user-space IO driver for use. +The script ``dpdk-devbind.py`` included with DPDK can be used +to view the state of the devices +and to bind them to a suitable DPDK-supported kernel driver. +When querying the status of the devices, +they will appear under the category of "Misc (rawdev) devices", +i.e. the command ``dpdk-devbind.py --status-dev misc`` +can be used to see the state of those devices alone. + +Get NPA and SSO PF FUNC +----------------------- + +API functions ``rte_pmd_rvu_lf_npa_pf_func_get()`` and ``rte_pmd_rvu_lf_sso_pf_func_get()`` +can be used to get the cnxk NPA PF func and SSO PF func +which application can use for NPA/SSO specific configuration. + +Get RVU LF PF FUNC +------------------ + +API function ``rte_pmd_rvu_lf_pf_func_get()`` gets the RVU LF device PF FUNC. + +Register or remove interrupt handler +------------------------------------ + +Out-of-tree drivers can register interrupt handlers using ``rte_pmd_rvu_lf_irq_register()`` +or remove interrupt handler using ``rte_pmd_rvu_lf_irq_unregister()``. +The IRQ numbers for which the interrupts are registered +is negotiated separately and is not in scope of the driver. + +RVU LF raw message processing +----------------------------- + +For processing of mailboxes received on RVU PF/VF application, +out-of-tree drivers can register/unregister callbacks +using ``rte_pmd_rvu_lf_msg_handler_register()`` +and ``rte_pmd_rvu_lf_msg_handler_unregister()``. +Required responses as per the request and message id received +can be filled in the callbacks. + +Once a RVU LF raw device is probed, a range of message ids can be configured +for which mailboxes will be sent using the API ``rte_pmd_rvu_lf_msg_id_range_set()``. + +The out-of-tree driver can call ``rte_pmd_rvu_lf_msg_process()`` to send a request +and receive corresponding response from the other side (PF/VF). +It accepts an opaque pointer of a request and its size which can be defined by user +and provides an opaque pointer for a response and its length. +PF and VF out-of-tree driver can define its own request and response +based on the message id of the mailbox. + +For sample usage of the API, refer ``rvu_lf_rawdev_selftest()``. + +Get BAR addresses +----------------- + +Out-of-tree drivers can retrieve PCI BAR addresses of the device +using the API function ``rte_pmd_rvu_lf_bar_get()``. +This helps PF/VF drivers to configure the registers of the hardware device. + +Self test +--------- + +On EAL initialization RVU LF devices will be probed +and populated into the raw devices. +The rawdev ID of the device can be obtained using invocation +of ``rte_rawdev_get_dev_id("NAME:x")`` from the test application, where: + +- NAME is the desired subsystem: use "RVU_LF". +- x is the device's bus id specified in "bus:device.func" (BDF) format. + BDF follows convention used by lspci i.e. bus, device and func + are specified using respectively two, two and one hex digit(s). + +Use this identifier for further rawdev function calls. + +Selftest rawdev API can be used to verify the mailbox communication +between PF and VF devices based applications. +There can be multiple VFs for a particular PF. +Each VF can send mailboxes to PF and PF can broadcast message to all VFs. diff --git a/doc/guides/rawdevs/index.rst b/doc/guides/rawdevs/index.rst index f34315f051..8e07cf4d6c 100644 --- a/doc/guides/rawdevs/index.rst +++ b/doc/guides/rawdevs/index.rst @@ -13,6 +13,7 @@ application through rawdev API. 
cnxk_bphy cnxk_gpio + cnxk_rvu_lf dpaa2_cmdif ifpga ntb diff --git a/doc/guides/rel_notes/deprecation.rst b/doc/guides/rel_notes/deprecation.rst index 17b7332007..3f5f0e949c 100644 --- a/doc/guides/rel_notes/deprecation.rst +++ b/doc/guides/rel_notes/deprecation.rst @@ -124,19 +124,6 @@ Deprecation Notices The legacy actions should be removed once ``MODIFY_FIELD`` alternative is implemented in drivers. -* cryptodev: The Intel IPsec Multi-Buffer version will be bumped - to a minimum version of v1.4. - This will effect the KASUMI, SNOW3G, ZUC, AESNI GCM, AESNI MB and CHACHAPOLY - SW PMDs. - -* eventdev: The single-event (non-burst) enqueue and dequeue operations, - used by static inline burst enqueue and dequeue functions in ``rte_eventdev.h``, - will be removed in DPDK 23.11. - This simplification includes changing the layout and potentially also - the size of the public ``rte_event_fp_ops`` struct, breaking the ABI. - Since these functions are not called directly by the application, - the API remains unaffected. - * pipeline: The pipeline library legacy API (functions rte_pipeline_*) will be deprecated and subsequently removed in DPDK 24.11 release. Before this, the new pipeline library API (functions rte_swx_pipeline_*) diff --git a/doc/guides/rel_notes/release_24_11.rst b/doc/guides/rel_notes/release_24_11.rst index fa4822d928..5063badf39 100644 --- a/doc/guides/rel_notes/release_24_11.rst +++ b/doc/guides/rel_notes/release_24_11.rst @@ -80,6 +80,20 @@ New Features This addition provides an efficient and straightforward alternative for handling bitsets of intermediate sizes. +* **Added per-lcore static memory allocation facility.** + + Added EAL API ```` for statically allocating small, + frequently-accessed data structures, for which one instance should exist + for each EAL thread and registered non-EAL thread. + + With lcore variables, data is organized spatially on a per-lcore id basis, + rather than per library or PMD, avoiding the need for cache aligning + (or RTE_CACHE_GUARDing) data structures, which in turn + reduces CPU cache internal fragmentation, improving performance. + + Lcore variables are similar to thread-local storage (TLS, e.g. C11 ``_Thread_local``), + but decoupling the values' life time from that of the threads. + * **Extended service cores statistics.** Two new per-service counters are added to the service cores framework. @@ -100,6 +114,22 @@ New Features (and vice versa); freeing the same pointer twice in the same routine; freeing an object that was not created by allocation; etc. +* **Updated logging library** + + * The log subsystem is initialized earlier in startup so all messages go through the library. + + * If the application is a systemd service and the log output is being sent to standard error + then DPDK will switch to journal native protocol. + This allows the more data such as severity to be sent. + + * The syslog option has changed. + By default, messages are no longer sent to syslog unless the ``--syslog`` option is specified. + Syslog is also supported on FreeBSD (but not on Windows). + + * Log messages can be timestamped with ``--log-timestamp`` option. + + * Log messages can be colorized with the ``--log-color`` option. + * **Updated Marvell cnxk mempool driver.** * Added mempool driver support for CN20K SoC. @@ -142,6 +172,7 @@ New Features * Modified the PMD API that controls the LLQ header policy. * Replaced ``enable_llq``, ``normal_llq_hdr`` and ``large_llq_hdr`` devargs with a new shared devarg ``llq_policy`` that keeps the same logic. 
+ * Added validation check for Rx packet descriptor consistency. * **Updated Cisco enic driver.** @@ -160,6 +191,25 @@ New Features * Added NT flow backend initialization. * Added initialization of FPGA modules related to flow HW offload. * Added basic handling of the virtual queues. + * Added flow handling support. + * Added statistics support. + * Added age flow action support. + * Added meter flow metering and flow policy support. + * Added flow actions update support. + * Added asynchronous flow support. + * Added MTU update support. + +* **Updated NVIDIA mlx5 net driver.** + + * Added ``rte_flow_async_create_by_index_with_pattern()`` support. + * Added jump to flow table index support. + +* **Added ZTE zxdh net driver [EXPERIMENTAL].** + + Added ethdev driver support for zxdh NX Series Ethernet Controller. + + * Ability to initialize the NIC. + * No datapath support. * **Added cryptodev queue pair reset support.** @@ -180,6 +230,8 @@ New Features * Added support for SM3 algorithm. * Added support for SM3 HMAC algorithm. * Added support for SM4 CBC, SM4 ECB and SM4 CTR algorithms. + * Bumped the minimum version requirement of Intel IPsec Multi-buffer library to v1.4. + Affected PMDs: KASUMI, SNOW3G, ZUC, AESNI GCM, AESNI MB and CHACHAPOLY. * **Updated openssl crypto driver.** @@ -200,6 +252,17 @@ New Features to check if the DMA device supports assigning fixed priority, allowing for better control over resource allocation and scheduling. +* **Updated Marvell cnxk DMA driver.** + + * Added support for DMA queue priority configuration. + +* **Added Marvell cnxk RVU LF rawdev driver.** + + Added a new raw device driver for Marvell cnxk based devices + to allow out-of-tree driver to manage RVU LF device. + It enables operations such as sending/receiving mailbox, + register and notify the interrupts, etc. + * **Added event device pre-scheduling support.** Added support for pre-scheduling of events to event ports @@ -231,12 +294,26 @@ New Features * Added independent enqueue feature. +* **Updated Marvell cnxk event device driver.** + + * Added eventdev driver support for CN20K SoC. + * **Added IPv4 network order lookup in the FIB library.** A new flag field is introduced in ``rte_fib_conf`` structure. This field is used to pass an extra configuration settings such as ability to lookup IPv4 addresses in network byte order. +* **Added RSS hash key generating API.** + + A new function ``rte_thash_gen_key`` is provided to modify the RSS hash key + to achieve better traffic distribution with RSS. + +* **Added per-CPU power management QoS interface.** + + Added per-CPU PM QoS interface to lower the resume latency + when wake up from idle state. + * **Added new API to register telemetry endpoint callbacks with private arguments.** A new ``rte_telemetry_register_cmd_arg`` function is available to pass an opaque value to @@ -287,10 +364,15 @@ API Changes releases: it handles key=value and only-key cases. * Both ``rte_kvargs_process`` and ``rte_kvargs_process_opt`` reject a NULL ``kvlist`` parameter. +* net: The IPv4 header structure ``rte_ipv4_hdr`` has been marked as two bytes aligned. + * net: The ICMP message types ``RTE_IP_ICMP_ECHO_REPLY`` and ``RTE_IP_ICMP_ECHO_REQUEST`` are marked as deprecated, and are replaced by ``RTE_ICMP_TYPE_ECHO_REPLY`` and ``RTE_ICMP_TYPE_ECHO_REQUEST``. +* net: The IPv6 header structure ``rte_ipv6_hdr`` and extension structures ``rte_ipv6_routing_ext`` + and ``rte_ipv6_fragment_ext`` have been marked as two bytes aligned. 
+ * net: A new IPv6 address structure was introduced to replace ad-hoc ``uint8_t[16]`` arrays. The following libraries and symbols were modified: @@ -401,6 +483,9 @@ ABI Changes * eventdev: Added ``preschedule_type`` field to ``rte_event_dev_config`` structure. +* eventdev: Removed the single-event enqueue and dequeue function pointers + from ``rte_event_fp_fps``. + * graph: To accommodate node specific xstats counters, added ``xstat_cntrs``, ``xstat_desc`` and ``xstat_count`` to ``rte_graph_cluster_node_stats``, added new structure ``rte_node_xstats`` to ``rte_node_register`` and diff --git a/doc/guides/sample_app_ug/img/ptpclient.svg b/doc/guides/sample_app_ug/img/ptpclient.svg index fd78ef839b..20031b69d1 100644 --- a/doc/guides/sample_app_ug/img/ptpclient.svg +++ b/doc/guides/sample_app_ug/img/ptpclient.svg @@ -488,7 +488,7 @@ sodipodi:role="line" id="tspan7096" x="38.764343" - y="590.47479">master + y="590.47479">time transmitter slave + y="593.71478">time receiver `: The PTP client is another minimal implementation of a real world application. In this case the application is a PTP client that communicates with a PTP - master clock to synchronize time on a Network Interface Card (NIC) using the + time transmitter to synchronize time on a Network Interface Card (NIC) using the IEEE1588 protocol. * :doc:`Quality of Service (QoS) Scheduler`: The QoS diff --git a/doc/guides/sample_app_ug/l3_forward_power_man.rst b/doc/guides/sample_app_ug/l3_forward_power_man.rst index 9c9684fea7..82f301ee52 100644 --- a/doc/guides/sample_app_ug/l3_forward_power_man.rst +++ b/doc/guides/sample_app_ug/l3_forward_power_man.rst @@ -68,6 +68,10 @@ In this application, we introduce a heuristic algorithm that allows packet proce if there is no Rx packet received on recent polls. In this way, CPUIdle automatically forces the corresponding cores to enter deeper C-states instead of always running to the C0 state waiting for packets. +But user can set the CPU resume latency to control C-state selection. +Setting the CPU resume latency to 0 +can limit the CPU just to enter C0-state to improve performance, +which may increase power consumption of platform. .. note:: @@ -105,6 +109,8 @@ where, * --config (port,queue,lcore)[,(port,queue,lcore)]: determines which queues from which ports are mapped to which cores. +* --cpu-resume-latency LATENCY: set CPU resume latency to control C-state selection, 0 : just allow to enter C0-state. + * --max-pkt-len: optional, maximum packet length in decimal (64-9600) * --no-numa: optional, disables numa awareness diff --git a/doc/guides/sample_app_ug/ptpclient.rst b/doc/guides/sample_app_ug/ptpclient.rst index d47e942738..e03a8452d8 100644 --- a/doc/guides/sample_app_ug/ptpclient.rst +++ b/doc/guides/sample_app_ug/ptpclient.rst @@ -5,7 +5,7 @@ PTP Client Sample Application ============================= The PTP (Precision Time Protocol) client sample application is a simple -example of using the DPDK IEEE1588 API to communicate with a PTP master clock +example of using the DPDK IEEE1588 API to communicate with a PTP time transmitter to synchronize the time on the NIC and, optionally, on the Linux system. Note, PTP is a time syncing protocol and cannot be used within DPDK as a @@ -21,10 +21,10 @@ The PTP sample application is intended as a simple reference implementation of a PTP client using the DPDK IEEE1588 API. In order to keep the application simple the following assumptions are made: -* The first discovered master is the main for the session. 
+* The first discovered time transmitter is the main for the session. * Only L2 PTP packets are supported. * Only the PTP v2 protocol is supported. -* Only the slave clock is implemented. +* Only the time receiver clock is implemented. How the Application Works @@ -38,18 +38,24 @@ How the Application Works The PTP synchronization in the sample application works as follows: -* Master sends *Sync* message - the slave saves it as T2. -* Master sends *Follow Up* message and sends time of T1. -* Slave sends *Delay Request* frame to PTP Master and stores T3. -* Master sends *Delay Response* T4 time which is time of received T3. +* Time transmitter sends *Sync* message - the time receiver saves it as T2. +* Time transmitter sends *Follow Up* message and sends time of T1. +* Time receiver sends *Delay Request* frame to PTP time transmitter and stores T3. +* Time transmitter sends *Delay Response* T4 time which is time of received T3. -The adjustment for slave can be represented as: +The adjustment for time receiver can be represented as: adj = -[(T2-T1)-(T4 - T3)]/2 If the command line parameter ``-T 1`` is used the application also synchronizes the PTP PHC clock with the Linux kernel clock. +If the command line parameter ``-c 1`` is used, +the application will also use the servo of the local clock. +Only one type of servo is currently implemented, the PI controller. +Default 0 (not used). + + Compiling the Application ------------------------- @@ -65,14 +71,23 @@ To run the example in a ``linux`` environment: .. code-block:: console - .//examples/dpdk-ptpclient -l 1 -n 4 -- -p 0x1 -T 0 + .//examples/dpdk-ptpclient -l 1 -n 4 -- -p 0x1 -T 0 -c 1 Refer to *DPDK Getting Started Guide* for general information on running applications and the Environment Abstraction Layer (EAL) options. * ``-p portmask``: Hexadecimal portmask. -* ``-T 0``: Update only the PTP slave clock. -* ``-T 1``: Update the PTP slave clock and synchronize the Linux Kernel to the PTP clock. +* ``-T 0``: Update only the PTP time receiver clock. +* ``-T 1``: Update the PTP time receiver clock and synchronize the Linux Kernel to the PTP clock. +* ``-c 0``: Not used clock servo controller. +* ``-c 1``: The clock servo PI controller is used and the log will print information + about time transmitter offset. + Note that the PMD needs to support the ``rte_eth_timesync_adjust_freq()`` API + to enable the servo controller. + +Also, by adding ``-T 1`` and ``-c 1``, the time transmitter offset value printed in the log +will slowly converge and eventually stabilise at the nanosecond level. +The synchronisation accuracy is much higher compared to not using a servo controller. Code Explanation @@ -178,7 +193,7 @@ The forwarding loop can be interrupted and the application closed using PTP parsing ~~~~~~~~~~~ -The ``parse_ptp_frames()`` function processes PTP packets, implementing slave +The ``parse_ptp_frames()`` function processes PTP packets, implementing time receiver PTP IEEE1588 L2 functionality. .. literalinclude:: ../../../examples/ptpclient/ptpclient.c @@ -187,11 +202,12 @@ PTP IEEE1588 L2 functionality. :end-before: >8 End of function processes PTP packets. There are 3 types of packets on the RX path which we must parse to create a minimal -implementation of the PTP slave client: +implementation of the PTP time receiver client: * SYNC packet. * FOLLOW UP packet * DELAY RESPONSE packet. When we parse the *FOLLOW UP* packet we also create and send a *DELAY_REQUEST* packet. 
-Also when we parse the *DELAY RESPONSE* packet, and all conditions are met we adjust the PTP slave clock. +Also when we parse the *DELAY RESPONSE* packet, and all conditions are met +we adjust the PTP time receiver clock. diff --git a/doc/guides/testpmd_app_ug/run_app.rst b/doc/guides/testpmd_app_ug/run_app.rst index 1a9b812a7f..48717707a7 100644 --- a/doc/guides/testpmd_app_ug/run_app.rst +++ b/doc/guides/testpmd_app_ug/run_app.rst @@ -571,6 +571,9 @@ The command line options are: The default value is 0. Hairpin will use single port mode and implicit Tx flow mode. +* ``--hairpin-map=Rx port id:Rx queue:Tx port id:Tx queue:queues number`` + + Set explicit hairpin configuration. Testpmd Multi-Process Command-line Options ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ diff --git a/drivers/baseband/acc/acc_common.h b/drivers/baseband/acc/acc_common.h index 0c249d5b93..bf218332be 100644 --- a/drivers/baseband/acc/acc_common.h +++ b/drivers/baseband/acc/acc_common.h @@ -106,7 +106,7 @@ #define ACC_MAX_FCW_SIZE 128 #define ACC_IQ_SIZE 4 -#define ACC_FCW_FFT_BLEN_3 28 +#define ACC_FCW_FFT_BLEN_VRB2 128 /* Constants from K0 computation from 3GPP 38.212 Table 5.4.2.1-2 */ #define ACC_N_ZC_1 66 /* N = 66 Zc for BG 1 */ @@ -795,7 +795,7 @@ alloc_sw_rings_min_mem(struct rte_bbdev *dev, struct acc_device *d, sw_rings_base, ACC_SIZE_64MBYTE); next_64mb_align_addr_iova = sw_rings_base_iova + next_64mb_align_offset; - sw_ring_iova_end_addr = sw_rings_base_iova + dev_sw_ring_size; + sw_ring_iova_end_addr = sw_rings_base_iova + dev_sw_ring_size - 1; /* Check if the end of the sw ring memory block is before the * start of next 64MB aligned mem address diff --git a/drivers/baseband/acc/rte_vrb_pmd.c b/drivers/baseband/acc/rte_vrb_pmd.c index 0455320c2a..eb9892ff31 100644 --- a/drivers/baseband/acc/rte_vrb_pmd.c +++ b/drivers/baseband/acc/rte_vrb_pmd.c @@ -1006,7 +1006,7 @@ vrb_queue_setup(struct rte_bbdev *dev, uint16_t queue_id, case RTE_BBDEV_OP_FFT: fcw_len = ACC_FCW_FFT_BLEN; if (q->d->device_variant == VRB2_VARIANT) - fcw_len = ACC_FCW_FFT_BLEN_3; + fcw_len = ACC_FCW_FFT_BLEN_VRB2; break; case RTE_BBDEV_OP_MLDTS: fcw_len = ACC_FCW_MLDTS_BLEN; @@ -1402,7 +1402,11 @@ vrb_dev_info_get(struct rte_bbdev *dev, struct rte_bbdev_driver_info *dev_info) RTE_BBDEV_FFT_FP16_INPUT | RTE_BBDEV_FFT_FP16_OUTPUT | RTE_BBDEV_FFT_POWER_MEAS | - RTE_BBDEV_FFT_WINDOWING_BYPASS, + RTE_BBDEV_FFT_WINDOWING_BYPASS | + RTE_BBDEV_FFT_TIMING_OFFSET_PER_CS | + RTE_BBDEV_FFT_TIMING_ERROR | + RTE_BBDEV_FFT_DEWINDOWING | + RTE_BBDEV_FFT_FREQ_RESAMPLING, .num_buffers_src = 1, .num_buffers_dst = 1, .fft_windows_num = ACC_MAX_FFT_WIN, @@ -1795,6 +1799,9 @@ vrb_fcw_ld_fill(struct rte_bbdev_dec_op *op, struct acc_fcw_ld *fcw, fcw->hcout_offset = 0; } + /* Force saturation to 6 bits LLR. 
*/ + fcw->saturate_input = 1; + fcw->tb_crc_select = 0; if (check_bit(op->ldpc_dec.op_flags, RTE_BBDEV_LDPC_CRC_TYPE_24A_CHECK)) fcw->tb_crc_select = 2; @@ -3725,6 +3732,8 @@ vrb1_fcw_fft_fill(struct rte_bbdev_fft_op *op, struct acc_fcw_fft *fcw) static inline void vrb2_fcw_fft_fill(struct rte_bbdev_fft_op *op, struct acc_fcw_fft_3 *fcw) { + uint8_t cs; + fcw->in_frame_size = op->fft.input_sequence_size; fcw->leading_pad_size = op->fft.input_leading_padding; fcw->out_frame_size = op->fft.output_sequence_size; @@ -3760,6 +3769,16 @@ vrb2_fcw_fft_fill(struct rte_bbdev_fft_op *op, struct acc_fcw_fft_3 *fcw) fcw->bypass = 3; else fcw->bypass = 0; + + fcw->enable_dewin = check_bit(op->fft.op_flags, RTE_BBDEV_FFT_DEWINDOWING); + fcw->freq_resample_mode = op->fft.freq_resample_mode; + fcw->depad_output_size = fcw->freq_resample_mode == 0 ? + op->fft.output_sequence_size : op->fft.output_depadded_size; + for (cs = 0; cs < RTE_BBDEV_MAX_CS; cs++) { + fcw->cs_theta_0[cs] = op->fft.cs_theta_0[cs]; + fcw->cs_theta_d[cs] = op->fft.cs_theta_d[cs]; + fcw->cs_time_offset[cs] = op->fft.time_offset[cs]; + } } static inline int @@ -3782,8 +3801,14 @@ vrb_dma_desc_fft_fill(struct rte_bbdev_fft_op *op, /* FCW already done */ acc_header_init(desc); - RTE_SET_USED(win_input); - RTE_SET_USED(win_offset); + if (win_en && win_input) { + desc->data_ptrs[bd_idx].address = rte_pktmbuf_iova_offset(win_input, *win_offset); + desc->data_ptrs[bd_idx].blen = op->fft.output_depadded_size * 2; + desc->data_ptrs[bd_idx].blkid = ACC_DMA_BLKID_DEWIN_IN; + desc->data_ptrs[bd_idx].last = 0; + desc->data_ptrs[bd_idx].dma_ext = 0; + bd_idx++; + } desc->data_ptrs[bd_idx].address = rte_pktmbuf_iova_offset(input, *in_offset); desc->data_ptrs[bd_idx].blen = op->fft.input_sequence_size * ACC_IQ_SIZE; diff --git a/drivers/baseband/acc/vrb2_vf_enum.h b/drivers/baseband/acc/vrb2_vf_enum.h index 9c6e451010..1cc6986c67 100644 --- a/drivers/baseband/acc/vrb2_vf_enum.h +++ b/drivers/baseband/acc/vrb2_vf_enum.h @@ -18,8 +18,8 @@ enum { VRB2_VfHiInfoRingIntWrEnVf = 0x00000020, VRB2_VfHiInfoRingPf2VfWrEnVf = 0x00000024, VRB2_VfHiMsixVectorMapperVf = 0x00000060, - VRB2_VfHiDeviceStatus = 0x00000068, - VRB2_VfHiInterruptSrc = 0x00000070, + VRB2_VfHiDeviceStatus = 0x00000064, + VRB2_VfHiInterruptSrc = 0x00000068, VRB2_VfDmaFec5GulDescBaseLoRegVf = 0x00000120, VRB2_VfDmaFec5GulDescBaseHiRegVf = 0x00000124, VRB2_VfDmaFec5GulRespPtrLoRegVf = 0x00000128, diff --git a/drivers/bus/fslmc/bus_fslmc_driver.h b/drivers/bus/fslmc/bus_fslmc_driver.h index 3095458133..1d4ce4785f 100644 --- a/drivers/bus/fslmc/bus_fslmc_driver.h +++ b/drivers/bus/fslmc/bus_fslmc_driver.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: BSD-3-Clause * - * Copyright 2016,2021 NXP + * Copyright 2016,2021-2023 NXP * */ @@ -33,9 +33,6 @@ #include -#include "portal/dpaa2_hw_pvt.h" -#include "portal/dpaa2_hw_dpio.h" - #ifdef __cplusplus extern "C" { #endif @@ -92,22 +89,6 @@ enum rte_dpaa2_dev_type { DPAA2_DEVTYPE_MAX, }; -TAILQ_HEAD(rte_dpaa2_object_list, rte_dpaa2_object); - -typedef int (*rte_dpaa2_obj_create_t)(int vdev_fd, - struct vfio_device_info *obj_info, - int object_id); - -/** - * A structure describing a DPAA2 object. - */ -struct rte_dpaa2_object { - TAILQ_ENTRY(rte_dpaa2_object) next; /**< Next in list. */ - const char *name; /**< Name of Object. */ - enum rte_dpaa2_dev_type dev_type; /**< Type of device */ - rte_dpaa2_obj_create_t create; -}; - /** * A structure describing a DPAA2 device. 
*/ @@ -123,6 +104,7 @@ struct rte_dpaa2_device { enum rte_dpaa2_dev_type dev_type; /**< Device Type */ uint16_t object_id; /**< DPAA2 Object ID */ enum rte_dpaa2_dev_type ep_dev_type; /**< Endpoint Device Type */ + struct dpaa2_dprc_dev *container; uint16_t ep_object_id; /**< Endpoint DPAA2 Object ID */ char ep_name[RTE_DEV_NAME_MAX_LEN]; struct rte_intr_handle *intr_handle; /**< Interrupt handle */ @@ -130,10 +112,34 @@ struct rte_dpaa2_device { char name[FSLMC_OBJECT_MAX_LEN]; /**< DPAA2 Object name*/ }; +typedef int (*rte_dpaa2_obj_create_t)(int vdev_fd, + struct vfio_device_info *obj_info, + struct rte_dpaa2_device *dev); + +typedef void (*rte_dpaa2_obj_close_t)(int object_id); + typedef int (*rte_dpaa2_probe_t)(struct rte_dpaa2_driver *dpaa2_drv, struct rte_dpaa2_device *dpaa2_dev); typedef int (*rte_dpaa2_remove_t)(struct rte_dpaa2_device *dpaa2_dev); +TAILQ_HEAD(rte_dpaa2_object_list, rte_dpaa2_object); + +/** + * A structure describing a DPAA2 object. + */ +struct rte_dpaa2_object { + TAILQ_ENTRY(rte_dpaa2_object) next; /**< Next in list. */ + const char *name; /**< Name of Object. */ + enum rte_dpaa2_dev_type dev_type; /**< Type of device */ + rte_dpaa2_obj_create_t create; + rte_dpaa2_obj_close_t close; +}; + +int +rte_fslmc_vfio_mem_dmamap(uint64_t vaddr, uint64_t iova, uint64_t size); +int +rte_fslmc_vfio_mem_dmaunmap(uint64_t iova, uint64_t size); + /** * A structure describing a DPAA2 driver. */ @@ -146,6 +152,32 @@ struct rte_dpaa2_driver { rte_dpaa2_remove_t remove; }; +int +rte_fslmc_vfio_mem_dmamap(uint64_t vaddr, uint64_t iova, uint64_t size); +__rte_internal +int +rte_fslmc_vfio_mem_dmaunmap(uint64_t iova, uint64_t size); +__rte_internal +uint64_t +rte_fslmc_cold_mem_vaddr_to_iova(void *vaddr, + uint64_t size); +__rte_internal +void * +rte_fslmc_cold_mem_iova_to_vaddr(uint64_t iova, + uint64_t size); +__rte_internal +__rte_hot uint64_t +rte_fslmc_mem_vaddr_to_iova(void *vaddr); +__rte_internal +__rte_hot void * +rte_fslmc_mem_iova_to_vaddr(uint64_t iova); +__rte_internal +uint64_t +rte_fslmc_io_vaddr_to_iova(void *vaddr); +__rte_internal +void * +rte_fslmc_io_iova_to_vaddr(uint64_t iova); + /** * Register a DPAA2 driver. * diff --git a/drivers/bus/fslmc/fslmc_bus.c b/drivers/bus/fslmc/fslmc_bus.c index 097d6dca08..68ad2b801e 100644 --- a/drivers/bus/fslmc/fslmc_bus.c +++ b/drivers/bus/fslmc/fslmc_bus.c @@ -27,7 +27,6 @@ #define FSLMC_BUS_NAME fslmc struct rte_fslmc_bus rte_fslmc_bus; -uint8_t dpaa2_virt_mode; #define DPAA2_SEQN_DYNFIELD_NAME "dpaa2_seqn_dynfield" int dpaa2_seqn_dynfield_offset = -1; @@ -244,8 +243,6 @@ rte_fslmc_parse(const char *name, void *addr) uint8_t sep_exists = 0; int ret = -1; - DPAA2_BUS_DEBUG("Parsing dev=(%s)", name); - /* There are multiple ways this can be called, with bus:dev, name=dev * or just dev. In all cases, the 'addr' is actually a string. */ @@ -318,6 +315,7 @@ rte_fslmc_scan(void) struct dirent *entry; static int process_once; int groupid; + char *group_name; if (process_once) { DPAA2_BUS_DEBUG("Fslmc bus already scanned. 
Not rescanning"); @@ -325,12 +323,20 @@ rte_fslmc_scan(void) } process_once = 1; - ret = fslmc_get_container_group(&groupid); + /* Now we only support single group per process.*/ + group_name = getenv("DPRC"); + if (!group_name) { + DPAA2_BUS_DEBUG("DPAA2: DPRC not available"); + ret = -EINVAL; + goto scan_fail; + } + + ret = fslmc_get_container_group(group_name, &groupid); if (ret != 0) goto scan_fail; /* Scan devices on the group */ - sprintf(fslmc_dirpath, "%s/%s", SYSFS_FSL_MC_DEVICES, fslmc_container); + sprintf(fslmc_dirpath, "%s/%s", SYSFS_FSL_MC_DEVICES, group_name); dir = opendir(fslmc_dirpath); if (!dir) { DPAA2_BUS_ERR("Unable to open VFIO group directory"); @@ -338,7 +344,7 @@ rte_fslmc_scan(void) } /* Scan the DPRC container object */ - ret = scan_one_fslmc_device(fslmc_container); + ret = scan_one_fslmc_device(group_name); if (ret != 0) { /* Error in parsing directory - exit gracefully */ goto scan_fail_cleanup; @@ -384,6 +390,18 @@ rte_fslmc_match(struct rte_dpaa2_driver *dpaa2_drv, return 1; } +static int +rte_fslmc_close(void) +{ + int ret = 0; + + ret = fslmc_vfio_close_group(); + if (ret) + DPAA2_BUS_INFO("Unable to close devices %d", ret); + + return 0; +} + static int rte_fslmc_probe(void) { @@ -419,7 +437,7 @@ rte_fslmc_probe(void) * install callback handler. */ if (rte_eal_process_type() == RTE_PROC_PRIMARY) { - ret = rte_fslmc_vfio_dmamap(); + ret = fslmc_vfio_dmamap(); if (ret) { DPAA2_BUS_ERR("Unable to DMA map existing VAs: (%d)", ret); @@ -437,22 +455,6 @@ rte_fslmc_probe(void) probe_all = rte_fslmc_bus.bus.conf.scan_mode != RTE_BUS_SCAN_ALLOWLIST; - /* In case of PA, the FD addresses returned by qbman APIs are physical - * addresses, which need conversion into equivalent VA address for - * rte_mbuf. For that, a table (a serial array, in memory) is used to - * increase translation efficiency. - * This has to be done before probe as some device initialization - * (during) probe allocate memory (dpaa2_sec) which needs to be pinned - * to this table. - * - * Error is ignored as relevant logs are handled within dpaax and - * handling for unavailable dpaax table too is transparent to caller. - * - * And, the IOVA table is only applicable in case of PA mode. - */ - if (rte_eal_iova_mode() == RTE_IOVA_PA) - dpaax_iova_table_populate(); - TAILQ_FOREACH(dev, &rte_fslmc_bus.device_list, next) { TAILQ_FOREACH(drv, &rte_fslmc_bus.driver_list, next) { ret = rte_fslmc_match(drv, dev); @@ -487,9 +489,6 @@ rte_fslmc_probe(void) } } - if (rte_eal_iova_mode() == RTE_IOVA_VA) - dpaa2_virt_mode = 1; - return 0; } @@ -538,12 +537,6 @@ rte_fslmc_driver_register(struct rte_dpaa2_driver *driver) void rte_fslmc_driver_unregister(struct rte_dpaa2_driver *driver) { - /* Cleanup the PA->VA Translation table; From wherever this function - * is called from. 
- */ - if (rte_eal_iova_mode() == RTE_IOVA_PA) - dpaax_iova_table_depopulate(); - TAILQ_REMOVE(&rte_fslmc_bus.driver_list, driver, next); } @@ -579,13 +572,12 @@ rte_dpaa2_get_iommu_class(void) bool is_vfio_noiommu_enabled = 1; bool has_iova_va; + if (rte_eal_iova_mode() == RTE_IOVA_PA) + return RTE_IOVA_PA; + if (TAILQ_EMPTY(&rte_fslmc_bus.device_list)) return RTE_IOVA_DC; -#ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA - return RTE_IOVA_PA; -#endif - /* check if all devices on the bus support Virtual addressing or not */ has_iova_va = fslmc_all_device_support_iova(); @@ -664,6 +656,7 @@ struct rte_fslmc_bus rte_fslmc_bus = { .bus = { .scan = rte_fslmc_scan, .probe = rte_fslmc_probe, + .cleanup = rte_fslmc_close, .parse = rte_fslmc_parse, .find_device = rte_fslmc_find_device, .get_iommu_class = rte_dpaa2_get_iommu_class, diff --git a/drivers/bus/fslmc/fslmc_vfio.c b/drivers/bus/fslmc/fslmc_vfio.c index 6981679a2d..c581dcc4e2 100644 --- a/drivers/bus/fslmc/fslmc_vfio.c +++ b/drivers/bus/fslmc/fslmc_vfio.c @@ -1,7 +1,7 @@ /* SPDX-License-Identifier: BSD-3-Clause * * Copyright (c) 2015-2016 Freescale Semiconductor, Inc. All rights reserved. - * Copyright 2016-2021 NXP + * Copyright 2016-2024 NXP * */ @@ -19,6 +19,7 @@ #include #include #include +#include #include #include @@ -30,6 +31,7 @@ #include #include #include +#include #include "private.h" #include "fslmc_vfio.h" @@ -39,16 +41,47 @@ #include "portal/dpaa2_hw_pvt.h" #include "portal/dpaa2_hw_dpio.h" -#define FSLMC_CONTAINER_MAX_LEN 8 /**< Of the format dprc.XX */ +#define FSLMC_VFIO_MP "fslmc_vfio_mp_sync" -/* Number of VFIO containers & groups with in */ -static struct fslmc_vfio_group vfio_group; -static struct fslmc_vfio_container vfio_container; -static int container_device_fd; -char *fslmc_container; -static int fslmc_iommu_type; -static uint32_t *msi_intr_vaddr; -void *(*rte_mcp_ptr_list); +/* Container is composed by multiple groups, however, + * now each process only supports single group with in container. + */ +static struct fslmc_vfio_container s_vfio_container; +/* Currently we only support single group/process. 
*/ +static const char *fslmc_group; /* dprc.x*/ +static void *(*rte_mcp_ptr_list); + +struct fslmc_dmaseg { + uint64_t vaddr; + uint64_t iova; + uint64_t size; + + TAILQ_ENTRY(fslmc_dmaseg) next; +}; + +TAILQ_HEAD(fslmc_dmaseg_list, fslmc_dmaseg); + +struct fslmc_dmaseg_list fslmc_memsegs = + TAILQ_HEAD_INITIALIZER(fslmc_memsegs); +struct fslmc_dmaseg_list fslmc_iosegs = + TAILQ_HEAD_INITIALIZER(fslmc_iosegs); + +static uint64_t fslmc_mem_va2iova = RTE_BAD_IOVA; +static int fslmc_mem_map_num; + +struct fslmc_mem_param { + struct vfio_mp_param mp_param; + struct fslmc_dmaseg_list memsegs; + struct fslmc_dmaseg_list iosegs; + uint64_t mem_va2iova; + int mem_map_num; +}; + +enum { + FSLMC_VFIO_SOCKET_REQ_CONTAINER = 0x100, + FSLMC_VFIO_SOCKET_REQ_GROUP, + FSLMC_VFIO_SOCKET_REQ_MEM +}; void * dpaa2_get_mcp_ptr(int portal_idx) @@ -62,6 +95,64 @@ dpaa2_get_mcp_ptr(int portal_idx) static struct rte_dpaa2_object_list dpaa2_obj_list = TAILQ_HEAD_INITIALIZER(dpaa2_obj_list); +static uint64_t +fslmc_io_virt2phy(const void *virtaddr) +{ + FILE *fp = fopen("/proc/self/maps", "r"); + char *line = NULL; + size_t linesz; + uint64_t start, end, phy; + const uint64_t va = (const uint64_t)virtaddr; + char tmp[1024]; + int ret; + + if (!fp) + return RTE_BAD_IOVA; + while (getdelim(&line, &linesz, '\n', fp) > 0) { + char *ptr = line; + int n; + + /** Parse virtual address range.*/ + n = 0; + while (*ptr && !isspace(*ptr)) { + tmp[n] = *ptr; + ptr++; + n++; + } + tmp[n] = 0; + ret = sscanf(tmp, "%" SCNx64 "-%" SCNx64, &start, &end); + if (ret != 2) + continue; + if (va < start || va >= end) + continue; + + /** This virtual address is in this segment.*/ + while (*ptr == ' ' || *ptr == 'r' || + *ptr == 'w' || *ptr == 's' || + *ptr == 'p' || *ptr == 'x' || + *ptr == '-') + ptr++; + + /** Extract phy address*/ + n = 0; + while (*ptr && !isspace(*ptr)) { + tmp[n] = *ptr; + ptr++; + n++; + } + tmp[n] = 0; + phy = strtoul(tmp, 0, 16); + if (!phy) + continue; + + fclose(fp); + return phy + va - start; + } + + fclose(fp); + return RTE_BAD_IOVA; +} + /*register a fslmc bus based dpaa2 driver */ void rte_fslmc_object_register(struct rte_dpaa2_object *object) @@ -71,140 +162,914 @@ rte_fslmc_object_register(struct rte_dpaa2_object *object) TAILQ_INSERT_TAIL(&dpaa2_obj_list, object, next); } +static const char * +fslmc_vfio_get_group_name(void) +{ + return fslmc_group; +} + +static void +fslmc_vfio_set_group_name(const char *group_name) +{ + fslmc_group = group_name; +} + +static int +fslmc_vfio_add_group(int vfio_group_fd, + int iommu_group_num, const char *group_name) +{ + struct fslmc_vfio_group *group; + + group = rte_zmalloc(NULL, sizeof(struct fslmc_vfio_group), 0); + if (!group) + return -ENOMEM; + group->fd = vfio_group_fd; + group->groupid = iommu_group_num; + rte_strscpy(group->group_name, group_name, sizeof(group->group_name)); + if (rte_vfio_noiommu_is_enabled() > 0) + group->iommu_type = RTE_VFIO_NOIOMMU; + else + group->iommu_type = VFIO_TYPE1_IOMMU; + LIST_INSERT_HEAD(&s_vfio_container.groups, group, next); + + return 0; +} + +static int +fslmc_vfio_clear_group(int vfio_group_fd) +{ + struct fslmc_vfio_group *group; + struct fslmc_vfio_device *dev; + int clear = 0; + + LIST_FOREACH(group, &s_vfio_container.groups, next) { + if (group->fd == vfio_group_fd) { + LIST_FOREACH(dev, &group->vfio_devices, next) + LIST_REMOVE(dev, next); + + close(vfio_group_fd); + LIST_REMOVE(group, next); + rte_free(group); + clear = 1; + + break; + } + } + + if (LIST_EMPTY(&s_vfio_container.groups)) { + if (s_vfio_container.fd > 
0) + close(s_vfio_container.fd); + + s_vfio_container.fd = -1; + } + if (clear) + return 0; + + return -ENODEV; +} + +static int +fslmc_vfio_connect_container(int vfio_group_fd) +{ + struct fslmc_vfio_group *group; + + LIST_FOREACH(group, &s_vfio_container.groups, next) { + if (group->fd == vfio_group_fd) { + group->connected = 1; + + return 0; + } + } + + return -ENODEV; +} + +static int +fslmc_vfio_container_connected(int vfio_group_fd) +{ + struct fslmc_vfio_group *group; + + LIST_FOREACH(group, &s_vfio_container.groups, next) { + if (group->fd == vfio_group_fd) { + if (group->connected) + return 1; + } + } + return 0; +} + +static int +fslmc_vfio_iommu_type(int vfio_group_fd) +{ + struct fslmc_vfio_group *group; + + LIST_FOREACH(group, &s_vfio_container.groups, next) { + if (group->fd == vfio_group_fd) + return group->iommu_type; + } + return -ENODEV; +} + +static int +fslmc_vfio_group_fd_by_name(const char *group_name) +{ + struct fslmc_vfio_group *group; + + LIST_FOREACH(group, &s_vfio_container.groups, next) { + if (!strcmp(group->group_name, group_name)) + return group->fd; + } + return -ENODEV; +} + +static int +fslmc_vfio_group_fd_by_id(int group_id) +{ + struct fslmc_vfio_group *group; + + LIST_FOREACH(group, &s_vfio_container.groups, next) { + if (group->groupid == group_id) + return group->fd; + } + return -ENODEV; +} + +static int +fslmc_vfio_group_add_dev(int vfio_group_fd, + int dev_fd, const char *name) +{ + struct fslmc_vfio_group *group; + struct fslmc_vfio_device *dev; + + LIST_FOREACH(group, &s_vfio_container.groups, next) { + if (group->fd == vfio_group_fd) { + dev = rte_zmalloc(NULL, + sizeof(struct fslmc_vfio_device), 0); + dev->fd = dev_fd; + rte_strscpy(dev->dev_name, name, sizeof(dev->dev_name)); + LIST_INSERT_HEAD(&group->vfio_devices, dev, next); + return 0; + } + } + return -ENODEV; +} + +static int +fslmc_vfio_group_remove_dev(int vfio_group_fd, + const char *name) +{ + struct fslmc_vfio_group *group = NULL; + struct fslmc_vfio_device *dev; + int removed = 0; + + LIST_FOREACH(group, &s_vfio_container.groups, next) { + if (group->fd == vfio_group_fd) + break; + } + + if (group) { + LIST_FOREACH(dev, &group->vfio_devices, next) { + if (!strcmp(dev->dev_name, name)) { + LIST_REMOVE(dev, next); + removed = 1; + break; + } + } + } + + if (removed) + return 0; + + return -ENODEV; +} + +static int +fslmc_vfio_container_fd(void) +{ + return s_vfio_container.fd; +} + +static int +fslmc_get_group_id(const char *group_name, + int *groupid) +{ + int ret; + + /* get group number */ + ret = rte_vfio_get_group_num(SYSFS_FSL_MC_DEVICES, + group_name, groupid); + if (ret <= 0) { + DPAA2_BUS_ERR("Find %s IOMMU group", group_name); + if (ret < 0) + return ret; + + return -EIO; + } + + DPAA2_BUS_DEBUG("GROUP(%s) has VFIO iommu group id = %d", + group_name, *groupid); + + return 0; +} + +static int +fslmc_vfio_open_group_fd(const char *group_name) +{ + int vfio_group_fd; + char filename[PATH_MAX]; + struct rte_mp_msg mp_req, *mp_rep; + struct rte_mp_reply mp_reply = {0}; + struct timespec ts = {.tv_sec = 5, .tv_nsec = 0}; + struct vfio_mp_param *p = (struct vfio_mp_param *)mp_req.param; + int iommu_group_num, ret; + + vfio_group_fd = fslmc_vfio_group_fd_by_name(group_name); + if (vfio_group_fd > 0) + return vfio_group_fd; + + ret = fslmc_get_group_id(group_name, &iommu_group_num); + if (ret) + return ret; + /* if primary, try to open the group */ + if (rte_eal_process_type() == RTE_PROC_PRIMARY) { + /* try regular group format */ + snprintf(filename, sizeof(filename), + 
VFIO_GROUP_FMT, iommu_group_num); + vfio_group_fd = open(filename, O_RDWR); + + goto add_vfio_group; + } + /* if we're in a secondary process, request group fd from the primary + * process via mp channel. + */ + p->req = FSLMC_VFIO_SOCKET_REQ_GROUP; + p->group_num = iommu_group_num; + rte_strscpy(mp_req.name, FSLMC_VFIO_MP, sizeof(mp_req.name)); + mp_req.len_param = sizeof(*p); + mp_req.num_fds = 0; + + vfio_group_fd = -1; + if (rte_mp_request_sync(&mp_req, &mp_reply, &ts) == 0 && + mp_reply.nb_received == 1) { + mp_rep = &mp_reply.msgs[0]; + p = (struct vfio_mp_param *)mp_rep->param; + if (p->result == SOCKET_OK && mp_rep->num_fds == 1) + vfio_group_fd = mp_rep->fds[0]; + else if (p->result == SOCKET_NO_FD) + DPAA2_BUS_ERR("Bad VFIO group fd"); + } + + free(mp_reply.msgs); + +add_vfio_group: + if (vfio_group_fd < 0) { + if (rte_eal_process_type() == RTE_PROC_PRIMARY) { + DPAA2_BUS_ERR("Open VFIO group(%s) failed(%d)", + filename, vfio_group_fd); + } else { + DPAA2_BUS_ERR("Cannot request group fd(%d)", + vfio_group_fd); + } + } else { + ret = fslmc_vfio_add_group(vfio_group_fd, iommu_group_num, + group_name); + if (ret) { + close(vfio_group_fd); + return ret; + } + } + + return vfio_group_fd; +} + +static int +fslmc_vfio_check_extensions(int vfio_container_fd) +{ + int ret; + uint32_t idx, n_extensions = 0; + static const int type_id[] = {RTE_VFIO_TYPE1, RTE_VFIO_SPAPR, + RTE_VFIO_NOIOMMU}; + static const char * const type_id_nm[] = {"Type 1", + "sPAPR", "No-IOMMU"}; + + for (idx = 0; idx < RTE_DIM(type_id); idx++) { + ret = ioctl(vfio_container_fd, VFIO_CHECK_EXTENSION, + type_id[idx]); + if (ret < 0) { + DPAA2_BUS_ERR("Could not get IOMMU type, error %i (%s)", + errno, strerror(errno)); + close(vfio_container_fd); + return -errno; + } else if (ret == 1) { + /* we found a supported extension */ + n_extensions++; + } + DPAA2_BUS_DEBUG("IOMMU type %d (%s) is %s", + type_id[idx], type_id_nm[idx], + ret ? 
"supported" : "not supported"); + } + + /* if we didn't find any supported IOMMU types, fail */ + if (!n_extensions) { + close(vfio_container_fd); + return -EIO; + } + + return 0; +} + +static int +fslmc_vfio_open_container_fd(void) +{ + int ret, vfio_container_fd; + struct rte_mp_msg mp_req, *mp_rep; + struct rte_mp_reply mp_reply = {0}; + struct timespec ts = {.tv_sec = 5, .tv_nsec = 0}; + struct vfio_mp_param *p = (void *)mp_req.param; + + if (fslmc_vfio_container_fd() > 0) + return fslmc_vfio_container_fd(); + + /* if we're in a primary process, try to open the container */ + if (rte_eal_process_type() == RTE_PROC_PRIMARY) { + vfio_container_fd = open(VFIO_CONTAINER_PATH, O_RDWR); + if (vfio_container_fd < 0) { + DPAA2_BUS_ERR("Open VFIO container(%s), err(%d)", + VFIO_CONTAINER_PATH, vfio_container_fd); + ret = vfio_container_fd; + goto err_exit; + } + + /* check VFIO API version */ + ret = ioctl(vfio_container_fd, VFIO_GET_API_VERSION); + if (ret < 0) { + DPAA2_BUS_ERR("Get VFIO API version(%d)", + ret); + } else if (ret != VFIO_API_VERSION) { + DPAA2_BUS_ERR("Unsupported VFIO API version(%d)", + ret); + ret = -ENOTSUP; + } + if (ret < 0) { + close(vfio_container_fd); + goto err_exit; + } + + ret = fslmc_vfio_check_extensions(vfio_container_fd); + if (ret) { + DPAA2_BUS_ERR("Unsupported IOMMU extensions found(%d)", + ret); + close(vfio_container_fd); + goto err_exit; + } + + goto success_exit; + } + /* + * if we're in a secondary process, request container fd from the + * primary process via mp channel + */ + p->req = FSLMC_VFIO_SOCKET_REQ_CONTAINER; + rte_strscpy(mp_req.name, FSLMC_VFIO_MP, sizeof(mp_req.name)); + mp_req.len_param = sizeof(*p); + mp_req.num_fds = 0; + + vfio_container_fd = -1; + ret = rte_mp_request_sync(&mp_req, &mp_reply, &ts); + if (ret) + goto err_exit; + + if (mp_reply.nb_received != 1) { + ret = -EIO; + goto err_exit; + } + + mp_rep = &mp_reply.msgs[0]; + p = (void *)mp_rep->param; + if (p->result == SOCKET_OK && mp_rep->num_fds == 1) { + vfio_container_fd = mp_rep->fds[0]; + free(mp_reply.msgs); + } + +success_exit: + s_vfio_container.fd = vfio_container_fd; + + return vfio_container_fd; + +err_exit: + if (mp_reply.msgs) + free(mp_reply.msgs); + DPAA2_BUS_ERR("Open container fd err(%d)", ret); + return ret; +} + int -fslmc_get_container_group(int *groupid) +fslmc_get_container_group(const char *group_name, + int *groupid) { int ret; - char *container; - if (!fslmc_container) { - container = getenv("DPRC"); - if (container == NULL) { - DPAA2_BUS_DEBUG("DPAA2: DPRC not available"); - return -EINVAL; + if (!group_name) { + DPAA2_BUS_ERR("No group name provided!"); + + return -EINVAL; + } + ret = fslmc_get_group_id(group_name, groupid); + if (ret) + return ret; + + fslmc_vfio_set_group_name(group_name); + + return 0; +} + +static int +fslmc_vfio_mp_primary(const struct rte_mp_msg *msg, + const void *peer) +{ + int fd = -1; + int ret; + struct rte_mp_msg reply; + struct vfio_mp_param *r = (void *)reply.param; + const struct vfio_mp_param *m = (const void *)msg->param; + struct fslmc_mem_param *map; + + if (msg->len_param != sizeof(*m)) { + DPAA2_BUS_ERR("Invalid msg size(%d) for req(%d)", + msg->len_param, m->req); + return -EINVAL; + } + + memset(&reply, 0, sizeof(reply)); + + switch (m->req) { + case FSLMC_VFIO_SOCKET_REQ_GROUP: + r->req = FSLMC_VFIO_SOCKET_REQ_GROUP; + r->group_num = m->group_num; + fd = fslmc_vfio_group_fd_by_id(m->group_num); + if (fd < 0) { + r->result = SOCKET_ERR; + } else if (!fd) { + /* if group exists but isn't bound to VFIO driver */ + 
r->result = SOCKET_NO_FD; + } else { + /* if group exists and is bound to VFIO driver */ + r->result = SOCKET_OK; + reply.num_fds = 1; + reply.fds[0] = fd; + } + reply.len_param = sizeof(*r); + break; + case FSLMC_VFIO_SOCKET_REQ_CONTAINER: + r->req = FSLMC_VFIO_SOCKET_REQ_CONTAINER; + fd = fslmc_vfio_container_fd(); + if (fd <= 0) { + r->result = SOCKET_ERR; + } else { + r->result = SOCKET_OK; + reply.num_fds = 1; + reply.fds[0] = fd; + } + reply.len_param = sizeof(*r); + break; + case FSLMC_VFIO_SOCKET_REQ_MEM: + map = (void *)reply.param; + r = &map->mp_param; + r->req = FSLMC_VFIO_SOCKET_REQ_MEM; + r->result = SOCKET_OK; + map->memsegs = fslmc_memsegs; + map->iosegs = fslmc_iosegs; + map->mem_va2iova = fslmc_mem_va2iova; + map->mem_map_num = fslmc_mem_map_num; + reply.len_param = sizeof(struct fslmc_mem_param); + break; + default: + DPAA2_BUS_ERR("VFIO received invalid message(%08x)", + m->req); + return -ENOTSUP; + } + + rte_strscpy(reply.name, FSLMC_VFIO_MP, sizeof(reply.name)); + ret = rte_mp_reply(&reply, peer); + + return ret; +} + +static int +fslmc_vfio_mp_sync_mem_req(void) +{ + struct rte_mp_msg mp_req, *mp_rep; + struct rte_mp_reply mp_reply = {0}; + struct timespec ts = {.tv_sec = 5, .tv_nsec = 0}; + int ret = 0; + struct vfio_mp_param *mp_param; + struct fslmc_mem_param *mem_rsp; + + mp_param = (void *)mp_req.param; + memset(&mp_req, 0, sizeof(struct rte_mp_msg)); + mp_param->req = FSLMC_VFIO_SOCKET_REQ_MEM; + rte_strscpy(mp_req.name, FSLMC_VFIO_MP, sizeof(mp_req.name)); + mp_req.len_param = sizeof(struct vfio_mp_param); + if (rte_mp_request_sync(&mp_req, &mp_reply, &ts) == 0 && + mp_reply.nb_received == 1) { + mp_rep = &mp_reply.msgs[0]; + mem_rsp = (struct fslmc_mem_param *)mp_rep->param; + if (mem_rsp->mp_param.result == SOCKET_OK) { + fslmc_memsegs = mem_rsp->memsegs; + fslmc_mem_va2iova = mem_rsp->mem_va2iova; + fslmc_mem_map_num = mem_rsp->mem_map_num; + } else { + DPAA2_BUS_ERR("Bad MEM SEG"); + ret = -EINVAL; } + } else { + ret = -EINVAL; + } + free(mp_reply.msgs); + + return ret; +} + +static int +fslmc_vfio_mp_sync_setup(void) +{ + int ret; + + if (rte_eal_process_type() == RTE_PROC_PRIMARY) { + ret = rte_mp_action_register(FSLMC_VFIO_MP, + fslmc_vfio_mp_primary); + if (ret && rte_errno != ENOTSUP) + return ret; + } else { + ret = fslmc_vfio_mp_sync_mem_req(); + if (ret) + return ret; + } + + return 0; +} + +static int +vfio_connect_container(int vfio_container_fd, + int vfio_group_fd) +{ + int ret; + int iommu_type; + + if (fslmc_vfio_container_connected(vfio_group_fd)) { + DPAA2_BUS_WARN("VFIO FD(%d) has connected to container", + vfio_group_fd); + return 0; + } + + iommu_type = fslmc_vfio_iommu_type(vfio_group_fd); + if (iommu_type < 0) { + DPAA2_BUS_ERR("Get iommu type(%d)", iommu_type); + + return iommu_type; + } + + /* Check whether support for SMMU type IOMMU present or not */ + ret = ioctl(vfio_container_fd, VFIO_CHECK_EXTENSION, iommu_type); + if (ret <= 0) { + DPAA2_BUS_ERR("Unsupported IOMMU type(%d) ret(%d), err(%d)", + iommu_type, ret, -errno); + return -EINVAL; + } + + ret = ioctl(vfio_group_fd, VFIO_GROUP_SET_CONTAINER, + &vfio_container_fd); + if (ret) { + DPAA2_BUS_ERR("Set group container ret(%d), err(%d)", + ret, -errno); + + return ret; + } + + ret = ioctl(vfio_container_fd, VFIO_SET_IOMMU, iommu_type); + if (ret) { + DPAA2_BUS_ERR("Set iommu ret(%d), err(%d)", + ret, -errno); + + return ret; + } + + return fslmc_vfio_connect_container(vfio_group_fd); +} + +static int +fslmc_map_dma(uint64_t vaddr, rte_iova_t iovaddr, size_t len) +{ + struct 
vfio_iommu_type1_dma_map dma_map = { + .argsz = sizeof(struct vfio_iommu_type1_dma_map), + .flags = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE, + }; + int ret, fd, is_io = 0; + const char *group_name = fslmc_vfio_get_group_name(); + struct fslmc_dmaseg *dmaseg = NULL; + uint64_t phy = 0; + + if (rte_eal_iova_mode() == RTE_IOVA_VA) { + if (vaddr != iovaddr) { + DPAA2_BUS_ERR("IOVA:VA(%" PRIx64 " : %" PRIx64 ") %s", + iovaddr, vaddr, + "should be 1:1 for VA mode"); - if (strlen(container) >= FSLMC_CONTAINER_MAX_LEN) { - DPAA2_BUS_ERR("Invalid container name: %s", container); - return -1; + return -EINVAL; } + } - fslmc_container = strdup(container); - if (!fslmc_container) { - DPAA2_BUS_ERR("Mem alloc failure; Container name"); + phy = rte_mem_virt2phy((const void *)(uintptr_t)vaddr); + if (phy == RTE_BAD_IOVA) { + phy = fslmc_io_virt2phy((const void *)(uintptr_t)vaddr); + if (phy == RTE_BAD_IOVA) return -ENOMEM; + is_io = 1; + } else if (fslmc_mem_va2iova != RTE_BAD_IOVA && + fslmc_mem_va2iova != (iovaddr - vaddr)) { + DPAA2_BUS_WARN("Multiple MEM PA<->VA conversions."); + } + DPAA2_BUS_DEBUG("%s(%zu): VA(%" PRIx64 "):IOVA(%" PRIx64 "):PHY(%" PRIx64 ")", + is_io ? "DMA IO map size" : "DMA MEM map size", + len, vaddr, iovaddr, phy); + + if (is_io) + goto io_mapping_check; + + TAILQ_FOREACH(dmaseg, &fslmc_memsegs, next) { + if (!((vaddr + len) <= dmaseg->vaddr || + (dmaseg->vaddr + dmaseg->size) <= vaddr)) { + DPAA2_BUS_ERR("MEM: New VA Range(%" PRIx64 " ~ %" PRIx64 ")", + vaddr, vaddr + len); + DPAA2_BUS_ERR("MEM: Overlap with (%" PRIx64 " ~ %" PRIx64 ")", + dmaseg->vaddr, + dmaseg->vaddr + dmaseg->size); + return -EEXIST; + } + if (!((iovaddr + len) <= dmaseg->iova || + (dmaseg->iova + dmaseg->size) <= iovaddr)) { + DPAA2_BUS_ERR("MEM: New IOVA Range(%" PRIx64 " ~ %" PRIx64 ")", + iovaddr, iovaddr + len); + DPAA2_BUS_ERR("MEM: Overlap with (%" PRIx64 " ~ %" PRIx64 ")", + dmaseg->iova, + dmaseg->iova + dmaseg->size); + return -EEXIST; + } + } + goto start_mapping; + +io_mapping_check: + TAILQ_FOREACH(dmaseg, &fslmc_iosegs, next) { + if (!((vaddr + len) <= dmaseg->vaddr || + (dmaseg->vaddr + dmaseg->size) <= vaddr)) { + DPAA2_BUS_ERR("IO: New VA Range (%" PRIx64 " ~ %" PRIx64 ")", + vaddr, vaddr + len); + DPAA2_BUS_ERR("IO: Overlap with (%" PRIx64 " ~ %" PRIx64 ")", + dmaseg->vaddr, + dmaseg->vaddr + dmaseg->size); + return -EEXIST; + } + if (!((iovaddr + len) <= dmaseg->iova || + (dmaseg->iova + dmaseg->size) <= iovaddr)) { + DPAA2_BUS_ERR("IO: New IOVA Range(%" PRIx64 " ~ %" PRIx64 ")", + iovaddr, iovaddr + len); + DPAA2_BUS_ERR("IO: Overlap with (%" PRIx64 " ~ %" PRIx64 ")", + dmaseg->iova, + dmaseg->iova + dmaseg->size); + return -EEXIST; + } + } + +start_mapping: + fd = fslmc_vfio_group_fd_by_name(group_name); + if (fd <= 0) { + DPAA2_BUS_ERR("%s: Get fd by name(%s) failed(%d)", + __func__, group_name, fd); + if (fd < 0) + return fd; + return -EIO; + } + if (fslmc_vfio_iommu_type(fd) == RTE_VFIO_NOIOMMU) { + DPAA2_BUS_DEBUG("Running in NOIOMMU mode"); + if (phy != iovaddr) { + DPAA2_BUS_ERR("IOVA should support with IOMMU"); + return -EIO; + } + goto end_mapping; + } + + dma_map.size = len; + dma_map.vaddr = vaddr; + dma_map.iova = iovaddr; + + /* SET DMA MAP for IOMMU */ + if (!fslmc_vfio_container_connected(fd)) { + DPAA2_BUS_ERR("Container is not connected"); + return -EIO; + } + + ret = ioctl(fslmc_vfio_container_fd(), VFIO_IOMMU_MAP_DMA, + &dma_map); + if (ret) { + DPAA2_BUS_ERR("%s(%d) VA(%" PRIx64 "):IOVA(%" PRIx64 "):PHY(%" PRIx64 ")", + is_io ? 
"DMA IO map err" : "DMA MEM map err", + errno, vaddr, iovaddr, phy); + return ret; + } + +end_mapping: + dmaseg = malloc(sizeof(struct fslmc_dmaseg)); + if (!dmaseg) { + DPAA2_BUS_ERR("DMA segment malloc failed!"); + return -ENOMEM; + } + dmaseg->vaddr = vaddr; + dmaseg->iova = iovaddr; + dmaseg->size = len; + if (is_io) { + TAILQ_INSERT_TAIL(&fslmc_iosegs, dmaseg, next); + } else { + fslmc_mem_map_num++; + if (fslmc_mem_map_num == 1) + fslmc_mem_va2iova = iovaddr - vaddr; + else + fslmc_mem_va2iova = RTE_BAD_IOVA; + TAILQ_INSERT_TAIL(&fslmc_memsegs, dmaseg, next); + } + DPAA2_BUS_LOG(NOTICE, + "%s(%zx): VA(%" PRIx64 "):IOVA(%" PRIx64 "):PHY(%" PRIx64 ")", + is_io ? "DMA I/O map size" : "DMA MEM map size", + len, vaddr, iovaddr, phy); + + return 0; +} + +static int +fslmc_unmap_dma(uint64_t vaddr, uint64_t iovaddr, size_t len) +{ + struct vfio_iommu_type1_dma_unmap dma_unmap = { + .argsz = sizeof(struct vfio_iommu_type1_dma_unmap), + .flags = 0, + }; + int ret, fd, is_io = 0; + const char *group_name = fslmc_vfio_get_group_name(); + struct fslmc_dmaseg *dmaseg = NULL; + + TAILQ_FOREACH(dmaseg, &fslmc_memsegs, next) { + if (((vaddr && dmaseg->vaddr == vaddr) || !vaddr) && + dmaseg->iova == iovaddr && + dmaseg->size == len) { + is_io = 0; + break; + } + } + + if (!dmaseg) { + TAILQ_FOREACH(dmaseg, &fslmc_iosegs, next) { + if (((vaddr && dmaseg->vaddr == vaddr) || !vaddr) && + dmaseg->iova == iovaddr && + dmaseg->size == len) { + is_io = 1; + break; + } } } - fslmc_iommu_type = (rte_vfio_noiommu_is_enabled() == 1) ? - RTE_VFIO_NOIOMMU : VFIO_TYPE1_IOMMU; + if (!dmaseg) { + DPAA2_BUS_ERR("IOVA(%" PRIx64 ") with length(%zx) not mapped", + iovaddr, len); + return 0; + } + + fd = fslmc_vfio_group_fd_by_name(group_name); + if (fd <= 0) { + DPAA2_BUS_ERR("%s: Get fd by name(%s) failed(%d)", + __func__, group_name, fd); + if (fd < 0) + return fd; + return -EIO; + } + if (fslmc_vfio_iommu_type(fd) == RTE_VFIO_NOIOMMU) { + DPAA2_BUS_DEBUG("Running in NOIOMMU mode"); + return 0; + } + + dma_unmap.size = len; + dma_unmap.iova = iovaddr; + + /* SET DMA MAP for IOMMU */ + if (!fslmc_vfio_container_connected(fd)) { + DPAA2_BUS_ERR("Container is not connected "); + return -EIO; + } + + ret = ioctl(fslmc_vfio_container_fd(), VFIO_IOMMU_UNMAP_DMA, + &dma_unmap); + if (ret) { + DPAA2_BUS_ERR("DMA un-map IOVA(%" PRIx64 " ~ %" PRIx64 ") err(%d)", + iovaddr, iovaddr + len, errno); + return ret; + } + + if (is_io) { + TAILQ_REMOVE(&fslmc_iosegs, dmaseg, next); + } else { + TAILQ_REMOVE(&fslmc_memsegs, dmaseg, next); + fslmc_mem_map_num--; + if (TAILQ_EMPTY(&fslmc_memsegs)) + fslmc_mem_va2iova = RTE_BAD_IOVA; + } + + free(dmaseg); + + return 0; +} + +uint64_t +rte_fslmc_cold_mem_vaddr_to_iova(void *vaddr, + uint64_t size) +{ + struct fslmc_dmaseg *dmaseg; + uint64_t va; + + va = (uint64_t)vaddr; + TAILQ_FOREACH(dmaseg, &fslmc_memsegs, next) { + if (va >= dmaseg->vaddr && + (va + size) < (dmaseg->vaddr + dmaseg->size)) { + return dmaseg->iova + va - dmaseg->vaddr; + } + } + + return RTE_BAD_IOVA; +} + +void * +rte_fslmc_cold_mem_iova_to_vaddr(uint64_t iova, + uint64_t size) +{ + struct fslmc_dmaseg *dmaseg; - /* get group number */ - ret = rte_vfio_get_group_num(SYSFS_FSL_MC_DEVICES, - fslmc_container, groupid); - if (ret <= 0) { - DPAA2_BUS_ERR("Unable to find %s IOMMU group", fslmc_container); - return -1; + TAILQ_FOREACH(dmaseg, &fslmc_memsegs, next) { + if (iova >= dmaseg->iova && + (iova + size) < (dmaseg->iova + dmaseg->size)) + return (void *)((uintptr_t)dmaseg->vaddr + + (uintptr_t)(iova - dmaseg->iova)); } 
- DPAA2_BUS_DEBUG("Container: %s has VFIO iommu group id = %d", - fslmc_container, *groupid); - - return 0; + return NULL; } -static int -vfio_connect_container(void) +__rte_hot uint64_t +rte_fslmc_mem_vaddr_to_iova(void *vaddr) { - int fd, ret; + if (likely(fslmc_mem_va2iova != RTE_BAD_IOVA)) + return (uint64_t)vaddr + fslmc_mem_va2iova; - if (vfio_container.used) { - DPAA2_BUS_DEBUG("No container available"); - return -1; - } + return rte_fslmc_cold_mem_vaddr_to_iova(vaddr, 0); +} - /* Try connecting to vfio container if already created */ - if (!ioctl(vfio_group.fd, VFIO_GROUP_SET_CONTAINER, - &vfio_container.fd)) { - DPAA2_BUS_DEBUG( - "Container pre-exists with FD[0x%x] for this group", - vfio_container.fd); - vfio_group.container = &vfio_container; - return 0; - } +__rte_hot void * +rte_fslmc_mem_iova_to_vaddr(uint64_t iova) +{ + if (likely(fslmc_mem_va2iova != RTE_BAD_IOVA)) + return (void *)((uintptr_t)iova - (uintptr_t)fslmc_mem_va2iova); - /* Opens main vfio file descriptor which represents the "container" */ - fd = rte_vfio_get_container_fd(); - if (fd < 0) { - DPAA2_BUS_ERR("Failed to open VFIO container"); - return -errno; - } + return rte_fslmc_cold_mem_iova_to_vaddr(iova, 0); +} - /* Check whether support for SMMU type IOMMU present or not */ - if (ioctl(fd, VFIO_CHECK_EXTENSION, fslmc_iommu_type)) { - /* Connect group to container */ - ret = ioctl(vfio_group.fd, VFIO_GROUP_SET_CONTAINER, &fd); - if (ret) { - DPAA2_BUS_ERR("Failed to setup group container"); - close(fd); - return -errno; - } +uint64_t +rte_fslmc_io_vaddr_to_iova(void *vaddr) +{ + struct fslmc_dmaseg *dmaseg = NULL; + uint64_t va = (uint64_t)vaddr; - ret = ioctl(fd, VFIO_SET_IOMMU, fslmc_iommu_type); - if (ret) { - DPAA2_BUS_ERR("Failed to setup VFIO iommu"); - close(fd); - return -errno; - } - } else { - DPAA2_BUS_ERR("No supported IOMMU available"); - close(fd); - return -EINVAL; + TAILQ_FOREACH(dmaseg, &fslmc_iosegs, next) { + if ((va >= dmaseg->vaddr) && + va < dmaseg->vaddr + dmaseg->size) + return dmaseg->iova + va - dmaseg->vaddr; } - vfio_container.used = 1; - vfio_container.fd = fd; - vfio_container.group = &vfio_group; - vfio_group.container = &vfio_container; - - return 0; + return RTE_BAD_IOVA; } -static int vfio_map_irq_region(struct fslmc_vfio_group *group) +void * +rte_fslmc_io_iova_to_vaddr(uint64_t iova) { - int ret; - unsigned long *vaddr = NULL; - struct vfio_iommu_type1_dma_map map = { - .argsz = sizeof(map), - .flags = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE, - .vaddr = 0x6030000, - .iova = 0x6030000, - .size = 0x1000, - }; + struct fslmc_dmaseg *dmaseg = NULL; - vaddr = (unsigned long *)mmap(NULL, 0x1000, PROT_WRITE | - PROT_READ, MAP_SHARED, container_device_fd, 0x6030000); - if (vaddr == MAP_FAILED) { - DPAA2_BUS_INFO("Unable to map region (errno = %d)", errno); - return -errno; + TAILQ_FOREACH(dmaseg, &fslmc_iosegs, next) { + if ((iova >= dmaseg->iova) && + iova < dmaseg->iova + dmaseg->size) + return (void *)((uintptr_t)dmaseg->vaddr + + (uintptr_t)(iova - dmaseg->iova)); } - msi_intr_vaddr = (uint32_t *)((char *)(vaddr) + 64); - map.vaddr = (unsigned long)vaddr; - ret = ioctl(group->container->fd, VFIO_IOMMU_MAP_DMA, &map); - if (ret == 0) - return 0; - - DPAA2_BUS_ERR("Unable to map DMA address (errno = %d)", errno); - return -errno; + return NULL; } -static int fslmc_map_dma(uint64_t vaddr, rte_iova_t iovaddr, size_t len); -static int fslmc_unmap_dma(uint64_t vaddr, rte_iova_t iovaddr, size_t len); - static void -fslmc_memevent_cb(enum rte_mem_event type, const void 
*addr, size_t len, - void *arg __rte_unused) +fslmc_memevent_cb(enum rte_mem_event type, const void *addr, + size_t len, void *arg __rte_unused) { struct rte_memseg_list *msl; struct rte_memseg *ms; @@ -223,12 +1088,9 @@ fslmc_memevent_cb(enum rte_mem_event type, const void *addr, size_t len, virt_addr = ms->addr_64; map_len = ms->len; - DPAA2_BUS_DEBUG("Request for %s, va=%p, " - "virt_addr=0x%" PRIx64 ", " - "iova=0x%" PRIx64 ", map_len=%zu", - type == RTE_MEM_EVENT_ALLOC ? - "alloc" : "dealloc", - va, virt_addr, iova_addr, map_len); + DPAA2_BUS_DEBUG("%s, va=%p, virt=%" PRIx64 ", iova=%" PRIx64 ", len=%zu", + type == RTE_MEM_EVENT_ALLOC ? "alloc" : "dealloc", + va, virt_addr, iova_addr, map_len); /* iova_addr may be set to RTE_BAD_IOVA */ if (iova_addr == RTE_BAD_IOVA) { @@ -243,103 +1105,20 @@ fslmc_memevent_cb(enum rte_mem_event type, const void *addr, size_t len, ret = fslmc_unmap_dma(virt_addr, iova_addr, map_len); if (ret != 0) { - DPAA2_BUS_ERR("DMA Mapping/Unmapping failed. " - "Map=%d, addr=%p, len=%zu, err:(%d)", - type, va, map_len, ret); + DPAA2_BUS_ERR("%s: Map=%d, addr=%p, len=%zu, err:(%d)", + type == RTE_MEM_EVENT_ALLOC ? + "DMA Mapping failed. " : + "DMA Unmapping failed. ", + type, va, map_len, ret); return; } cur_len += map_len; } - if (type == RTE_MEM_EVENT_ALLOC) - DPAA2_BUS_DEBUG("Total Mapped: addr=%p, len=%zu", - addr, len); - else - DPAA2_BUS_DEBUG("Total Unmapped: addr=%p, len=%zu", - addr, len); -} - -static int -fslmc_map_dma(uint64_t vaddr, rte_iova_t iovaddr __rte_unused, size_t len) -{ - struct fslmc_vfio_group *group; - struct vfio_iommu_type1_dma_map dma_map = { - .argsz = sizeof(struct vfio_iommu_type1_dma_map), - .flags = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE, - }; - int ret; - - if (fslmc_iommu_type == RTE_VFIO_NOIOMMU) { - DPAA2_BUS_DEBUG("Running in NOIOMMU mode"); - return 0; - } - - dma_map.size = len; - dma_map.vaddr = vaddr; - -#ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA - dma_map.iova = iovaddr; -#else - dma_map.iova = dma_map.vaddr; -#endif - - /* SET DMA MAP for IOMMU */ - group = &vfio_group; - - if (!group->container) { - DPAA2_BUS_ERR("Container is not connected "); - return -1; - } - - DPAA2_BUS_DEBUG("--> Map address: 0x%"PRIx64", size: %"PRIu64"", - (uint64_t)dma_map.vaddr, (uint64_t)dma_map.size); - ret = ioctl(group->container->fd, VFIO_IOMMU_MAP_DMA, &dma_map); - if (ret) { - DPAA2_BUS_ERR("VFIO_IOMMU_MAP_DMA API(errno = %d)", - errno); - return -1; - } - - return 0; -} - -static int -fslmc_unmap_dma(uint64_t vaddr, uint64_t iovaddr __rte_unused, size_t len) -{ - struct fslmc_vfio_group *group; - struct vfio_iommu_type1_dma_unmap dma_unmap = { - .argsz = sizeof(struct vfio_iommu_type1_dma_unmap), - .flags = 0, - }; - int ret; - - if (fslmc_iommu_type == RTE_VFIO_NOIOMMU) { - DPAA2_BUS_DEBUG("Running in NOIOMMU mode"); - return 0; - } - - dma_unmap.size = len; - dma_unmap.iova = vaddr; - - /* SET DMA MAP for IOMMU */ - group = &vfio_group; - - if (!group->container) { - DPAA2_BUS_ERR("Container is not connected "); - return -1; - } - - DPAA2_BUS_DEBUG("--> Unmap address: 0x%"PRIx64", size: %"PRIu64"", - (uint64_t)dma_unmap.iova, (uint64_t)dma_unmap.size); - ret = ioctl(group->container->fd, VFIO_IOMMU_UNMAP_DMA, &dma_unmap); - if (ret) { - DPAA2_BUS_ERR("VFIO_IOMMU_UNMAP_DMA API(errno = %d)", - errno); - return -1; - } - - return 0; + DPAA2_BUS_DEBUG("Total %s: addr=%p, len=%zu", + type == RTE_MEM_EVENT_ALLOC ? 
"Mapped" : "Unmapped", + addr, len); } static int @@ -366,53 +1145,27 @@ fslmc_dmamap_seg(const struct rte_memseg_list *msl __rte_unused, int rte_fslmc_vfio_mem_dmamap(uint64_t vaddr, uint64_t iova, uint64_t size) { - int ret; - struct fslmc_vfio_group *group; - struct vfio_iommu_type1_dma_map dma_map = { - .argsz = sizeof(struct vfio_iommu_type1_dma_map), - .flags = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE, - }; - - if (fslmc_iommu_type == RTE_VFIO_NOIOMMU) { - DPAA2_BUS_DEBUG("Running in NOIOMMU mode"); - return 0; - } - - /* SET DMA MAP for IOMMU */ - group = &vfio_group; - if (!group->container) { - DPAA2_BUS_ERR("Container is not connected"); - return -1; - } - - dma_map.size = size; - dma_map.vaddr = vaddr; - dma_map.iova = iova; - - DPAA2_BUS_DEBUG("VFIOdmamap 0x%"PRIx64":0x%"PRIx64",size 0x%"PRIx64, - (uint64_t)dma_map.vaddr, (uint64_t)dma_map.iova, - (uint64_t)dma_map.size); - ret = ioctl(group->container->fd, VFIO_IOMMU_MAP_DMA, - &dma_map); - if (ret) { - DPAA2_BUS_ERR("Unable to map DMA address (errno = %d)", - errno); - return ret; - } + return fslmc_map_dma(vaddr, iova, size); +} - return 0; +int +rte_fslmc_vfio_mem_dmaunmap(uint64_t iova, uint64_t size) +{ + return fslmc_unmap_dma(0, iova, size); } -int rte_fslmc_vfio_dmamap(void) +int +fslmc_vfio_dmamap(void) { int i = 0, ret; /* Lock before parsing and registering callback to memory subsystem */ rte_mcfg_mem_read_lock(); - if (rte_memseg_walk(fslmc_dmamap_seg, &i) < 0) { + ret = rte_memseg_walk(fslmc_dmamap_seg, &i); + if (ret) { rte_mcfg_mem_read_unlock(); - return -1; + return ret; } ret = rte_mem_event_callback_register("fslmc_memevent_clb", @@ -426,12 +1179,6 @@ int rte_fslmc_vfio_dmamap(void) DPAA2_BUS_DEBUG("Total %d segments found.", i); - /* TODO - This is a W.A. as VFIO currently does not add the mapping of - * the interrupt region to SMMU. This should be removed once the - * support is added in the Kernel. - */ - vfio_map_irq_region(&vfio_group); - /* Existing segments have been mapped and memory callback for hotplug * has been installed. 
*/ @@ -441,96 +1188,27 @@ int rte_fslmc_vfio_dmamap(void) } static int -fslmc_vfio_setup_device(const char *sysfs_base, const char *dev_addr, - int *vfio_dev_fd, struct vfio_device_info *device_info) +fslmc_vfio_setup_device(const char *dev_addr, + int *vfio_dev_fd, struct vfio_device_info *device_info) { struct vfio_group_status group_status = { .argsz = sizeof(group_status) }; - int vfio_group_fd, vfio_container_fd, iommu_group_no, ret; - - /* get group number */ - ret = rte_vfio_get_group_num(sysfs_base, dev_addr, &iommu_group_no); - if (ret < 0) - return -1; - - /* get the actual group fd */ - vfio_group_fd = rte_vfio_get_group_fd(iommu_group_no); - if (vfio_group_fd < 0 && vfio_group_fd != -ENOENT) - return -1; - - /* - * if vfio_group_fd == -ENOENT, that means the device - * isn't managed by VFIO - */ - if (vfio_group_fd == -ENOENT) { - DPAA2_BUS_WARN(" %s not managed by VFIO driver, skipping", - dev_addr); - return 1; - } - - /* Opens main vfio file descriptor which represents the "container" */ - vfio_container_fd = rte_vfio_get_container_fd(); - if (vfio_container_fd < 0) { - DPAA2_BUS_ERR("Failed to open VFIO container"); - return -errno; + int vfio_group_fd, ret; + const char *group_name = fslmc_vfio_get_group_name(); + + vfio_group_fd = fslmc_vfio_group_fd_by_name(group_name); + if (vfio_group_fd <= 0) { + DPAA2_BUS_ERR("%s: Get fd by name(%s) failed(%d)", + __func__, group_name, vfio_group_fd); + if (vfio_group_fd < 0) + return vfio_group_fd; + return -EIO; } - /* check if the group is viable */ - ret = ioctl(vfio_group_fd, VFIO_GROUP_GET_STATUS, &group_status); - if (ret) { - DPAA2_BUS_ERR(" %s cannot get group status, " - "error %i (%s)", dev_addr, - errno, strerror(errno)); - close(vfio_group_fd); - rte_vfio_clear_group(vfio_group_fd); - return -1; - } else if (!(group_status.flags & VFIO_GROUP_FLAGS_VIABLE)) { - DPAA2_BUS_ERR(" %s VFIO group is not viable!", dev_addr); - close(vfio_group_fd); - rte_vfio_clear_group(vfio_group_fd); - return -1; - } - /* At this point, we know that this group is viable (meaning, - * all devices are either bound to VFIO or not bound to anything) - */ - - /* check if group does not have a container yet */ - if (!(group_status.flags & VFIO_GROUP_FLAGS_CONTAINER_SET)) { - - /* add group to a container */ - ret = ioctl(vfio_group_fd, VFIO_GROUP_SET_CONTAINER, - &vfio_container_fd); - if (ret) { - DPAA2_BUS_ERR(" %s cannot add VFIO group to container, " - "error %i (%s)", dev_addr, - errno, strerror(errno)); - close(vfio_group_fd); - close(vfio_container_fd); - rte_vfio_clear_group(vfio_group_fd); - return -1; - } - - /* - * set an IOMMU type for container - * - */ - if (ioctl(vfio_container_fd, VFIO_CHECK_EXTENSION, - fslmc_iommu_type)) { - ret = ioctl(vfio_container_fd, VFIO_SET_IOMMU, - fslmc_iommu_type); - if (ret) { - DPAA2_BUS_ERR("Failed to setup VFIO iommu"); - close(vfio_group_fd); - close(vfio_container_fd); - return -errno; - } - } else { - DPAA2_BUS_ERR("No supported IOMMU available"); - close(vfio_group_fd); - close(vfio_container_fd); - return -EINVAL; - } + if (!fslmc_vfio_container_connected(vfio_group_fd)) { + DPAA2_BUS_ERR("Container is not connected"); + return -EIO; } /* get a file descriptor for the device */ @@ -540,26 +1218,21 @@ fslmc_vfio_setup_device(const char *sysfs_base, const char *dev_addr, * the VFIO group or the container not having IOMMU configured. 
*/ - DPAA2_BUS_WARN("Getting a vfio_dev_fd for %s failed", dev_addr); - close(vfio_group_fd); - close(vfio_container_fd); - rte_vfio_clear_group(vfio_group_fd); - return -1; + DPAA2_BUS_ERR("Getting a vfio_dev_fd for %s from %s failed", + dev_addr, group_name); + return -EIO; } /* test and setup the device */ ret = ioctl(*vfio_dev_fd, VFIO_DEVICE_GET_INFO, device_info); if (ret) { - DPAA2_BUS_ERR(" %s cannot get device info, error %i (%s)", - dev_addr, errno, strerror(errno)); - close(*vfio_dev_fd); - close(vfio_group_fd); - close(vfio_container_fd); - rte_vfio_clear_group(vfio_group_fd); - return -1; + DPAA2_BUS_ERR("%s cannot get device info err(%d)(%s)", + dev_addr, errno, strerror(errno)); + return ret; } - return 0; + return fslmc_vfio_group_add_dev(vfio_group_fd, *vfio_dev_fd, + dev_addr); } static intptr_t vfio_map_mcp_obj(const char *mcp_obj) @@ -571,8 +1244,7 @@ static intptr_t vfio_map_mcp_obj(const char *mcp_obj) struct vfio_device_info d_info = { .argsz = sizeof(d_info) }; struct vfio_region_info reg_info = { .argsz = sizeof(reg_info) }; - fslmc_vfio_setup_device(SYSFS_FSL_MC_DEVICES, mcp_obj, - &mc_fd, &d_info); + fslmc_vfio_setup_device(mcp_obj, &mc_fd, &d_info); /* getting device region info*/ ret = ioctl(mc_fd, VFIO_DEVICE_GET_REGION_INFO, ®_info); @@ -642,8 +1314,7 @@ int rte_dpaa2_intr_disable(struct rte_intr_handle *intr_handle, int index) vfio_dev_fd = rte_intr_dev_fd_get(intr_handle); ret = ioctl(vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set); if (ret) - DPAA2_BUS_ERR( - "Error disabling dpaa2 interrupts for fd %d", + DPAA2_BUS_ERR("Error disabling dpaa2 interrupts for fd %d", rte_intr_fd_get(intr_handle)); return ret; @@ -668,7 +1339,7 @@ rte_dpaa2_vfio_setup_intr(struct rte_intr_handle *intr_handle, if (ret < 0) { DPAA2_BUS_ERR("Cannot get IRQ(%d) info, error %i (%s)", i, errno, strerror(errno)); - return -1; + return ret; } /* if this vector cannot be used with eventfd, @@ -682,8 +1353,8 @@ rte_dpaa2_vfio_setup_intr(struct rte_intr_handle *intr_handle, fd = eventfd(0, EFD_NONBLOCK | EFD_CLOEXEC); if (fd < 0) { DPAA2_BUS_ERR("Cannot set up eventfd, error %i (%s)", - errno, strerror(errno)); - return -1; + errno, strerror(errno)); + return fd; } if (rte_intr_fd_set(intr_handle, fd)) @@ -699,7 +1370,61 @@ rte_dpaa2_vfio_setup_intr(struct rte_intr_handle *intr_handle, } /* if we're here, we haven't found a suitable interrupt vector */ - return -1; + return -EIO; +} + +static void +fslmc_close_iodevices(struct rte_dpaa2_device *dev, + int vfio_fd) +{ + struct rte_dpaa2_object *object = NULL; + struct rte_dpaa2_driver *drv; + int ret, probe_all; + + switch (dev->dev_type) { + case DPAA2_IO: + case DPAA2_CON: + case DPAA2_CI: + case DPAA2_BPOOL: + case DPAA2_MUX: + TAILQ_FOREACH(object, &dpaa2_obj_list, next) { + if (dev->dev_type == object->dev_type) + object->close(dev->object_id); + else + continue; + } + break; + case DPAA2_ETH: + case DPAA2_CRYPTO: + case DPAA2_QDMA: + probe_all = rte_fslmc_bus.bus.conf.scan_mode != + RTE_BUS_SCAN_ALLOWLIST; + TAILQ_FOREACH(drv, &rte_fslmc_bus.driver_list, next) { + if (drv->drv_type != dev->dev_type) + continue; + if (rte_dev_is_probed(&dev->device)) + continue; + if (probe_all || + (dev->device.devargs && + dev->device.devargs->policy == + RTE_DEV_ALLOWED)) { + ret = drv->remove(dev); + if (ret) + DPAA2_BUS_ERR("Unable to remove"); + } + } + break; + default: + break; + } + + ret = fslmc_vfio_group_remove_dev(vfio_fd, dev->device.name); + if (ret) { + DPAA2_BUS_ERR("Failed to remove %s from vfio", + dev->device.name); + } + 
DPAA2_BUS_LOG(DEBUG, "Device (%s) Closed", + dev->device.name); } /* @@ -709,17 +1434,21 @@ rte_dpaa2_vfio_setup_intr(struct rte_intr_handle *intr_handle, static int fslmc_process_iodevices(struct rte_dpaa2_device *dev) { - int dev_fd; + int dev_fd, ret; struct vfio_device_info device_info = { .argsz = sizeof(device_info) }; struct rte_dpaa2_object *object = NULL; - fslmc_vfio_setup_device(SYSFS_FSL_MC_DEVICES, dev->device.name, - &dev_fd, &device_info); + ret = fslmc_vfio_setup_device(dev->device.name, &dev_fd, + &device_info); + if (ret) + return ret; switch (dev->dev_type) { case DPAA2_ETH: - rte_dpaa2_vfio_setup_intr(dev->intr_handle, dev_fd, - device_info.num_irqs); + ret = rte_dpaa2_vfio_setup_intr(dev->intr_handle, dev_fd, + device_info.num_irqs); + if (ret) + return ret; break; case DPAA2_CON: case DPAA2_IO: @@ -730,8 +1459,7 @@ fslmc_process_iodevices(struct rte_dpaa2_device *dev) case DPAA2_DPRC: TAILQ_FOREACH(object, &dpaa2_obj_list, next) { if (dev->dev_type == object->dev_type) - object->create(dev_fd, &device_info, - dev->object_id); + object->create(dev_fd, &device_info, dev); else continue; } @@ -807,6 +1535,58 @@ fslmc_process_mcp(struct rte_dpaa2_device *dev) return ret; } +int +fslmc_vfio_close_group(void) +{ + struct rte_dpaa2_device *dev, *dev_temp; + int vfio_group_fd; + const char *group_name = fslmc_vfio_get_group_name(); + + vfio_group_fd = fslmc_vfio_group_fd_by_name(group_name); + if (vfio_group_fd <= 0) { + DPAA2_BUS_INFO("%s: Get fd by name(%s) failed(%d)", + __func__, group_name, vfio_group_fd); + if (vfio_group_fd < 0) + return vfio_group_fd; + return -EIO; + } + + RTE_TAILQ_FOREACH_SAFE(dev, &rte_fslmc_bus.device_list, next, dev_temp) { + if (dev->device.devargs && + dev->device.devargs->policy == RTE_DEV_BLOCKED) { + DPAA2_BUS_LOG(DEBUG, "%s Blacklisted, skipping", + dev->device.name); + TAILQ_REMOVE(&rte_fslmc_bus.device_list, dev, next); + continue; + } + switch (dev->dev_type) { + case DPAA2_ETH: + case DPAA2_CRYPTO: + case DPAA2_QDMA: + case DPAA2_IO: + fslmc_close_iodevices(dev, vfio_group_fd); + break; + case DPAA2_CON: + case DPAA2_CI: + case DPAA2_BPOOL: + case DPAA2_MUX: + if (rte_eal_process_type() == RTE_PROC_SECONDARY) + continue; + + fslmc_close_iodevices(dev, vfio_group_fd); + break; + case DPAA2_DPRTC: + default: + DPAA2_BUS_DEBUG("Device cannot be closed: Not supported (%s)", + dev->device.name); + } + } + + fslmc_vfio_clear_group(vfio_group_fd); + + return 0; +} + int fslmc_vfio_process_group(void) { @@ -861,7 +1641,7 @@ fslmc_vfio_process_group(void) ret = fslmc_process_mcp(dev); if (ret) { DPAA2_BUS_ERR("Unable to map MC Portal"); - return -1; + return ret; } found_mportal = 1; } @@ -878,7 +1658,7 @@ fslmc_vfio_process_group(void) /* Cannot continue if there is not even a single mportal */ if (!found_mportal) { DPAA2_BUS_ERR("No MC Portal device found. 
Not continuing"); - return -1; + return -EIO; } /* Search for DPRC device next as it updates endpoint of @@ -890,7 +1670,7 @@ fslmc_vfio_process_group(void) ret = fslmc_process_iodevices(dev); if (ret) { DPAA2_BUS_ERR("Unable to process dprc"); - return -1; + return ret; } TAILQ_REMOVE(&rte_fslmc_bus.device_list, dev, next); } @@ -947,7 +1727,7 @@ fslmc_vfio_process_group(void) if (ret) { DPAA2_BUS_DEBUG("Dev (%s) init failed", dev->device.name); - return -1; + return ret; } break; @@ -971,7 +1751,7 @@ fslmc_vfio_process_group(void) if (ret) { DPAA2_BUS_DEBUG("Dev (%s) init failed", dev->device.name); - return -1; + return ret; } break; @@ -992,84 +1772,85 @@ fslmc_vfio_process_group(void) int fslmc_vfio_setup_group(void) { - int groupid; - int ret; - int vfio_container_fd; + int vfio_group_fd, vfio_container_fd, ret; struct vfio_group_status status = { .argsz = sizeof(status) }; - - /* if already done once */ - if (container_device_fd) - return 0; - - ret = fslmc_get_container_group(&groupid); - if (ret) - return ret; - - /* In case this group was already opened, continue without any - * processing. - */ - if (vfio_group.groupid == groupid) { - DPAA2_BUS_ERR("groupid already exists %d", groupid); - return 0; + const char *group_name = fslmc_vfio_get_group_name(); + + /* MC VFIO setup entry */ + vfio_container_fd = fslmc_vfio_container_fd(); + if (vfio_container_fd <= 0) { + vfio_container_fd = fslmc_vfio_open_container_fd(); + if (vfio_container_fd < 0) { + DPAA2_BUS_ERR("Failed to create MC VFIO container"); + return vfio_container_fd; + } } - ret = rte_vfio_container_create(); - if (ret < 0) { - DPAA2_BUS_ERR("Failed to open VFIO container"); - return ret; + if (!group_name) { + DPAA2_BUS_DEBUG("DPAA2: DPRC not available"); + return -EINVAL; } - vfio_container_fd = ret; - /* Get the actual group fd */ - ret = rte_vfio_container_group_bind(vfio_container_fd, groupid); - if (ret < 0) - return ret; - vfio_group.fd = ret; + vfio_group_fd = fslmc_vfio_group_fd_by_name(group_name); + if (vfio_group_fd < 0) { + vfio_group_fd = fslmc_vfio_open_group_fd(group_name); + if (vfio_group_fd < 0) { + DPAA2_BUS_ERR("open group name(%s) failed(%d)", + group_name, vfio_group_fd); + return -rte_errno; + } + } /* Check group viability */ - ret = ioctl(vfio_group.fd, VFIO_GROUP_GET_STATUS, &status); + ret = ioctl(vfio_group_fd, VFIO_GROUP_GET_STATUS, &status); if (ret) { - DPAA2_BUS_ERR("VFIO error getting group status"); - close(vfio_group.fd); - rte_vfio_clear_group(vfio_group.fd); + DPAA2_BUS_ERR("VFIO(%s:fd=%d) error getting group status(%d)", + group_name, vfio_group_fd, ret); + fslmc_vfio_clear_group(vfio_group_fd); return ret; } if (!(status.flags & VFIO_GROUP_FLAGS_VIABLE)) { DPAA2_BUS_ERR("VFIO group not viable"); - close(vfio_group.fd); - rte_vfio_clear_group(vfio_group.fd); + fslmc_vfio_clear_group(vfio_group_fd); return -EPERM; } - /* Since Group is VIABLE, Store the groupid */ - vfio_group.groupid = groupid; /* check if group does not have a container yet */ if (!(status.flags & VFIO_GROUP_FLAGS_CONTAINER_SET)) { /* Now connect this IOMMU group to given container */ - ret = vfio_connect_container(); - if (ret) { - DPAA2_BUS_ERR( - "Error connecting container with groupid %d", - groupid); - close(vfio_group.fd); - rte_vfio_clear_group(vfio_group.fd); - return ret; - } + ret = vfio_connect_container(vfio_container_fd, + vfio_group_fd); + } else { + /* Here is supposed in secondary process, + * group has been set to container in primary process. 
+ */ + if (rte_eal_process_type() == RTE_PROC_PRIMARY) + DPAA2_BUS_WARN("This group has been set container?"); + ret = fslmc_vfio_connect_container(vfio_group_fd); + } + if (ret) { + DPAA2_BUS_ERR("vfio group connect failed(%d)", ret); + fslmc_vfio_clear_group(vfio_group_fd); + return ret; } /* Get Device information */ - ret = ioctl(vfio_group.fd, VFIO_GROUP_GET_DEVICE_FD, fslmc_container); + ret = ioctl(vfio_group_fd, VFIO_GROUP_GET_DEVICE_FD, group_name); if (ret < 0) { - DPAA2_BUS_ERR("Error getting device %s fd from group %d", - fslmc_container, vfio_group.groupid); - close(vfio_group.fd); - rte_vfio_clear_group(vfio_group.fd); + DPAA2_BUS_ERR("Error getting device %s fd", group_name); + fslmc_vfio_clear_group(vfio_group_fd); + return ret; + } + + ret = fslmc_vfio_mp_sync_setup(); + if (ret) { + DPAA2_BUS_ERR("VFIO MP sync setup failed!"); + fslmc_vfio_clear_group(vfio_group_fd); return ret; } - container_device_fd = ret; - DPAA2_BUS_DEBUG("VFIO Container FD is [0x%X]", - container_device_fd); + + DPAA2_BUS_DEBUG("VFIO GROUP FD is %d", vfio_group_fd); return 0; } diff --git a/drivers/bus/fslmc/fslmc_vfio.h b/drivers/bus/fslmc/fslmc_vfio.h index 133606a9fd..815970ec38 100644 --- a/drivers/bus/fslmc/fslmc_vfio.h +++ b/drivers/bus/fslmc/fslmc_vfio.h @@ -1,7 +1,7 @@ /* SPDX-License-Identifier: BSD-3-Clause * * Copyright (c) 2015-2016 Freescale Semiconductor, Inc. All rights reserved. - * Copyright 2016,2019 NXP + * Copyright 2016,2019-2023 NXP * */ @@ -20,26 +20,28 @@ #define DPAA2_MC_DPBP_DEVID 10 #define DPAA2_MC_DPCI_DEVID 11 -typedef struct fslmc_vfio_device { +struct fslmc_vfio_device { + LIST_ENTRY(fslmc_vfio_device) next; int fd; /* fslmc root container device ?? */ int index; /*index of child object */ + char dev_name[64]; struct fslmc_vfio_device *child; /* Child object */ -} fslmc_vfio_device; +}; -typedef struct fslmc_vfio_group { +struct fslmc_vfio_group { + LIST_ENTRY(fslmc_vfio_group) next; int fd; /* /dev/vfio/"groupid" */ int groupid; - struct fslmc_vfio_container *container; - int object_index; - struct fslmc_vfio_device *vfio_device; -} fslmc_vfio_group; + int connected; + char group_name[64]; /* dprc.x*/ + int iommu_type; + LIST_HEAD(, fslmc_vfio_device) vfio_devices; +}; -typedef struct fslmc_vfio_container { +struct fslmc_vfio_container { int fd; /* /dev/vfio/vfio */ - int used; - int index; /* index in group list */ - struct fslmc_vfio_group *group; -} fslmc_vfio_container; + LIST_HEAD(, fslmc_vfio_group) groups; +}; extern char *fslmc_container; @@ -55,9 +57,8 @@ int rte_dpaa2_vfio_setup_intr(struct rte_intr_handle *intr_handle, int fslmc_vfio_setup_group(void); int fslmc_vfio_process_group(void); +int fslmc_vfio_close_group(void); char *fslmc_get_container(void); -int fslmc_get_container_group(int *gropuid); -int rte_fslmc_vfio_dmamap(void); -int rte_fslmc_vfio_mem_dmamap(uint64_t vaddr, uint64_t iova, uint64_t size); - +int fslmc_get_container_group(const char *group_name, int *gropuid); +int fslmc_vfio_dmamap(void); #endif /* _FSLMC_VFIO_H_ */ diff --git a/drivers/bus/fslmc/mc/dpio.c b/drivers/bus/fslmc/mc/dpio.c index a3382ed142..97c08fa713 100644 --- a/drivers/bus/fslmc/mc/dpio.c +++ b/drivers/bus/fslmc/mc/dpio.c @@ -1,7 +1,7 @@ /* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) * * Copyright 2013-2016 Freescale Semiconductor Inc. 
- * Copyright 2016-2017 NXP + * Copyright 2016-2023 NXP * */ #include @@ -376,6 +376,98 @@ int dpio_get_stashing_destination(struct fsl_mc_io *mc_io, return 0; } +/** + * dpio_set_stashing_destination_by_core_id() - Set the stashing destination source + * using the core id. + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPIO object + * @core_id: Core id stashing destination + * + * Return: '0' on Success; Error code otherwise. + */ +int dpio_set_stashing_destination_by_core_id(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + uint8_t core_id) +{ + struct dpio_stashing_dest_by_core_id *cmd_params; + struct mc_command cmd = { 0 }; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPIO_CMDID_SET_STASHING_DEST_BY_CORE_ID, + cmd_flags, + token); + cmd_params = (struct dpio_stashing_dest_by_core_id *)cmd.params; + cmd_params->core_id = core_id; + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpio_set_stashing_destination_source() - Set the stashing destination source. + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPIO object + * @ss: Stashing destination source (0 manual/1 automatic) + * + * Return: '0' on Success; Error code otherwise. + */ +int dpio_set_stashing_destination_source(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + uint8_t ss) +{ + struct dpio_stashing_dest_source *cmd_params; + struct mc_command cmd = { 0 }; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPIO_CMDID_SET_STASHING_DEST_SOURCE, + cmd_flags, + token); + cmd_params = (struct dpio_stashing_dest_source *)cmd.params; + cmd_params->ss = ss; + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpio_get_stashing_destination_source() - Get the stashing destination source. + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPIO object + * @ss: Returns the stashing destination source (0 manual/1 automatic) + * + * Return: '0' on Success; Error code otherwise. + */ +int dpio_get_stashing_destination_source(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + uint8_t *ss) +{ + struct dpio_stashing_dest_source *rsp_params; + struct mc_command cmd = { 0 }; + int err; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPIO_CMDID_GET_STASHING_DEST_SOURCE, + cmd_flags, + token); + + /* send command to mc*/ + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + /* retrieve response parameters */ + rsp_params = (struct dpio_stashing_dest_source *)cmd.params; + *ss = rsp_params->ss; + + return 0; +} + /** * dpio_add_static_dequeue_channel() - Add a static dequeue channel. * @mc_io: Pointer to MC portal's I/O object diff --git a/drivers/bus/fslmc/mc/fsl_dpcon.h b/drivers/bus/fslmc/mc/fsl_dpcon.h index db72477c8a..e3a626077e 100644 --- a/drivers/bus/fslmc/mc/fsl_dpcon.h +++ b/drivers/bus/fslmc/mc/fsl_dpcon.h @@ -1,7 +1,7 @@ /* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) * * Copyright 2013-2016 Freescale Semiconductor Inc. 
- * Copyright 2017-2019 NXP + * Copyright 2017-2021, 2024 NXP * */ #ifndef __FSL_DPCON_H @@ -28,6 +28,7 @@ int dpcon_open(struct fsl_mc_io *mc_io, int dpcon_id, uint16_t *token); +__rte_internal int dpcon_close(struct fsl_mc_io *mc_io, uint32_t cmd_flags, uint16_t token); @@ -51,10 +52,12 @@ int dpcon_destroy(struct fsl_mc_io *mc_io, uint32_t cmd_flags, uint32_t obj_id); +__rte_internal int dpcon_enable(struct fsl_mc_io *mc_io, uint32_t cmd_flags, uint16_t token); +__rte_internal int dpcon_disable(struct fsl_mc_io *mc_io, uint32_t cmd_flags, uint16_t token); @@ -64,6 +67,7 @@ int dpcon_is_enabled(struct fsl_mc_io *mc_io, uint16_t token, int *en); +__rte_internal int dpcon_reset(struct fsl_mc_io *mc_io, uint32_t cmd_flags, uint16_t token); diff --git a/drivers/bus/fslmc/mc/fsl_dpio.h b/drivers/bus/fslmc/mc/fsl_dpio.h index c2db76bdf8..eddce58a5f 100644 --- a/drivers/bus/fslmc/mc/fsl_dpio.h +++ b/drivers/bus/fslmc/mc/fsl_dpio.h @@ -1,7 +1,7 @@ /* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) * * Copyright 2013-2016 Freescale Semiconductor Inc. - * Copyright 2016-2017 NXP + * Copyright 2016-2023 NXP * */ #ifndef __FSL_DPIO_H @@ -87,11 +87,30 @@ int dpio_set_stashing_destination(struct fsl_mc_io *mc_io, uint16_t token, uint8_t sdest); +__rte_internal int dpio_get_stashing_destination(struct fsl_mc_io *mc_io, uint32_t cmd_flags, uint16_t token, uint8_t *sdest); +__rte_internal +int dpio_set_stashing_destination_by_core_id(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + uint8_t core_id); + +__rte_internal +int dpio_set_stashing_destination_source(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + uint8_t ss); + +__rte_internal +int dpio_get_stashing_destination_source(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + uint8_t *ss); + __rte_internal int dpio_add_static_dequeue_channel(struct fsl_mc_io *mc_io, uint32_t cmd_flags, diff --git a/drivers/bus/fslmc/mc/fsl_dpio_cmd.h b/drivers/bus/fslmc/mc/fsl_dpio_cmd.h index 45ed01f809..360c68eaa5 100644 --- a/drivers/bus/fslmc/mc/fsl_dpio_cmd.h +++ b/drivers/bus/fslmc/mc/fsl_dpio_cmd.h @@ -1,7 +1,7 @@ /* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) * * Copyright 2013-2016 Freescale Semiconductor Inc. - * Copyright 2016-2019 NXP + * Copyright 2016-2023 NXP * */ #ifndef _FSL_DPIO_CMD_H @@ -40,6 +40,9 @@ #define DPIO_CMDID_GET_STASHING_DEST DPIO_CMD(0x121) #define DPIO_CMDID_ADD_STATIC_DEQUEUE_CHANNEL DPIO_CMD(0x122) #define DPIO_CMDID_REMOVE_STATIC_DEQUEUE_CHANNEL DPIO_CMD(0x123) +#define DPIO_CMDID_SET_STASHING_DEST_SOURCE DPIO_CMD(0x124) +#define DPIO_CMDID_GET_STASHING_DEST_SOURCE DPIO_CMD(0x125) +#define DPIO_CMDID_SET_STASHING_DEST_BY_CORE_ID DPIO_CMD(0x126) /* Macros for accessing command fields smaller than 1byte */ #define DPIO_MASK(field) \ @@ -98,6 +101,14 @@ struct dpio_stashing_dest { uint8_t sdest; }; +struct dpio_stashing_dest_source { + uint8_t ss; +}; + +struct dpio_stashing_dest_by_core_id { + uint8_t core_id; +}; + struct dpio_cmd_static_dequeue_channel { uint32_t dpcon_id; }; diff --git a/drivers/bus/fslmc/mc/fsl_dpmng.h b/drivers/bus/fslmc/mc/fsl_dpmng.h index c6ea220df7..dfa51b3a86 100644 --- a/drivers/bus/fslmc/mc/fsl_dpmng.h +++ b/drivers/bus/fslmc/mc/fsl_dpmng.h @@ -1,7 +1,7 @@ /* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) * * Copyright 2013-2015 Freescale Semiconductor Inc. 
- * Copyright 2017-2022 NXP + * Copyright 2017-2023 NXP * */ #ifndef __FSL_DPMNG_H @@ -20,7 +20,7 @@ struct fsl_mc_io; * Management Complex firmware version information */ #define MC_VER_MAJOR 10 -#define MC_VER_MINOR 32 +#define MC_VER_MINOR 37 /** * struct mc_version diff --git a/drivers/bus/fslmc/mc/fsl_dprc_cmd.h b/drivers/bus/fslmc/mc/fsl_dprc_cmd.h index 6efa5634d2..d5ba35b5f0 100644 --- a/drivers/bus/fslmc/mc/fsl_dprc_cmd.h +++ b/drivers/bus/fslmc/mc/fsl_dprc_cmd.h @@ -1,7 +1,7 @@ /* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) * * Copyright 2013-2016 Freescale Semiconductor Inc. - * Copyright 2016-2021 NXP + * Copyright 2016-2023 NXP * */ @@ -10,13 +10,17 @@ /* Minimal supported DPRC Version */ #define DPRC_VER_MAJOR 6 -#define DPRC_VER_MINOR 6 +#define DPRC_VER_MINOR 7 /* Command versioning */ #define DPRC_CMD_BASE_VERSION 1 +#define DPRC_CMD_VERSION_2 2 +#define DPRC_CMD_VERSION_3 3 #define DPRC_CMD_ID_OFFSET 4 #define DPRC_CMD(id) ((id << DPRC_CMD_ID_OFFSET) | DPRC_CMD_BASE_VERSION) +#define DPRC_CMD_V2(id) (((id) << DPRC_CMD_ID_OFFSET) | DPRC_CMD_VERSION_2) +#define DPRC_CMD_V3(id) (((id) << DPRC_CMD_ID_OFFSET) | DPRC_CMD_VERSION_3) /* Command IDs */ #define DPRC_CMDID_CLOSE DPRC_CMD(0x800) diff --git a/drivers/bus/fslmc/meson.build b/drivers/bus/fslmc/meson.build index 162ca286fe..70098ad778 100644 --- a/drivers/bus/fslmc/meson.build +++ b/drivers/bus/fslmc/meson.build @@ -1,5 +1,5 @@ # SPDX-License-Identifier: BSD-3-Clause -# Copyright 2018,2021 NXP +# Copyright 2018-2023 NXP if not is_linux build = false @@ -27,3 +27,4 @@ sources = files( ) includes += include_directories('mc', 'qbman/include', 'portal') +includes += include_directories('../../../lib/eal/linux') diff --git a/drivers/bus/fslmc/portal/dpaa2_hw_dpbp.c b/drivers/bus/fslmc/portal/dpaa2_hw_dpbp.c index d7f6e45b7d..0ca3b2b2e4 100644 --- a/drivers/bus/fslmc/portal/dpaa2_hw_dpbp.c +++ b/drivers/bus/fslmc/portal/dpaa2_hw_dpbp.c @@ -1,7 +1,7 @@ /* SPDX-License-Identifier: BSD-3-Clause * * Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved. 
- * Copyright 2016 NXP + * Copyright 2016,2020-2023 NXP * */ @@ -28,18 +28,30 @@ #include "portal/dpaa2_hw_pvt.h" #include "portal/dpaa2_hw_dpio.h" - TAILQ_HEAD(dpbp_dev_list, dpaa2_dpbp_dev); static struct dpbp_dev_list dpbp_dev_list = TAILQ_HEAD_INITIALIZER(dpbp_dev_list); /*!< DPBP device list */ +static struct dpaa2_dpbp_dev *get_dpbp_from_id(uint32_t dpbp_id) +{ + struct dpaa2_dpbp_dev *dpbp_dev = NULL; + + /* Get DPBP dev handle from list using index */ + TAILQ_FOREACH(dpbp_dev, &dpbp_dev_list, next) { + if (dpbp_dev->dpbp_id == dpbp_id) + break; + } + + return dpbp_dev; +} + static int dpaa2_create_dpbp_device(int vdev_fd __rte_unused, - struct vfio_device_info *obj_info __rte_unused, - int dpbp_id) + struct vfio_device_info *obj_info __rte_unused, + struct rte_dpaa2_device *obj) { struct dpaa2_dpbp_dev *dpbp_node; - int ret; + int ret, dpbp_id = obj->object_id; static int register_once; /* Allocate DPAA2 dpbp handle */ @@ -116,9 +128,25 @@ int dpaa2_dpbp_supported(void) return 0; } +static void +dpaa2_close_dpbp_device(int object_id) +{ + struct dpaa2_dpbp_dev *dpbp_dev = NULL; + + dpbp_dev = get_dpbp_from_id((uint32_t)object_id); + + if (dpbp_dev) { + dpaa2_free_dpbp_dev(dpbp_dev); + dpbp_close(&dpbp_dev->dpbp, CMD_PRI_LOW, dpbp_dev->token); + TAILQ_REMOVE(&dpbp_dev_list, dpbp_dev, next); + rte_free(dpbp_dev); + } +} + static struct rte_dpaa2_object rte_dpaa2_dpbp_obj = { .dev_type = DPAA2_BPOOL, .create = dpaa2_create_dpbp_device, + .close = dpaa2_close_dpbp_device, }; RTE_PMD_REGISTER_DPAA2_OBJECT(dpbp, rte_dpaa2_dpbp_obj); diff --git a/drivers/bus/fslmc/portal/dpaa2_hw_dpci.c b/drivers/bus/fslmc/portal/dpaa2_hw_dpci.c index 7e858a113f..0ea859f887 100644 --- a/drivers/bus/fslmc/portal/dpaa2_hw_dpci.c +++ b/drivers/bus/fslmc/portal/dpaa2_hw_dpci.c @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: BSD-3-Clause * - * Copyright 2017 NXP + * Copyright 2017, 2020, 2023 NXP * */ @@ -30,17 +30,30 @@ TAILQ_HEAD(dpci_dev_list, dpaa2_dpci_dev); static struct dpci_dev_list dpci_dev_list = TAILQ_HEAD_INITIALIZER(dpci_dev_list); /*!< DPCI device list */ +static struct dpaa2_dpci_dev *get_dpci_from_id(uint32_t dpci_id) +{ + struct dpaa2_dpci_dev *dpci_dev = NULL; + + /* Get DPCI dev handle from list using index */ + TAILQ_FOREACH(dpci_dev, &dpci_dev_list, next) { + if (dpci_dev->dpci_id == dpci_id) + break; + } + + return dpci_dev; +} + static int rte_dpaa2_create_dpci_device(int vdev_fd __rte_unused, - struct vfio_device_info *obj_info __rte_unused, - int dpci_id) + struct vfio_device_info *obj_info __rte_unused, + struct rte_dpaa2_device *obj) { struct dpaa2_dpci_dev *dpci_node; struct dpci_attr attr; struct dpci_rx_queue_cfg rx_queue_cfg; struct dpci_rx_queue_attr rx_attr; struct dpci_tx_queue_attr tx_attr; - int ret, i; + int ret, i, dpci_id = obj->object_id; /* Allocate DPAA2 dpci handle */ dpci_node = rte_malloc(NULL, sizeof(struct dpaa2_dpci_dev), 0); @@ -81,22 +94,10 @@ rte_dpaa2_create_dpci_device(int vdev_fd __rte_unused, } /* Allocate DQ storage for the DPCI Rx queues */ - rxq = &(dpci_node->rx_queue[i]); - rxq->q_storage = rte_malloc("dq_storage", - sizeof(struct queue_storage_info_t), - RTE_CACHE_LINE_SIZE); - if (!rxq->q_storage) { - DPAA2_BUS_ERR("q_storage allocation failed"); - ret = -ENOMEM; + rxq = &dpci_node->rx_queue[i]; + ret = dpaa2_queue_storage_alloc(rxq, 1); + if (ret) goto err; - } - - memset(rxq->q_storage, 0, sizeof(struct queue_storage_info_t)); - ret = dpaa2_alloc_dq_storage(rxq->q_storage); - if (ret) { - DPAA2_BUS_ERR("dpaa2_alloc_dq_storage failed"); - goto err; - } 
} /* Enable the device */ @@ -141,12 +142,9 @@ rte_dpaa2_create_dpci_device(int vdev_fd __rte_unused, err: for (i = 0; i < DPAA2_DPCI_MAX_QUEUES; i++) { - struct dpaa2_queue *rxq = &(dpci_node->rx_queue[i]); + struct dpaa2_queue *rxq = &dpci_node->rx_queue[i]; - if (rxq->q_storage) { - dpaa2_free_dq_storage(rxq->q_storage); - rte_free(rxq->q_storage); - } + dpaa2_queue_storage_free(rxq, 1); } rte_free(dpci_node); @@ -179,9 +177,26 @@ void rte_dpaa2_free_dpci_dev(struct dpaa2_dpci_dev *dpci) } } + +static void +rte_dpaa2_close_dpci_device(int object_id) +{ + struct dpaa2_dpci_dev *dpci_dev = NULL; + + dpci_dev = get_dpci_from_id((uint32_t)object_id); + + if (dpci_dev) { + rte_dpaa2_free_dpci_dev(dpci_dev); + dpci_close(&dpci_dev->dpci, CMD_PRI_LOW, dpci_dev->token); + TAILQ_REMOVE(&dpci_dev_list, dpci_dev, next); + rte_free(dpci_dev); + } +} + static struct rte_dpaa2_object rte_dpaa2_dpci_obj = { .dev_type = DPAA2_CI, .create = rte_dpaa2_create_dpci_device, + .close = rte_dpaa2_close_dpci_device, }; RTE_PMD_REGISTER_DPAA2_OBJECT(dpci, rte_dpaa2_dpci_obj); diff --git a/drivers/bus/fslmc/portal/dpaa2_hw_dpio.c b/drivers/bus/fslmc/portal/dpaa2_hw_dpio.c index d8a98326d9..2dfcf7a498 100644 --- a/drivers/bus/fslmc/portal/dpaa2_hw_dpio.c +++ b/drivers/bus/fslmc/portal/dpaa2_hw_dpio.c @@ -86,6 +86,19 @@ static int dpaa2_cluster_sz = 2; * Cluster 4 (ID = x07) : CPU14, CPU15; */ +static struct dpaa2_dpio_dev *get_dpio_dev_from_id(int32_t dpio_id) +{ + struct dpaa2_dpio_dev *dpio_dev = NULL; + + /* Get DPIO dev handle from list using index */ + TAILQ_FOREACH(dpio_dev, &dpio_dev_list, next) { + if (dpio_dev->hw_id == dpio_id) + break; + } + + return dpio_dev; +} + static int dpaa2_get_core_id(void) { @@ -327,9 +340,8 @@ dpaa2_affine_qbman_swp(void) } RTE_PER_LCORE(_dpaa2_io).dpio_dev = dpio_dev; - DPAA2_BUS_INFO( - "DPAA Portal=%p (%d) is affined to thread %" PRIu64, - dpio_dev, dpio_dev->index, tid); + DPAA2_BUS_DEBUG("Portal[%d] is affined to thread %" PRIu64, + dpio_dev->index, tid); } return 0; } @@ -349,9 +361,8 @@ dpaa2_affine_qbman_ethrx_swp(void) } RTE_PER_LCORE(_dpaa2_io).ethrx_dpio_dev = dpio_dev; - DPAA2_BUS_INFO( - "DPAA Portal=%p (%d) is affined for eth rx to thread %" - PRIu64, dpio_dev, dpio_dev->index, tid); + DPAA2_BUS_DEBUG("Portal_eth_rx[%d] is affined to thread %" PRIu64, + dpio_dev->index, tid); } return 0; } @@ -366,16 +377,36 @@ static void dpaa2_portal_finish(void *arg) pthread_setspecific(dpaa2_portal_key, NULL); } +static void +dpaa2_close_dpio_device(int object_id) +{ + struct dpaa2_dpio_dev *dpio_dev = NULL; + + dpio_dev = get_dpio_dev_from_id((int32_t)object_id); + + if (dpio_dev) { + if (dpio_dev->dpio) { + dpio_disable(dpio_dev->dpio, CMD_PRI_LOW, + dpio_dev->token); + dpio_close(dpio_dev->dpio, CMD_PRI_LOW, + dpio_dev->token); + rte_free(dpio_dev->dpio); + } + TAILQ_REMOVE(&dpio_dev_list, dpio_dev, next); + rte_free(dpio_dev); + } +} + static int dpaa2_create_dpio_device(int vdev_fd, - struct vfio_device_info *obj_info, - int object_id) + struct vfio_device_info *obj_info, + struct rte_dpaa2_device *obj) { struct dpaa2_dpio_dev *dpio_dev = NULL; struct vfio_region_info reg_info = { .argsz = sizeof(reg_info)}; struct qbman_swp_desc p_des; struct dpio_attr attr; - int ret; + int ret, object_id = obj->object_id; if (obj_info->num_regions < NUM_DPIO_REGIONS) { DPAA2_BUS_ERR("Not sufficient number of DPIO regions"); @@ -582,6 +613,7 @@ dpaa2_free_dq_storage(struct queue_storage_info_t *q_storage) for (i = 0; i < NUM_DQS_PER_QUEUE; i++) { rte_free(q_storage->dq_storage[i]); + 
q_storage->dq_storage[i] = NULL; } } @@ -591,7 +623,7 @@ dpaa2_alloc_dq_storage(struct queue_storage_info_t *q_storage) int i = 0; for (i = 0; i < NUM_DQS_PER_QUEUE; i++) { - q_storage->dq_storage[i] = rte_malloc(NULL, + q_storage->dq_storage[i] = rte_zmalloc(NULL, dpaa2_dqrr_size * sizeof(struct qbman_result), RTE_CACHE_LINE_SIZE); if (!q_storage->dq_storage[i]) @@ -599,8 +631,10 @@ dpaa2_alloc_dq_storage(struct queue_storage_info_t *q_storage) } return 0; fail: - while (--i >= 0) + while (--i >= 0) { rte_free(q_storage->dq_storage[i]); + q_storage->dq_storage[i] = NULL; + } return -1; } @@ -643,6 +677,7 @@ dpaa2_free_eq_descriptors(void) static struct rte_dpaa2_object rte_dpaa2_dpio_obj = { .dev_type = DPAA2_IO, .create = dpaa2_create_dpio_device, + .close = dpaa2_close_dpio_device, }; RTE_PMD_REGISTER_DPAA2_OBJECT(dpio, rte_dpaa2_dpio_obj); diff --git a/drivers/bus/fslmc/portal/dpaa2_hw_dpio.h b/drivers/bus/fslmc/portal/dpaa2_hw_dpio.h index 7407f8d38d..328e1e788a 100644 --- a/drivers/bus/fslmc/portal/dpaa2_hw_dpio.h +++ b/drivers/bus/fslmc/portal/dpaa2_hw_dpio.h @@ -1,7 +1,7 @@ /* SPDX-License-Identifier: BSD-3-Clause * * Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved. - * Copyright 2016-2019 NXP + * Copyright 2016-2023 NXP * */ @@ -12,6 +12,7 @@ #include #include +#include struct dpaa2_io_portal_t { struct dpaa2_dpio_dev *dpio_dev; diff --git a/drivers/bus/fslmc/portal/dpaa2_hw_dprc.c b/drivers/bus/fslmc/portal/dpaa2_hw_dprc.c index 65e2d799c3..a057cb1309 100644 --- a/drivers/bus/fslmc/portal/dpaa2_hw_dprc.c +++ b/drivers/bus/fslmc/portal/dpaa2_hw_dprc.c @@ -23,13 +23,13 @@ static struct dprc_dev_list dprc_dev_list static int rte_dpaa2_create_dprc_device(int vdev_fd __rte_unused, - struct vfio_device_info *obj_info __rte_unused, - int dprc_id) + struct vfio_device_info *obj_info __rte_unused, + struct rte_dpaa2_device *obj) { struct dpaa2_dprc_dev *dprc_node; struct dprc_endpoint endpoint1, endpoint2; struct rte_dpaa2_device *dev, *dev_tmp; - int ret; + int ret, dprc_id = obj->object_id; /* Allocate DPAA2 dprc handle */ dprc_node = rte_malloc(NULL, sizeof(struct dpaa2_dprc_dev), 0); @@ -50,6 +50,8 @@ rte_dpaa2_create_dprc_device(int vdev_fd __rte_unused, } RTE_TAILQ_FOREACH_SAFE(dev, &rte_fslmc_bus.device_list, next, dev_tmp) { + /** DPRC is always created before its children are created. */ + dev->container = dprc_node; if (dev->dev_type == DPAA2_ETH) { int link_state; diff --git a/drivers/bus/fslmc/portal/dpaa2_hw_pvt.h b/drivers/bus/fslmc/portal/dpaa2_hw_pvt.h index 4c30e6db18..be0719aac6 100644 --- a/drivers/bus/fslmc/portal/dpaa2_hw_pvt.h +++ b/drivers/bus/fslmc/portal/dpaa2_hw_pvt.h @@ -1,7 +1,7 @@ /* SPDX-License-Identifier: BSD-3-Clause * * Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved. 
- * Copyright 2016-2021 NXP + * Copyright 2016-2024 NXP * */ @@ -14,6 +14,7 @@ #include #include +#include #ifndef false #define false 0 @@ -80,6 +81,8 @@ #define DPAA2_PACKET_LAYOUT_ALIGN 64 /*changing from 256 */ #define DPAA2_DPCI_MAX_QUEUES 2 +#define DPAA2_INVALID_FLOW_ID 0xffff +#define DPAA2_INVALID_CGID 0xff struct dpaa2_queue; @@ -165,7 +168,9 @@ struct __rte_cache_aligned dpaa2_queue { uint64_t tx_pkts; uint64_t err_pkts; union { - struct queue_storage_info_t *q_storage; + /**Ingress*/ + struct queue_storage_info_t *q_storage[RTE_MAX_LCORE]; + /**Egress*/ struct qbman_result *cscn; }; struct rte_event ev; @@ -176,7 +181,7 @@ struct __rte_cache_aligned dpaa2_queue { struct dpaa2_queue *tx_conf_queue; int32_t eventfd; /*!< Event Fd of this queue */ uint16_t nb_desc; - uint16_t resv; + uint16_t tm_sw_td; /*!< TM software taildrop */ uint64_t offloads; uint64_t lpbk_cntx; uint8_t data_stashing_off; @@ -187,6 +192,38 @@ struct swp_active_dqs { uint64_t reserved[7]; }; +#define dpaa2_queue_storage_alloc(q, num) \ +({ \ + int ret = 0, i; \ + \ + for (i = 0; i < (num); i++) { \ + (q)->q_storage[i] = rte_zmalloc(NULL, \ + sizeof(struct queue_storage_info_t), \ + RTE_CACHE_LINE_SIZE); \ + if (!(q)->q_storage[i]) { \ + ret = -ENOBUFS; \ + break; \ + } \ + ret = dpaa2_alloc_dq_storage((q)->q_storage[i]); \ + if (ret) \ + break; \ + } \ + ret; \ +}) + +#define dpaa2_queue_storage_free(q, num) \ +({ \ + int i; \ + \ + for (i = 0; i < (num); i++) { \ + if ((q)->q_storage[i]) { \ + dpaa2_free_dq_storage((q)->q_storage[i]); \ + rte_free((q)->q_storage[i]); \ + (q)->q_storage[i] = NULL; \ + } \ + } \ +}) + #define NUM_MAX_SWP 64 extern struct swp_active_dqs rte_global_active_dqs_list[NUM_MAX_SWP]; @@ -326,6 +363,7 @@ enum qbman_fd_format { #define DPAA2_GET_FD_BPID(fd) (((fd)->simple.bpid_offset & 0x00003FFF)) #define DPAA2_GET_FD_IVP(fd) (((fd)->simple.bpid_offset & 0x00004000) >> 14) #define DPAA2_GET_FD_OFFSET(fd) (((fd)->simple.bpid_offset & 0x0FFF0000) >> 16) +#define DPAA2_GET_FD_DROPP(fd) (((fd)->simple.ctrl & 0x07000000) >> 24) #define DPAA2_GET_FD_FRC(fd) ((fd)->simple.frc) #define DPAA2_GET_FD_FLC(fd) \ (((uint64_t)((fd)->simple.flc_hi) << 32) + (fd)->simple.flc_lo) @@ -366,83 +404,63 @@ enum qbman_fd_format { */ #define DPAA2_EQ_RESP_ALWAYS 1 -/* Various structures representing contiguous memory maps */ -struct dpaa2_memseg { - TAILQ_ENTRY(dpaa2_memseg) next; - char *vaddr; - rte_iova_t iova; - size_t len; -}; - -#ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA -extern uint8_t dpaa2_virt_mode; -static void *dpaa2_mem_ptov(phys_addr_t paddr) __rte_unused; - -static void *dpaa2_mem_ptov(phys_addr_t paddr) +static inline uint64_t +dpaa2_mem_va_to_iova(void *va) { - void *va; - - if (dpaa2_virt_mode) - return (void *)(size_t)paddr; - - va = (void *)dpaax_iova_table_get_va(paddr); - if (likely(va != NULL)) - return va; + if (likely(rte_eal_iova_mode() == RTE_IOVA_VA)) + return (uint64_t)va; - /* If not, Fallback to full memseg list searching */ - va = rte_mem_iova2virt(paddr); - - return va; + return rte_fslmc_mem_vaddr_to_iova(va); } -static phys_addr_t dpaa2_mem_vtop(uint64_t vaddr) __rte_unused; - -static phys_addr_t dpaa2_mem_vtop(uint64_t vaddr) +static inline void * +dpaa2_mem_iova_to_va(uint64_t iova) { - const struct rte_memseg *memseg; - - if (dpaa2_virt_mode) - return vaddr; + if (likely(rte_eal_iova_mode() == RTE_IOVA_VA)) + return (void *)(uintptr_t)iova; - memseg = rte_mem_virt2memseg((void *)(uintptr_t)vaddr, NULL); - if (memseg) - return memseg->iova + RTE_PTR_DIFF(vaddr, memseg->addr); 
- return (size_t)NULL; + return rte_fslmc_mem_iova_to_vaddr(iova); } -/** - * When we are using Physical addresses as IO Virtual Addresses, - * Need to call conversion routines dpaa2_mem_vtop & dpaa2_mem_ptov - * wherever required. - * These routines are called with help of below MACRO's - */ - #define DPAA2_MBUF_VADDR_TO_IOVA(mbuf) ((mbuf)->buf_iova) - -/** - * macro to convert Virtual address to IOVA - */ -#define DPAA2_VADDR_TO_IOVA(_vaddr) dpaa2_mem_vtop((size_t)(_vaddr)) - -/** - * macro to convert IOVA to Virtual address - */ -#define DPAA2_IOVA_TO_VADDR(_iova) dpaa2_mem_ptov((size_t)(_iova)) - -/** - * macro to convert modify the memory containing IOVA to Virtual address - */ +#define DPAA2_VADDR_TO_IOVA(_vaddr) \ + dpaa2_mem_va_to_iova((void *)(uintptr_t)_vaddr) +#define DPAA2_IOVA_TO_VADDR(_iova) \ + dpaa2_mem_iova_to_va((uint64_t)_iova) #define DPAA2_MODIFY_IOVA_TO_VADDR(_mem, _type) \ - {_mem = (_type)(dpaa2_mem_ptov((size_t)(_mem))); } + {_mem = (_type)DPAA2_IOVA_TO_VADDR(_mem); } + +#define DPAA2_VAMODE_VADDR_TO_IOVA(_vaddr) ((uint64_t)_vaddr) +#define DPAA2_VAMODE_IOVA_TO_VADDR(_iova) ((void *)_iova) +#define DPAA2_VAMODE_MODIFY_IOVA_TO_VADDR(_mem, _type) \ + {_mem = (_type)(_mem); } + +#define DPAA2_PAMODE_VADDR_TO_IOVA(_vaddr) \ + rte_fslmc_mem_vaddr_to_iova((void *)_vaddr) +#define DPAA2_PAMODE_IOVA_TO_VADDR(_iova) \ + rte_fslmc_mem_iova_to_vaddr((uint64_t)_iova) +#define DPAA2_PAMODE_MODIFY_IOVA_TO_VADDR(_mem, _type) \ + {_mem = (_type)rte_fslmc_mem_iova_to_vaddr(_mem); } + +static inline uint64_t +dpaa2_mem_va_to_iova_check(void *va, uint64_t size) +{ + uint64_t iova = rte_fslmc_cold_mem_vaddr_to_iova(va, size); + + if (iova == RTE_BAD_IOVA) + return RTE_BAD_IOVA; -#else /* RTE_LIBRTE_DPAA2_USE_PHYS_IOVA */ + /** Double check the iova is valid.*/ + if (iova != rte_mem_virt2iova(va)) + return RTE_BAD_IOVA; -#define DPAA2_MBUF_VADDR_TO_IOVA(mbuf) ((mbuf)->buf_addr) -#define DPAA2_VADDR_TO_IOVA(_vaddr) (phys_addr_t)(_vaddr) -#define DPAA2_IOVA_TO_VADDR(_iova) (void *)(_iova) -#define DPAA2_MODIFY_IOVA_TO_VADDR(_mem, _type) + return iova; +} -#endif /* RTE_LIBRTE_DPAA2_USE_PHYS_IOVA */ +#define DPAA2_VADDR_TO_IOVA_AND_CHECK(_vaddr, size) \ + dpaa2_mem_va_to_iova_check(_vaddr, size) +#define DPAA2_IOVA_TO_VADDR_AND_CHECK(_iova, size) \ + rte_fslmc_cold_mem_iova_to_vaddr(_iova, size) static inline int check_swp_active_dqs(uint16_t dpio_index) diff --git a/drivers/bus/fslmc/qbman/include/compat.h b/drivers/bus/fslmc/qbman/include/compat.h index ece5da5906..4ac3254bc7 100644 --- a/drivers/bus/fslmc/qbman/include/compat.h +++ b/drivers/bus/fslmc/qbman/include/compat.h @@ -16,7 +16,9 @@ #include #include #include + #include +#include /* The following definitions are primarily to allow the single-source driver * interfaces to be included by arbitrary program code. Ie. for interfaces that @@ -24,10 +26,6 @@ * with certain attributes and types used in those interfaces. */ -/* Required compiler attributes */ -#define likely(x) __builtin_expect(!!(x), 1) -#define unlikely(x) __builtin_expect(!!(x), 0) - /* Required types */ typedef uint64_t dma_addr_t; diff --git a/drivers/bus/fslmc/qbman/include/fsl_qbman_base.h b/drivers/bus/fslmc/qbman/include/fsl_qbman_base.h index 48ffb1b46e..7528b610e1 100644 --- a/drivers/bus/fslmc/qbman/include/fsl_qbman_base.h +++ b/drivers/bus/fslmc/qbman/include/fsl_qbman_base.h @@ -1,7 +1,7 @@ /* SPDX-License-Identifier: BSD-3-Clause * * Copyright (C) 2014 Freescale Semiconductor, Inc. 
- * Copyright 2017-2019 NXP + * Copyright 2017-2024 NXP * */ #ifndef _FSL_QBMAN_BASE_H @@ -141,12 +141,23 @@ struct qbman_fd { uint32_t saddr_hi; uint32_t len_sl:18; - uint32_t rsv1:14; - + uint32_t rsv13:2; + uint32_t svfid:6; + uint32_t rsv12:2; + uint32_t spfid:2; + uint32_t rsv1:2; uint32_t sportid:4; - uint32_t rsv2:22; + uint32_t rsv2:1; + uint32_t sca:1; + uint32_t sat:2; + uint32_t sattr:3; + uint32_t svfa:1; + uint32_t stc:3; uint32_t bmt:1; - uint32_t rsv3:1; + uint32_t dvfid:6; + uint32_t rsv3:2; + uint32_t dpfid:2; + uint32_t rsv31:2; uint32_t fmt:2; uint32_t sl:1; uint32_t rsv4:1; @@ -154,12 +165,14 @@ struct qbman_fd { uint32_t acc_err:4; uint32_t rsv5:4; uint32_t ser:1; - uint32_t rsv6:3; + uint32_t rsv6:2; + uint32_t wns:1; uint32_t wrttype:4; uint32_t dqos:3; uint32_t drbp:1; uint32_t dlwc:2; - uint32_t rsv7:2; + uint32_t rsv7:1; + uint32_t rns:1; uint32_t rdttype:4; uint32_t sqos:3; uint32_t srbp:1; @@ -182,7 +195,7 @@ struct qbman_fd { uint32_t saddr_lo; uint32_t saddr_hi:17; - uint32_t rsv1:15; + uint32_t rsv1_att:15; uint32_t len; diff --git a/drivers/bus/fslmc/qbman/include/fsl_qbman_debug.h b/drivers/bus/fslmc/qbman/include/fsl_qbman_debug.h index 18b6a3c2e4..297d4ed4fc 100644 --- a/drivers/bus/fslmc/qbman/include/fsl_qbman_debug.h +++ b/drivers/bus/fslmc/qbman/include/fsl_qbman_debug.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: BSD-3-Clause * Copyright (C) 2015 Freescale Semiconductor, Inc. - * Copyright 2018-2020 NXP + * Copyright 2018-2023 NXP */ #ifndef _FSL_QBMAN_DEBUG_H #define _FSL_QBMAN_DEBUG_H @@ -105,16 +105,6 @@ uint32_t qbman_fq_attr_get_vfqid(struct qbman_fq_query_rslt *r); uint32_t qbman_fq_attr_get_erfqid(struct qbman_fq_query_rslt *r); uint16_t qbman_fq_attr_get_opridsz(struct qbman_fq_query_rslt *r); -/* FQ query command for non-programmable fields*/ -enum qbman_fq_schedstate_e { - qbman_fq_schedstate_oos = 0, - qbman_fq_schedstate_retired, - qbman_fq_schedstate_tentatively_scheduled, - qbman_fq_schedstate_truly_scheduled, - qbman_fq_schedstate_parked, - qbman_fq_schedstate_held_active, -}; - struct qbman_fq_query_np_rslt { uint8_t verb; uint8_t rslt; diff --git a/drivers/bus/fslmc/qbman/qbman_debug.c b/drivers/bus/fslmc/qbman/qbman_debug.c index eea06988ff..0e471ec3fd 100644 --- a/drivers/bus/fslmc/qbman/qbman_debug.c +++ b/drivers/bus/fslmc/qbman/qbman_debug.c @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: BSD-3-Clause * Copyright (C) 2015 Freescale Semiconductor, Inc. 
- * Copyright 2018-2020 NXP + * Copyright 2018-2020,2022 NXP */ #include "compat.h" @@ -37,6 +37,7 @@ int qbman_bp_query(struct qbman_swp *s, uint32_t bpid, struct qbman_bp_query_rslt *r) { struct qbman_bp_query_desc *p; + struct qbman_bp_query_rslt *bp_query_rslt; /* Start the management command */ p = (struct qbman_bp_query_desc *)qbman_swp_mc_start(s); @@ -47,14 +48,16 @@ int qbman_bp_query(struct qbman_swp *s, uint32_t bpid, p->bpid = bpid; /* Complete the management command */ - *r = *(struct qbman_bp_query_rslt *)qbman_swp_mc_complete(s, p, - QBMAN_BP_QUERY); - if (!r) { + bp_query_rslt = (struct qbman_bp_query_rslt *)qbman_swp_mc_complete(s, + p, QBMAN_BP_QUERY); + if (!bp_query_rslt) { pr_err("qbman: Query BPID %d failed, no response\n", bpid); return -EIO; } + *r = *bp_query_rslt; + /* Decode the outcome */ QBMAN_BUG_ON((r->verb & QBMAN_RESPONSE_VERB_MASK) != QBMAN_BP_QUERY); @@ -202,20 +205,23 @@ int qbman_fq_query(struct qbman_swp *s, uint32_t fqid, struct qbman_fq_query_rslt *r) { struct qbman_fq_query_desc *p; + struct qbman_fq_query_rslt *fq_query_rslt; p = (struct qbman_fq_query_desc *)qbman_swp_mc_start(s); if (!p) return -EBUSY; p->fqid = fqid; - *r = *(struct qbman_fq_query_rslt *)qbman_swp_mc_complete(s, p, - QBMAN_FQ_QUERY); - if (!r) { + fq_query_rslt = (struct qbman_fq_query_rslt *)qbman_swp_mc_complete(s, + p, QBMAN_FQ_QUERY); + if (!fq_query_rslt) { pr_err("qbman: Query FQID %d failed, no response\n", fqid); return -EIO; } + *r = *fq_query_rslt; + /* Decode the outcome */ QBMAN_BUG_ON((r->verb & QBMAN_RESPONSE_VERB_MASK) != QBMAN_FQ_QUERY); @@ -398,20 +404,23 @@ int qbman_cgr_query(struct qbman_swp *s, uint32_t cgid, struct qbman_cgr_query_rslt *r) { struct qbman_cgr_query_desc *p; + struct qbman_cgr_query_rslt *cgr_query_rslt; p = (struct qbman_cgr_query_desc *)qbman_swp_mc_start(s); if (!p) return -EBUSY; p->cgid = cgid; - *r = *(struct qbman_cgr_query_rslt *)qbman_swp_mc_complete(s, p, - QBMAN_CGR_QUERY); - if (!r) { + cgr_query_rslt = (struct qbman_cgr_query_rslt *)qbman_swp_mc_complete(s, + p, QBMAN_CGR_QUERY); + if (!cgr_query_rslt) { pr_err("qbman: Query CGID %d failed, no response\n", cgid); return -EIO; } + *r = *cgr_query_rslt; + /* Decode the outcome */ QBMAN_BUG_ON((r->verb & QBMAN_RESPONSE_VERB_MASK) != QBMAN_CGR_QUERY); @@ -473,20 +482,23 @@ int qbman_cgr_wred_query(struct qbman_swp *s, uint32_t cgid, struct qbman_wred_query_rslt *r) { struct qbman_cgr_query_desc *p; + struct qbman_wred_query_rslt *wred_query_rslt; p = (struct qbman_cgr_query_desc *)qbman_swp_mc_start(s); if (!p) return -EBUSY; p->cgid = cgid; - *r = *(struct qbman_wred_query_rslt *)qbman_swp_mc_complete(s, p, - QBMAN_WRED_QUERY); - if (!r) { + wred_query_rslt = (struct qbman_wred_query_rslt *)qbman_swp_mc_complete( + s, p, QBMAN_WRED_QUERY); + if (!wred_query_rslt) { pr_err("qbman: Query CGID WRED %d failed, no response\n", cgid); return -EIO; } + *r = *wred_query_rslt; + /* Decode the outcome */ QBMAN_BUG_ON((r->verb & QBMAN_RESPONSE_VERB_MASK) != QBMAN_WRED_QUERY); @@ -527,7 +539,7 @@ void qbman_cgr_attr_wred_dp_decompose(uint32_t dp, uint64_t *minth, if (mn == 0) *maxth = ma; else - *maxth = ((ma+256) * (1<<(mn-1))); + *maxth = ((uint64_t)(ma+256) * (1<<(mn-1))); if (step_s == 0) *minth = *maxth - step_i; @@ -630,6 +642,7 @@ int qbman_wqchan_query(struct qbman_swp *s, uint16_t chanid, struct qbman_wqchan_query_rslt *r) { struct qbman_wqchan_query_desc *p; + struct qbman_wqchan_query_rslt *wqchan_query_rslt; /* Start the management command */ p = (struct qbman_wqchan_query_desc 
*)qbman_swp_mc_start(s); @@ -640,14 +653,16 @@ int qbman_wqchan_query(struct qbman_swp *s, uint16_t chanid, p->chid = chanid; /* Complete the management command */ - *r = *(struct qbman_wqchan_query_rslt *)qbman_swp_mc_complete(s, p, - QBMAN_WQ_QUERY); - if (!r) { + wqchan_query_rslt = (struct qbman_wqchan_query_rslt *)qbman_swp_mc_complete( + s, p, QBMAN_WQ_QUERY); + if (!wqchan_query_rslt) { pr_err("qbman: Query WQ Channel %d failed, no response\n", chanid); return -EIO; } + *r = *wqchan_query_rslt; + /* Decode the outcome */ QBMAN_BUG_ON((r->verb & QBMAN_RESPONSE_VERB_MASK) != QBMAN_WQ_QUERY); diff --git a/drivers/bus/fslmc/qbman/qbman_portal.c b/drivers/bus/fslmc/qbman/qbman_portal.c index 1f24cdce7e..5d0cedc136 100644 --- a/drivers/bus/fslmc/qbman/qbman_portal.c +++ b/drivers/bus/fslmc/qbman/qbman_portal.c @@ -1,7 +1,7 @@ /* SPDX-License-Identifier: BSD-3-Clause * * Copyright (C) 2014-2016 Freescale Semiconductor, Inc. - * Copyright 2018-2020 NXP + * Copyright 2018-2020,2023-2024 NXP * */ @@ -42,6 +42,8 @@ /* opaque token for static dequeues */ #define QMAN_SDQCR_TOKEN 0xbb +#define BMAN_VALID_RSLT_NUM_MASK 0x7 + enum qbman_sdqcr_dct { qbman_sdqcr_dct_null = 0, qbman_sdqcr_dct_prio_ics, @@ -1006,9 +1008,9 @@ static int qbman_swp_enqueue_multiple_direct(struct qbman_swp *s, QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask)); p[0] = cl[0] | s->eqcr.pi_vb; if (flags && (flags[i] & QBMAN_ENQUEUE_FLAG_DCA)) { - struct qbman_eq_desc *d = (struct qbman_eq_desc *)p; + struct qbman_eq_desc *desc = (struct qbman_eq_desc *)p; - d->eq.dca = (1 << QB_ENQUEUE_CMD_DCA_EN_SHIFT) | + desc->eq.dca = (1 << QB_ENQUEUE_CMD_DCA_EN_SHIFT) | ((flags[i]) & QBMAN_EQCR_DCA_IDXMASK); } eqcr_pi++; @@ -2628,7 +2630,7 @@ struct qbman_acquire_rslt { uint16_t reserved; uint8_t num; uint8_t reserved2[3]; - uint64_t buf[7]; + uint64_t buf[BMAN_VALID_RSLT_NUM_MASK]; }; static int qbman_swp_acquire_direct(struct qbman_swp *s, uint16_t bpid, @@ -2636,8 +2638,9 @@ static int qbman_swp_acquire_direct(struct qbman_swp *s, uint16_t bpid, { struct qbman_acquire_desc *p; struct qbman_acquire_rslt *r; + int num; - if (!num_buffers || (num_buffers > 7)) + if (!num_buffers || (num_buffers > BMAN_VALID_RSLT_NUM_MASK)) return -EINVAL; /* Start the management command */ @@ -2668,12 +2671,13 @@ static int qbman_swp_acquire_direct(struct qbman_swp *s, uint16_t bpid, return -EIO; } - QBMAN_BUG_ON(r->num > num_buffers); + num = r->num & BMAN_VALID_RSLT_NUM_MASK; + QBMAN_BUG_ON(num > num_buffers); /* Copy the acquired buffers to the caller's array */ - u64_from_le32_copy(buffers, &r->buf[0], r->num); + u64_from_le32_copy(buffers, &r->buf[0], num); - return (int)r->num; + return num; } static int qbman_swp_acquire_cinh_direct(struct qbman_swp *s, uint16_t bpid, @@ -2681,8 +2685,9 @@ static int qbman_swp_acquire_cinh_direct(struct qbman_swp *s, uint16_t bpid, { struct qbman_acquire_desc *p; struct qbman_acquire_rslt *r; + int num; - if (!num_buffers || (num_buffers > 7)) + if (!num_buffers || (num_buffers > BMAN_VALID_RSLT_NUM_MASK)) return -EINVAL; /* Start the management command */ @@ -2713,12 +2718,13 @@ static int qbman_swp_acquire_cinh_direct(struct qbman_swp *s, uint16_t bpid, return -EIO; } - QBMAN_BUG_ON(r->num > num_buffers); + num = r->num & BMAN_VALID_RSLT_NUM_MASK; + QBMAN_BUG_ON(num > num_buffers); /* Copy the acquired buffers to the caller's array */ - u64_from_le32_copy(buffers, &r->buf[0], r->num); + u64_from_le32_copy(buffers, &r->buf[0], num); - return (int)r->num; + return num; } int qbman_swp_acquire(struct qbman_swp *s, uint16_t 
bpid, uint64_t *buffers, diff --git a/drivers/bus/fslmc/version.map b/drivers/bus/fslmc/version.map index e19b8d1f6b..2c36895285 100644 --- a/drivers/bus/fslmc/version.map +++ b/drivers/bus/fslmc/version.map @@ -24,7 +24,6 @@ INTERNAL { dpaa2_seqn_dynfield_offset; dpaa2_seqn; dpaa2_svr_family; - dpaa2_virt_mode; dpbp_disable; dpbp_enable; dpbp_get_attributes; @@ -36,6 +35,10 @@ INTERNAL { dpci_set_rx_queue; dpcon_get_attributes; dpcon_open; + dpcon_close; + dpcon_reset; + dpcon_enable; + dpcon_disable; dpdmai_close; dpdmai_disable; dpdmai_enable; @@ -52,7 +55,11 @@ INTERNAL { dpio_open; dpio_remove_static_dequeue_channel; dpio_reset; + dpio_get_stashing_destination; + dpio_get_stashing_destination_source; dpio_set_stashing_destination; + dpio_set_stashing_destination_by_core_id; + dpio_set_stashing_destination_source; mc_get_soc_version; mc_get_version; mc_send_command; @@ -110,6 +117,13 @@ INTERNAL { rte_fslmc_get_device_count; rte_fslmc_object_register; rte_global_active_dqs_list; + rte_fslmc_vfio_mem_dmaunmap; + rte_fslmc_cold_mem_vaddr_to_iova; + rte_fslmc_cold_mem_iova_to_vaddr; + rte_fslmc_mem_vaddr_to_iova; + rte_fslmc_mem_iova_to_vaddr; + rte_fslmc_io_vaddr_to_iova; + rte_fslmc_io_iova_to_vaddr; local: *; }; diff --git a/drivers/bus/ifpga/bus_ifpga_driver.h b/drivers/bus/ifpga/bus_ifpga_driver.h index a42afc7d75..af151ffd4b 100644 --- a/drivers/bus/ifpga/bus_ifpga_driver.h +++ b/drivers/bus/ifpga/bus_ifpga_driver.h @@ -77,7 +77,7 @@ struct rte_afu_device { struct rte_intr_handle *intr_handle; /**< Interrupt handle */ struct rte_afu_driver *driver; /**< Associated driver */ char path[IFPGA_BUS_BITSTREAM_PATH_MAX_LEN]; -} __rte_packed; +}; /** * @internal diff --git a/drivers/common/cnxk/hw/sso.h b/drivers/common/cnxk/hw/sso.h index 09b8d4955f..79337a8a3b 100644 --- a/drivers/common/cnxk/hw/sso.h +++ b/drivers/common/cnxk/hw/sso.h @@ -146,6 +146,7 @@ #define SSO_LF_GGRP_OP_ADD_WORK0 (0x0ull) #define SSO_LF_GGRP_OP_ADD_WORK1 (0x8ull) #define SSO_LF_GGRP_QCTL (0x20ull) +#define SSO_LF_GGRP_TAG_CFG (0x40ull) #define SSO_LF_GGRP_EXE_DIS (0x80ull) #define SSO_LF_GGRP_INT (0x100ull) #define SSO_LF_GGRP_INT_W1S (0x108ull) @@ -159,6 +160,10 @@ #define SSO_LF_GGRP_MISC_CNT (0x200ull) #define SSO_LF_GGRP_OP_AW_LMTST (0x400ull) +#define SSO_LF_GGRP_AGGR_CFG (0x300ull) +#define SSO_LF_GGRP_AGGR_CTX_BASE (0x308ull) +#define SSO_LF_GGRP_AGGR_CTX_INSTOP (0x310ull) + #define SSO_AF_IAQ_FREE_CNT_MASK 0x3FFFull #define SSO_AF_IAQ_RSVD_FREE_MASK 0x3FFFull #define SSO_AF_IAQ_RSVD_FREE_SHIFT 16 @@ -230,5 +235,33 @@ #define SSO_TT_ATOMIC (0x1ull) #define SSO_TT_UNTAGGED (0x2ull) #define SSO_TT_EMPTY (0x3ull) +#define SSO_TT_AGG (0x3ull) + +#define SSO_LF_AGGR_INSTOP_FLUSH (0x0ull) +#define SSO_LF_AGGR_INSTOP_EVICT (0x1ull) +#define SSO_LF_AGGR_INSTOP_GLOBAL_FLUSH (0x2ull) +#define SSO_LF_AGGR_INSTOP_GLOBAL_EVICT (0x3ull) + +#define SSO_AGGR_CTX_SZ 16 +#define SSO_AGGR_NUM_CTX(a) (1 << (a + 6)) +#define SSO_AGGR_MIN_CTX SSO_AGGR_NUM_CTX(0) +#define SSO_AGGR_MAX_CTX SSO_AGGR_NUM_CTX(10) +#define SSO_AGGR_DEF_TMO 0x3Full + +struct sso_agq_ctx { + uint64_t ena : 1; + uint64_t rsvd_1_3 : 3; + uint64_t vwqe_aura : 17; + uint64_t rsvd_21_31 : 11; + uint64_t tag : 32; + uint64_t tt : 2; + uint64_t rsvd_66_67 : 2; + uint64_t swqe_tag : 12; + uint64_t max_vsize_exp : 4; + uint64_t vtimewait : 12; + uint64_t xqe_type : 4; + uint64_t cnt_ena : 1; + uint64_t rsvd_101_127 : 27; +}; #endif /* __SSO_HW_H__ */ diff --git a/drivers/common/cnxk/hw/ssow.h b/drivers/common/cnxk/hw/ssow.h index c146a8c3ef..ec6bd7896b 100644 
--- a/drivers/common/cnxk/hw/ssow.h +++ b/drivers/common/cnxk/hw/ssow.h @@ -37,6 +37,7 @@ #define SSOW_LF_GWS_PRF_WQE1 (0x448ull) /* [CN10K, .) */ #define SSOW_LF_GWS_OP_GET_WORK0 (0x600ull) #define SSOW_LF_GWS_OP_GET_WORK1 (0x608ull) /* [CN10K, .) */ +#define SSOW_LF_GWS_OP_PRF_GETWORK (0x610ull) /* [CN20K, .) */ #define SSOW_LF_GWS_OP_SWTAG_FLUSH (0x800ull) #define SSOW_LF_GWS_OP_SWTAG_UNTAG (0x810ull) #define SSOW_LF_GWS_OP_SWTP_CLR (0x820ull) diff --git a/drivers/common/cnxk/hw/tim.h b/drivers/common/cnxk/hw/tim.h index 82b094e3dc..75700a11b8 100644 --- a/drivers/common/cnxk/hw/tim.h +++ b/drivers/common/cnxk/hw/tim.h @@ -47,10 +47,15 @@ #define TIM_LF_RAS_INT_ENA_W1S (0x310) #define TIM_LF_RAS_INT_ENA_W1C (0x318) #define TIM_LF_RING_REL (0x400) +#define TIM_LF_SCHED_TIMER0 (0x480) +#define TIM_LF_RING_FIRST_EXPIRY (0x558) #define TIM_MAX_INTERVAL_TICKS ((1ULL << 32) - 1) +#define TIM_MAX_INTERVAL_EXT_TICKS ((1ULL << 34) - 1) #define TIM_MAX_BUCKET_SIZE ((1ULL << 20) - 2) #define TIM_MIN_BUCKET_SIZE 1 #define TIM_BUCKET_WRAP_SIZE 3 +#define TIM_BUCKET_MIN_GAP 1 +#define TIM_NPA_TMO 0xFFFF #endif /* __TIM_HW_H__ */ diff --git a/drivers/common/cnxk/meson.build b/drivers/common/cnxk/meson.build index abb0f6f01f..9e3fd44317 100644 --- a/drivers/common/cnxk/meson.build +++ b/drivers/common/cnxk/meson.build @@ -71,6 +71,7 @@ sources = files( 'roc_tim_irq.c', 'roc_utils.c', 'roc_ree.c', + 'roc_rvu_lf.c', ) # Security common code @@ -108,4 +109,11 @@ deps += ['bus_pci', 'net', 'telemetry'] require_iova_in_mbuf = false +cnxk_socs = ['cn9k', 'cn10k', 'cn20k'] + +if dpdk_conf.get('RTE_IOVA_IN_MBUF') == 1 and soc_type in cnxk_socs + warning('IOVA in mbuf is not needed for cnxk drivers on cnxk platforms. ' + + 'Set the enable_iova_as_pa option to false to save mbuf space.') +endif + annotate_locks = false diff --git a/drivers/common/cnxk/roc_api.h b/drivers/common/cnxk/roc_api.h index 6a86863c57..93e7bf11bb 100644 --- a/drivers/common/cnxk/roc_api.h +++ b/drivers/common/cnxk/roc_api.h @@ -120,4 +120,7 @@ /* Eswitch */ #include "roc_eswitch.h" +/* RVU LF */ +#include "roc_rvu_lf.h" + #endif /* _ROC_API_H_ */ diff --git a/drivers/common/cnxk/roc_constants.h b/drivers/common/cnxk/roc_constants.h index 0e7495a37c..67cd74b28a 100644 --- a/drivers/common/cnxk/roc_constants.h +++ b/drivers/common/cnxk/roc_constants.h @@ -45,6 +45,8 @@ #define PCI_DEVID_CNXK_RVU_REE_VF 0xA0f5 #define PCI_DEVID_CNXK_RVU_ESWITCH_PF 0xA0E0 #define PCI_DEVID_CNXK_RVU_ESWITCH_VF 0xA0E1 +#define PCI_DEVID_CNXK_RVU_BPHY_PF 0xA0E4 +#define PCI_DEVID_CNXK_RVU_BPHY_VF 0xA0E5 #define PCI_DEVID_CN9K_CGX 0xA059 #define PCI_DEVID_CN10K_RPM 0xA060 @@ -63,6 +65,7 @@ #define PCI_SUBSYSTEM_DEVID_CNF10KB 0xBC00 #define PCI_SUBSYSTEM_DEVID_CN20KA 0xA020 +#define PCI_SUBSYSTEM_DEVID_CNF20KA 0xA000 #define PCI_SUBSYSTEM_DEVID_CN9KA 0x0000 #define PCI_SUBSYSTEM_DEVID_CN9KB 0xb400 diff --git a/drivers/common/cnxk/roc_dev.c b/drivers/common/cnxk/roc_dev.c index c905d35ea6..32409f2ef3 100644 --- a/drivers/common/cnxk/roc_dev.c +++ b/drivers/common/cnxk/roc_dev.c @@ -218,6 +218,51 @@ af_pf_wait_msg(struct dev *dev, uint16_t vf, int num_msg) return req_hdr->num_msgs; } +static int +process_rvu_lf_msgs(struct dev *dev, uint16_t vf, struct mbox_msghdr *msg, size_t size) +{ + uint16_t max_bits = sizeof(dev->active_vfs[0]) * 8; + uint8_t req[MBOX_MSG_REQ_SIZE_MAX]; + struct msg_rsp *rsp; + uint16_t rsp_len; + void *resp; + int rc = 0; + + /* Handle BPHY mailbox message in PF */ + dev->active_vfs[vf / max_bits] |= BIT_ULL(vf % max_bits); + + if ((size - 
sizeof(struct mbox_msghdr)) > MBOX_MSG_REQ_SIZE_MAX) { + plt_err("MBOX request size greater than %d", MBOX_MSG_REQ_SIZE_MAX); + return -1; + } + mbox_memcpy(req, (uint8_t *)msg + sizeof(struct mbox_msghdr), + size - sizeof(struct mbox_msghdr)); + + rc = dev->ops->msg_process_cb(dev_get_vf(msg->pcifunc), msg->id, req, + size - sizeof(struct mbox_msghdr), &resp, &rsp_len); + if (rc < 0) { + plt_err("Failed to process VF%d message", vf); + return -1; + } + + rsp = (struct msg_rsp *)mbox_alloc_msg(&dev->mbox_vfpf, vf, + rsp_len + sizeof(struct mbox_msghdr)); + if (!rsp) { + plt_err("Failed to alloc VF%d response message", vf); + return -1; + } + + mbox_rsp_init(msg->id, rsp); + + mbox_memcpy((uint8_t *)rsp + sizeof(struct mbox_msghdr), resp, rsp_len); + free(resp); + /* PF/VF function ID */ + rsp->hdr.pcifunc = msg->pcifunc; + rsp->hdr.rc = 0; + + return 0; +} + /* PF receives mbox DOWN messages from VF and forwards to AF */ static int vf_pf_process_msgs(struct dev *dev, uint16_t vf) @@ -264,6 +309,9 @@ vf_pf_process_msgs(struct dev *dev, uint16_t vf) /* PF/VF function ID */ rsp->hdr.pcifunc = msg->pcifunc; rsp->hdr.rc = 0; + } else if (roc_rvu_lf_msg_id_range_check(dev->roc_rvu_lf, msg->id)) { + if (process_rvu_lf_msgs(dev, vf, msg, size) < 0) + continue; } else { struct mbox_msghdr *af_req; /* Reserve AF/PF mbox message */ @@ -342,8 +390,13 @@ vf_pf_process_up_msgs(struct dev *dev, uint16_t vf) dev_get_vf(msg->pcifunc)); break; default: - plt_err("Not handled UP msg 0x%x (%s) func:0x%x", - msg->id, mbox_id2name(msg->id), msg->pcifunc); + if (roc_rvu_lf_msg_id_range_check(dev->roc_rvu_lf, msg->id)) + plt_base_dbg("PF: Msg 0x%x fn:0x%x (pf:%d,vf:%d)", + msg->id, msg->pcifunc, dev_get_pf(msg->pcifunc), + dev_get_vf(msg->pcifunc)); + else + plt_err("Not handled UP msg 0x%x (%s) func:0x%x", + msg->id, mbox_id2name(msg->id), msg->pcifunc); } offset = mbox->rx_start + msg->next_msgoff; } @@ -543,16 +596,16 @@ pf_vf_mbox_send_up_msg(struct dev *dev, void *rec_msg) } static int -mbox_up_handler_rep_repte_notify(struct dev *dev, struct rep_repte_req *req, struct msg_rsp *rsp) +mbox_up_handler_rep_event_up_notify(struct dev *dev, struct rep_event *req, struct msg_rsp *rsp) { struct roc_eswitch_repte_notify_msg *notify_msg; int rc = 0; + plt_base_dbg("mbox_up_handler_rep_event_up_notify"); plt_base_dbg("pf:%d/vf:%d msg id 0x%x (%s) from: pf:%d/vf:%d", dev_get_pf(dev->pf_func), dev_get_vf(dev->pf_func), req->hdr.id, mbox_id2name(req->hdr.id), dev_get_pf(req->hdr.pcifunc), dev_get_vf(req->hdr.pcifunc)); - plt_base_dbg("repte pcifunc %x, enable %d", req->repte_pcifunc, req->enable); if (dev->ops && dev->ops->repte_notify) { notify_msg = plt_zmalloc(sizeof(struct roc_eswitch_repte_notify_msg), 0); if (!notify_msg) { @@ -560,49 +613,39 @@ mbox_up_handler_rep_repte_notify(struct dev *dev, struct rep_repte_req *req, str rc = -ENOMEM; goto fail; } - notify_msg->type = ROC_ESWITCH_REPTE_STATE; - notify_msg->state.hw_func = req->repte_pcifunc; - notify_msg->state.enable = req->enable; - rc = dev->ops->repte_notify(dev->roc_nix, (void *)notify_msg); - if (rc < 0) - plt_err("Failed to sent new representee %x notification to %s", - req->repte_pcifunc, (req->enable == true) ? 
"enable" : "disable"); - - plt_free(notify_msg); - } -fail: - rsp->hdr.rc = rc; - return rc; -} - -static int -mbox_up_handler_rep_set_mtu(struct dev *dev, struct rep_mtu *req, struct msg_rsp *rsp) -{ - struct roc_eswitch_repte_notify_msg *notify_msg; - int rc = 0; - - plt_base_dbg("pf:%d/vf:%d msg id 0x%x (%s) from: pf:%d/vf:%d", dev_get_pf(dev->pf_func), - dev_get_vf(dev->pf_func), req->hdr.id, mbox_id2name(req->hdr.id), - dev_get_pf(req->hdr.pcifunc), dev_get_vf(req->hdr.pcifunc)); - - plt_base_dbg("rep pcifunc %x, rep id %d mtu %d", req->rep_pcifunc, req->rep_id, req->mtu); - if (dev->ops && dev->ops->repte_notify) { - notify_msg = plt_zmalloc(sizeof(struct roc_eswitch_repte_notify_msg), 0); - if (!notify_msg) { - plt_err("Failed to allocate memory"); - rc = -ENOMEM; + switch (req->event) { + case RVU_EVENT_PORT_STATE: + plt_base_dbg("pcifunc %x, port_state %d", req->pcifunc, + req->evt_data.port_state); + notify_msg->type = ROC_ESWITCH_LINK_STATE; + notify_msg->link.hw_func = req->pcifunc; + notify_msg->link.enable = req->evt_data.port_state; + break; + case RVU_EVENT_PFVF_STATE: + plt_base_dbg("pcifunc %x, repte_state %d", req->pcifunc, + req->evt_data.vf_state); + notify_msg->type = ROC_ESWITCH_REPTE_STATE; + notify_msg->state.hw_func = req->pcifunc; + notify_msg->state.enable = req->evt_data.vf_state; + break; + case RVU_EVENT_MTU_CHANGE: + plt_base_dbg("pcifunc %x, mtu val %d", req->pcifunc, req->evt_data.mtu); + notify_msg->type = ROC_ESWITCH_REPTE_MTU; + notify_msg->mtu.hw_func = req->pcifunc; + notify_msg->mtu.mtu = req->evt_data.mtu; + break; + default: + plt_err("Unknown event type %u", req->event); + plt_free(notify_msg); + rc = -EINVAL; goto fail; } - notify_msg->type = ROC_ESWITCH_REPTE_MTU; - notify_msg->mtu.hw_func = req->rep_pcifunc; - notify_msg->mtu.rep_id = req->rep_id; - notify_msg->mtu.mtu = req->mtu; rc = dev->ops->repte_notify(dev->roc_nix, (void *)notify_msg); if (rc < 0) - plt_err("Failed to send new mtu notification for representee %x ", - req->rep_pcifunc); + plt_err("Failed to send notification type %x for representee %x", + notify_msg->type, notify_msg->state.hw_func); plt_free(notify_msg); } @@ -792,6 +835,50 @@ mbox_process_msgs_up(struct dev *dev, struct mbox_msghdr *req) return -ENODEV; } +static int +process_rvu_lf_msgs_up(struct dev *dev, struct mbox_msghdr *msg, size_t size) +{ + uint8_t req[MBOX_MSG_REQ_SIZE_MAX]; + struct msg_rsp *rsp; + uint16_t rsp_len; + void *resp; + int rc = 0; + + /* Check if valid, if not reply with an invalid msg */ + if (msg->sig != MBOX_REQ_SIG) + return -EIO; + + if ((size - sizeof(struct mbox_msghdr)) > MBOX_MSG_REQ_SIZE_MAX) { + plt_err("MBOX request size greater than %d", MBOX_MSG_REQ_SIZE_MAX); + return -ENOMEM; + } + mbox_memcpy(req, (uint8_t *)msg + sizeof(struct mbox_msghdr), + size - sizeof(struct mbox_msghdr)); + rc = dev->ops->msg_process_cb(dev_get_vf(msg->pcifunc), msg->id, req, + size - sizeof(struct mbox_msghdr), &resp, &rsp_len); + if (rc < 0) { + plt_err("Failed to process VF%d message", dev->vf); + return rc; + } + + rsp = (struct msg_rsp *)mbox_alloc_msg(&dev->mbox_up, 0, + rsp_len + sizeof(struct mbox_msghdr)); + if (!rsp) { + plt_err("Failed to alloc VF%d response message", dev->vf); + return -ENOMEM; + } + + mbox_rsp_init(msg->id, rsp); + + mbox_memcpy((uint8_t *)rsp + sizeof(struct mbox_msghdr), resp, rsp_len); + free(resp); + /* PF/VF function ID */ + rsp->hdr.pcifunc = msg->pcifunc; + rsp->hdr.rc = 0; + + return rc; +} + /* Received up messages from AF (PF context) / PF (in context) */ static void 
process_msgs_up(struct dev *dev, struct mbox *mbox) @@ -800,6 +887,7 @@ process_msgs_up(struct dev *dev, struct mbox *mbox) struct mbox_hdr *req_hdr; struct mbox_msghdr *msg; int i, err, offset; + size_t size; req_hdr = (struct mbox_hdr *)((uintptr_t)mdev->mbase + mbox->rx_start); if (req_hdr->num_msgs == 0) @@ -812,10 +900,17 @@ process_msgs_up(struct dev *dev, struct mbox *mbox) plt_base_dbg("Message 0x%x (%s) pf:%d/vf:%d", msg->id, mbox_id2name(msg->id), dev_get_pf(msg->pcifunc), dev_get_vf(msg->pcifunc)); - err = mbox_process_msgs_up(dev, msg); - if (err) - plt_err("Error %d handling 0x%x (%s)", err, msg->id, - mbox_id2name(msg->id)); + if (roc_rvu_lf_msg_id_range_check(dev->roc_rvu_lf, msg->id)) { + size = mbox->rx_start + msg->next_msgoff - offset; + err = process_rvu_lf_msgs_up(dev, msg, size); + if (err) + plt_err("Error %d handling 0x%x RVU_LF up msg", err, msg->id); + } else { + err = mbox_process_msgs_up(dev, msg); + if (err) + plt_err("Error %d handling 0x%x (%s)", err, msg->id, + mbox_id2name(msg->id)); + } offset = mbox->rx_start + msg->next_msgoff; } /* Send mbox responses */ @@ -1304,6 +1399,7 @@ dev_vf_hwcap_update(struct plt_pci_device *pci_dev, struct dev *dev) case PCI_DEVID_CNXK_RVU_VF: case PCI_DEVID_CNXK_RVU_SDP_VF: case PCI_DEVID_CNXK_RVU_NIX_INL_VF: + case PCI_DEVID_CNXK_RVU_BPHY_VF: case PCI_DEVID_CNXK_RVU_ESWITCH_VF: dev->hwcap |= DEV_HWCAP_F_VF; break; diff --git a/drivers/common/cnxk/roc_dev_priv.h b/drivers/common/cnxk/roc_dev_priv.h index 5ab4f72f8f..c766183196 100644 --- a/drivers/common/cnxk/roc_dev_priv.h +++ b/drivers/common/cnxk/roc_dev_priv.h @@ -46,11 +46,17 @@ typedef void (*link_status_get_t)(void *roc_nix, /* Representee notification callback */ typedef int (*repte_notify_t)(void *roc_nix, void *notify_msg); +/* RVU Message process callback */ +typedef int (*msg_process_cb_t)(uint16_t vf, uint16_t msg_id, + void *req, uint16_t req_len, + void **rsp, uint16_t *rsp_len); + struct dev_ops { link_info_t link_status_update; ptp_info_t ptp_info_update; link_status_get_t link_status_get; q_err_cb_t q_err_cb; + msg_process_cb_t msg_process_cb; repte_notify_t repte_notify; }; @@ -141,6 +147,7 @@ struct dev { void *roc_cpt; void *roc_tim; void *roc_ml; + void *roc_rvu_lf; bool disable_shared_lmt; /* false(default): shared lmt mode enabled */ const struct plt_memzone *lmt_mz; struct mbox_sync sync; diff --git a/drivers/common/cnxk/roc_dpi.c b/drivers/common/cnxk/roc_dpi.c index 892685d185..71edfcbf9b 100644 --- a/drivers/common/cnxk/roc_dpi.c +++ b/drivers/common/cnxk/roc_dpi.c @@ -95,6 +95,7 @@ roc_dpi_configure(struct roc_dpi *roc_dpi, uint32_t chunk_sz, uint64_t aura, uin mbox_msg.u[1] = 0; /* DPI PF driver expects vfid starts from index 0 */ mbox_msg.s.vfid = roc_dpi->vfid; + mbox_msg.s.pri = roc_dpi->priority; mbox_msg.s.cmd = DPI_QUEUE_OPEN; mbox_msg.s.csize = chunk_sz; mbox_msg.s.aura = aura; @@ -137,6 +138,7 @@ roc_dpi_configure_v2(struct roc_dpi *roc_dpi, uint32_t chunk_sz, uint64_t aura, mbox_msg.u[1] = 0; /* DPI PF driver expects vfid starts from index 0 */ mbox_msg.s.vfid = roc_dpi->vfid; + mbox_msg.s.pri = roc_dpi->priority; mbox_msg.s.cmd = DPI_QUEUE_OPEN_V2; mbox_msg.s.csize = chunk_sz / 8; mbox_msg.s.aura = aura; diff --git a/drivers/common/cnxk/roc_dpi.h b/drivers/common/cnxk/roc_dpi.h index 7b4f9d4f4f..3a11559df9 100644 --- a/drivers/common/cnxk/roc_dpi.h +++ b/drivers/common/cnxk/roc_dpi.h @@ -9,6 +9,7 @@ struct roc_dpi { struct plt_pci_device *pci_dev; uint8_t *rbase; uint16_t vfid; + uint8_t priority; } __plt_cache_aligned; int __roc_api 
roc_dpi_dev_init(struct roc_dpi *roc_dpi, uint8_t offset); diff --git a/drivers/common/cnxk/roc_dpi_priv.h b/drivers/common/cnxk/roc_dpi_priv.h index 844e5f37ee..1f975915f7 100644 --- a/drivers/common/cnxk/roc_dpi_priv.h +++ b/drivers/common/cnxk/roc_dpi_priv.h @@ -38,6 +38,8 @@ typedef union dpi_mbox_msg_t { uint64_t wqecs : 1; /* WQE queue DMA completion status offset */ uint64_t wqecsoff : 8; + /* Priority */ + uint64_t pri : 1; } s; } dpi_mbox_msg_t; diff --git a/drivers/common/cnxk/roc_eswitch.c b/drivers/common/cnxk/roc_eswitch.c index c67b4090a5..6cae459065 100644 --- a/drivers/common/cnxk/roc_eswitch.c +++ b/drivers/common/cnxk/roc_eswitch.c @@ -387,3 +387,14 @@ roc_eswitch_nix_repte_stats(struct roc_nix *roc_nix, uint16_t pf_func, struct ro mbox_put(mbox); return rc; } + +int +roc_eswitch_is_repte_pfs_vf(uint16_t rep_pffunc, uint16_t pf_pffunc) +{ + uint16_t rep_pf = dev_get_pf(rep_pffunc); + + if (roc_model_is_cn20k()) + return ((rep_pf << RVU_PFVF_PF_SHIFT_CN20K) == pf_pffunc); + else + return ((rep_pf << RVU_PFVF_PF_SHIFT) == pf_pffunc); +} diff --git a/drivers/common/cnxk/roc_eswitch.h b/drivers/common/cnxk/roc_eswitch.h index b701ea69ee..5e4ba5d72a 100644 --- a/drivers/common/cnxk/roc_eswitch.h +++ b/drivers/common/cnxk/roc_eswitch.h @@ -10,6 +10,7 @@ typedef enum roc_eswitch_repte_notify_msg_type { ROC_ESWITCH_REPTE_STATE = 0, + ROC_ESWITCH_LINK_STATE, ROC_ESWITCH_REPTE_MTU, } roc_eswitch_repte_notify_msg_type_t; @@ -18,6 +19,11 @@ struct roc_eswitch_repte_state { uint16_t hw_func; }; +struct roc_eswitch_link_state { + bool enable; + uint16_t hw_func; +}; + struct roc_eswitch_repte_mtu { uint16_t mtu; uint16_t rep_id; @@ -28,6 +34,7 @@ struct roc_eswitch_repte_notify_msg { roc_eswitch_repte_notify_msg_type_t type; union { struct roc_eswitch_repte_state state; + struct roc_eswitch_link_state link; struct roc_eswitch_repte_mtu mtu; }; }; @@ -36,6 +43,9 @@ struct roc_eswitch_repte_notify_msg { typedef int (*process_repte_notify_t)(void *roc_nix, struct roc_eswitch_repte_notify_msg *notify_msg); +/* Generic */ +int __roc_api roc_eswitch_is_repte_pfs_vf(uint16_t rep_pffunc, uint16_t pf_pffunc); + /* NPC */ int __roc_api roc_eswitch_npc_mcam_rx_rule(struct roc_npc *roc_npc, struct roc_npc_flow *flow, uint16_t pcifunc, uint16_t vlan_tci, diff --git a/drivers/common/cnxk/roc_features.h b/drivers/common/cnxk/roc_features.h index 6abb35c296..0002a7b5c3 100644 --- a/drivers/common/cnxk/roc_features.h +++ b/drivers/common/cnxk/roc_features.h @@ -96,4 +96,10 @@ roc_feature_nix_has_second_pass_drop(void) return 0; } +static inline bool +roc_feature_dpi_has_priority(void) +{ + return roc_model_is_cn10k(); +} + #endif diff --git a/drivers/common/cnxk/roc_idev.c b/drivers/common/cnxk/roc_idev.c index 0778d51d1e..84812f73e0 100644 --- a/drivers/common/cnxk/roc_idev.c +++ b/drivers/common/cnxk/roc_idev.c @@ -38,6 +38,7 @@ idev_set_defaults(struct idev_cfg *idev) idev->num_lmtlines = 0; idev->bphy = NULL; idev->cpt = NULL; + TAILQ_INIT(&idev->rvu_lf_list); TAILQ_INIT(&idev->mcs_list); idev->nix_inl_dev = NULL; TAILQ_INIT(&idev->roc_nix_list); @@ -188,6 +189,51 @@ roc_idev_cpt_get(void) return NULL; } +struct roc_rvu_lf * +roc_idev_rvu_lf_get(uint8_t rvu_lf_idx) +{ + struct idev_cfg *idev = idev_get_cfg(); + struct roc_rvu_lf *rvu_lf = NULL; + + if (idev != NULL) { + TAILQ_FOREACH(rvu_lf, &idev->rvu_lf_list, next) { + if (rvu_lf->idx == rvu_lf_idx) + return rvu_lf; + } + } + + return NULL; +} + +void +roc_idev_rvu_lf_set(struct roc_rvu_lf *rvu) +{ + struct idev_cfg *idev = idev_get_cfg(); + 
struct roc_rvu_lf *rvu_lf_iter = NULL; + + if (idev != NULL) { + TAILQ_FOREACH(rvu_lf_iter, &idev->rvu_lf_list, next) { + if (rvu_lf_iter->idx == rvu->idx) + return; + } + TAILQ_INSERT_TAIL(&idev->rvu_lf_list, rvu, next); + } +} + +void +roc_idev_rvu_lf_free(struct roc_rvu_lf *rvu) +{ + struct idev_cfg *idev = idev_get_cfg(); + struct roc_rvu_lf *rvu_lf_iter = NULL; + + if (idev != NULL) { + TAILQ_FOREACH(rvu_lf_iter, &idev->rvu_lf_list, next) { + if (rvu_lf_iter->idx == rvu->idx) + TAILQ_REMOVE(&idev->rvu_lf_list, rvu, next); + } + } +} + struct roc_mcs * roc_idev_mcs_get(uint8_t mcs_idx) { diff --git a/drivers/common/cnxk/roc_idev.h b/drivers/common/cnxk/roc_idev.h index fc0f7db54e..6edb5f83b2 100644 --- a/drivers/common/cnxk/roc_idev.h +++ b/drivers/common/cnxk/roc_idev.h @@ -29,4 +29,8 @@ uint16_t *__roc_api roc_idev_nix_rx_chan_base_get(void); void __roc_api roc_idev_nix_rx_chan_set(uint16_t port, uint16_t chan); uint16_t __roc_api roc_idev_nix_inl_dev_pffunc_get(void); + +struct roc_rvu_lf *__roc_api roc_idev_rvu_lf_get(uint8_t rvu_lf_idx); +void __roc_api roc_idev_rvu_lf_set(struct roc_rvu_lf *rvu); +void __roc_api roc_idev_rvu_lf_free(struct roc_rvu_lf *rvu); #endif /* _ROC_IDEV_H_ */ diff --git a/drivers/common/cnxk/roc_idev_priv.h b/drivers/common/cnxk/roc_idev_priv.h index 6628b18152..98b6286bfe 100644 --- a/drivers/common/cnxk/roc_idev_priv.h +++ b/drivers/common/cnxk/roc_idev_priv.h @@ -36,6 +36,7 @@ struct idev_cfg { struct roc_bphy *bphy; struct roc_cpt *cpt; struct roc_sso *sso; + struct roc_rvu_lf_head rvu_lf_list; struct roc_mcs_head mcs_list; struct nix_inl_dev *nix_inl_dev; struct idev_nix_inl_cfg inl_cfg; diff --git a/drivers/common/cnxk/roc_mbox.h b/drivers/common/cnxk/roc_mbox.h index dd65946e9e..f362d55bc2 100644 --- a/drivers/common/cnxk/roc_mbox.h +++ b/drivers/common/cnxk/roc_mbox.h @@ -54,6 +54,8 @@ struct mbox_msghdr { #define MBOX_MSG_MASK 0xFFFF #define MBOX_MSG_INVALID 0xFFFE #define MBOX_MSG_MAX 0xFFFF +#define MBOX_MSG_GENERIC_MAX_ID 0x1FF +#define MBOX_MSG_REQ_SIZE_MAX (16 * 1024) #define MBOX_MESSAGES \ /* Generic mbox IDs (range 0x000 - 0x1FF) */ \ @@ -147,6 +149,11 @@ struct mbox_msghdr { msg_rsp) \ M(SSO_GRP_STASH_CONFIG, 0x614, sso_grp_stash_config, \ sso_grp_stash_cfg, msg_rsp) \ + M(SSO_AGGR_SET_CONFIG, 0x615, sso_aggr_setconfig, sso_aggr_setconfig, \ + msg_rsp) \ + M(SSO_AGGR_GET_STATS, 0x616, sso_aggr_get_stats, sso_info_req, \ + sso_aggr_stats) \ + M(SSO_GET_HW_INFO, 0x617, sso_get_hw_info, msg_req, sso_hw_info) \ /* TIM mbox IDs (range 0x800 - 0x9FF) */ \ M(TIM_LF_ALLOC, 0x800, tim_lf_alloc, tim_lf_alloc_req, \ tim_lf_alloc_rsp) \ @@ -159,6 +166,9 @@ struct mbox_msghdr { tim_intvl_rsp) \ M(TIM_CAPTURE_COUNTERS, 0x806, tim_capture_counters, msg_req, \ tim_capture_rsp) \ + M(TIM_CONFIG_HWWQE, 0x807, tim_config_hwwqe, tim_cfg_hwwqe_req, \ + msg_rsp) \ + M(TIM_GET_HW_INFO, 0x808, tim_get_hw_info, msg_req, tim_hw_info) \ /* CPT mbox IDs (range 0xA00 - 0xBFF) */ \ M(CPT_LF_ALLOC, 0xA00, cpt_lf_alloc, cpt_lf_alloc_req_msg, msg_rsp) \ M(CPT_LF_FREE, 0xA01, cpt_lf_free, msg_req, msg_rsp) \ @@ -361,9 +371,7 @@ struct mbox_msghdr { #define MBOX_UP_MCS_MESSAGES M(MCS_INTR_NOTIFY, 0xE00, mcs_intr_notify, mcs_intr_info, msg_rsp) -#define MBOX_UP_REP_MESSAGES \ -M(REP_REPTE_NOTIFY, 0xEF1, rep_repte_notify, rep_repte_req, msg_rsp) \ -M(REP_SET_MTU, 0xEF2, rep_set_mtu, rep_mtu, msg_rsp) +#define MBOX_UP_REP_MESSAGES M(REP_EVENT_UP_NOTIFY, 0xEF0, rep_event_up_notify, rep_event, msg_rsp) enum { #define M(_name, _id, _1, _2, _3) MBOX_MSG_##_name = _id, @@ -2119,6 
+2127,33 @@ struct ssow_chng_mship { uint16_t __io hwgrps[MAX_RVU_BLKLF_CNT]; /* Array of hwgrps. */ }; +struct sso_feat_info { + uint8_t __io hw_flr : 1; + uint8_t __io hw_prefetch : 1; + uint8_t __io sw_prefetch : 1; + uint8_t __io lsw : 1; + uint8_t __io fwd_grp : 1; + uint8_t __io eva_present : 1; + uint8_t __io no_nsched : 1; + uint8_t __io tag_cfg : 1; + uint8_t __io gwc_per_core; + uint16_t __io hws; + uint16_t __io hwgrps; + uint16_t __io hwgrps_per_pf; + uint16_t __io iue; + uint16_t __io taq_lines; + uint16_t __io taq_ent_per_line; + uint16_t __io xaq_buf_size; + uint16_t __io xaq_wq_entries; + uint32_t __io eva_ctx_per_hwgrp; + uint64_t __io rsvd[2]; +}; + +struct sso_hw_info { + struct mbox_msghdr hdr; + struct sso_feat_info feat; +}; + struct sso_hw_setconfig { struct mbox_msghdr hdr; uint32_t __io npa_aura_id; @@ -2163,6 +2198,13 @@ struct sso_grp_stash_cfg { uint8_t __io num_linesm1 : 4; }; +struct sso_aggr_setconfig { + struct mbox_msghdr hdr; + uint16_t __io npa_pf_func; + uint16_t __io hwgrp; + uint64_t __io rsvd[2]; +}; + struct sso_grp_stats { struct mbox_msghdr hdr; uint16_t __io grp; @@ -2182,6 +2224,16 @@ struct sso_hws_stats { uint64_t __io arbitration; }; +struct sso_aggr_stats { + struct mbox_msghdr hdr; + uint16_t __io grp; + uint64_t __io flushed; + uint64_t __io completed; + uint64_t __io npa_fail; + uint64_t __io timeout; + uint64_t __io rsvd[4]; +}; + /* CPT mailbox error codes * Range 901 - 1000. */ @@ -2754,6 +2806,7 @@ enum tim_af_status { TIM_AF_INVALID_ENABLE_DONTFREE = -815, TIM_AF_ENA_DONTFRE_NSET_PERIODIC = -816, TIM_AF_RING_ALREADY_DISABLED = -817, + TIM_AF_LF_START_SYNC_FAIL = -818, }; enum tim_clk_srcs { @@ -2846,13 +2899,43 @@ struct tim_config_req { uint8_t __io enabledontfreebuffer; uint32_t __io bucketsize; uint32_t __io chunksize; - uint32_t __io interval; + uint32_t __io interval_lo; uint8_t __io gpioedge; - uint8_t __io rsvd[7]; + uint8_t __io rsvd[3]; + uint32_t __io interval_hi; uint64_t __io intervalns; uint64_t __io clockfreq; }; +struct tim_cfg_hwwqe_req { + struct mbox_msghdr hdr; + uint16_t __io ring; + uint8_t __io grp_ena; + uint8_t __io hwwqe_ena; + uint8_t __io ins_min_gap; + uint8_t __io flw_ctrl_ena; + uint8_t __io wqe_rd_clr_ena; + uint16_t __io grp_tmo_cntr; + uint16_t __io npa_tmo_cntr; + uint16_t __io result_offset; + uint16_t __io event_count_offset; + uint64_t __io rsvd[2]; +}; + +struct tim_feat_info { + uint16_t __io rings; + uint8_t __io engines; + uint8_t __io hwwqe : 1; + uint8_t __io intvl_ext : 1; + uint8_t __io rsvd8[4]; + uint64_t __io rsvd[2]; +}; + +struct tim_hw_info { + struct mbox_msghdr hdr; + struct tim_feat_info feat; +}; + struct tim_lf_alloc_rsp { struct mbox_msghdr hdr; uint64_t __io tenns_clk; @@ -2919,16 +3002,24 @@ struct nix_spi_to_sa_delete_req { uint8_t __io way; }; -struct rep_repte_req { - struct mbox_msghdr hdr; - uint16_t __io repte_pcifunc; - bool __io enable; +struct rep_evt_data { + uint8_t __io port_state; + uint8_t __io vf_state; + uint16_t __io rx_mode; + uint16_t __io rx_flags; + uint16_t __io mtu; + uint64_t __io rsvd[5]; }; -struct rep_mtu { +struct rep_event { struct mbox_msghdr hdr; - uint16_t __io rep_pcifunc; - uint16_t __io rep_id; - uint16_t __io mtu; + uint16_t __io pcifunc; +#define RVU_EVENT_PORT_STATE BIT_ULL(0) +#define RVU_EVENT_PFVF_STATE BIT_ULL(1) +#define RVU_EVENT_MTU_CHANGE BIT_ULL(2) +#define RVU_EVENT_RX_MODE_CHANGE BIT_ULL(3) + uint16_t __io event; + struct rep_evt_data evt_data; }; + #endif /* __ROC_MBOX_H__ */ diff --git a/drivers/common/cnxk/roc_model.h 
b/drivers/common/cnxk/roc_model.h index 4e686bea2c..0de141b0cc 100644 --- a/drivers/common/cnxk/roc_model.h +++ b/drivers/common/cnxk/roc_model.h @@ -8,6 +8,7 @@ #include #include "roc_bits.h" +#include "roc_constants.h" extern struct roc_model *roc_model; @@ -157,6 +158,18 @@ roc_model_is_cn20k(void) return roc_model_runtime_is_cn20k(); } +static inline uint16_t +roc_model_optimal_align_sz(void) +{ + if (roc_model_is_cn9k()) + return ROC_ALIGN; + if (roc_model_is_cn10k()) + return ROC_ALIGN; + if (roc_model_is_cn20k()) + return ROC_ALIGN << 1; + return 128; +} + static inline uint64_t roc_model_is_cn98xx(void) { diff --git a/drivers/common/cnxk/roc_nix_priv.h b/drivers/common/cnxk/roc_nix_priv.h index 3fd6fcbe9f..eb64608885 100644 --- a/drivers/common/cnxk/roc_nix_priv.h +++ b/drivers/common/cnxk/roc_nix_priv.h @@ -488,4 +488,12 @@ int nix_tel_node_add_rq(struct roc_nix_rq *rq); int nix_tel_node_add_cq(struct roc_nix_cq *cq); int nix_tel_node_add_sq(struct roc_nix_sq *sq); +/* + * RSS + */ +int nix_rss_reta_pffunc_set(struct roc_nix *roc_nix, uint8_t group, + uint16_t reta[ROC_NIX_RSS_RETA_MAX], uint16_t pf_func); +int nix_rss_flowkey_pffunc_set(struct roc_nix *roc_nix, uint8_t *alg_idx, uint32_t flowkey, + uint8_t group, int mcam_index, uint16_t pf_func); + #endif /* _ROC_NIX_PRIV_H_ */ diff --git a/drivers/common/cnxk/roc_nix_queue.c b/drivers/common/cnxk/roc_nix_queue.c index 06029275af..e852211ba4 100644 --- a/drivers/common/cnxk/roc_nix_queue.c +++ b/drivers/common/cnxk/roc_nix_queue.c @@ -794,9 +794,6 @@ nix_rq_cfg(struct dev *dev, struct roc_nix_rq *rq, uint16_t qints, bool cfg, boo aq->rq.good_utag = rq->tag_mask >> 24; aq->rq.bad_utag = rq->tag_mask >> 24; aq->rq.ltag = rq->tag_mask & BITMASK_ULL(24, 0); - - if (rq->vwqe_ena) - aq->rq.wqe_aura = roc_npa_aura_handle_to_aura(rq->vwqe_aura_handle); } else { /* CQ mode */ aq->rq.sso_ena = 0; @@ -881,8 +878,6 @@ nix_rq_cfg(struct dev *dev, struct roc_nix_rq *rq, uint16_t qints, bool cfg, boo aq->rq_mask.good_utag = ~aq->rq_mask.good_utag; aq->rq_mask.bad_utag = ~aq->rq_mask.bad_utag; aq->rq_mask.ltag = ~aq->rq_mask.ltag; - if (rq->vwqe_ena) - aq->rq_mask.wqe_aura = ~aq->rq_mask.wqe_aura; } else { /* CQ mode */ aq->rq_mask.sso_ena = ~aq->rq_mask.sso_ena; diff --git a/drivers/common/cnxk/roc_nix_rss.c b/drivers/common/cnxk/roc_nix_rss.c index fd1472e9b9..ac5df654ff 100644 --- a/drivers/common/cnxk/roc_nix_rss.c +++ b/drivers/common/cnxk/roc_nix_rss.c @@ -116,7 +116,7 @@ nix_cn9k_rss_reta_set(struct nix *nix, uint8_t group, static int nix_cn10k_rss_reta_set(struct nix *nix, uint8_t group, uint16_t reta[ROC_NIX_RSS_RETA_MAX], - uint8_t lock_rx_ctx) + uint8_t lock_rx_ctx, uint16_t pf_func) { struct mbox *mbox = mbox_get((&nix->dev)->mbox); struct nix_cn10k_aq_enq_req *req; @@ -138,6 +138,9 @@ nix_cn10k_rss_reta_set(struct nix *nix, uint8_t group, uint16_t reta[ROC_NIX_RSS goto exit; } } + if (pf_func) + req->hdr.pcifunc = pf_func; + req->rss.rq = reta[idx]; /* Fill AQ info */ req->qidx = (group * nix->reta_sz) + idx; @@ -161,6 +164,8 @@ nix_cn10k_rss_reta_set(struct nix *nix, uint8_t group, uint16_t reta[ROC_NIX_RSS goto exit; } } + if (pf_func) + req->hdr.pcifunc = pf_func; req->rss.rq = reta[idx]; /* Fill AQ info */ req->qidx = (group * nix->reta_sz) + idx; @@ -180,7 +185,7 @@ nix_cn10k_rss_reta_set(struct nix *nix, uint8_t group, uint16_t reta[ROC_NIX_RSS static int nix_rss_reta_set(struct nix *nix, uint8_t group, uint16_t reta[ROC_NIX_RSS_RETA_MAX], - uint8_t lock_rx_ctx) + uint8_t lock_rx_ctx, uint16_t pf_func) { struct mbox *mbox = 
mbox_get((&nix->dev)->mbox); struct nix_cn20k_aq_enq_req *req; @@ -225,6 +230,8 @@ nix_rss_reta_set(struct nix *nix, uint8_t group, uint16_t reta[ROC_NIX_RSS_RETA_ goto exit; } } + if (pf_func) + req->hdr.pcifunc = pf_func; req->rss.rq = reta[idx]; /* Fill AQ info */ req->qidx = (group * nix->reta_sz) + idx; @@ -243,8 +250,8 @@ nix_rss_reta_set(struct nix *nix, uint8_t group, uint16_t reta[ROC_NIX_RSS_RETA_ } int -roc_nix_rss_reta_set(struct roc_nix *roc_nix, uint8_t group, - uint16_t reta[ROC_NIX_RSS_RETA_MAX]) +nix_rss_reta_pffunc_set(struct roc_nix *roc_nix, uint8_t group, uint16_t reta[ROC_NIX_RSS_RETA_MAX], + uint16_t pf_func) { struct nix *nix = roc_nix_to_nix_priv(roc_nix); int rc; @@ -253,12 +260,11 @@ roc_nix_rss_reta_set(struct roc_nix *roc_nix, uint8_t group, return NIX_ERR_PARAM; if (roc_model_is_cn9k()) - rc = nix_cn9k_rss_reta_set(nix, group, reta, - roc_nix->lock_rx_ctx); + rc = nix_cn9k_rss_reta_set(nix, group, reta, roc_nix->lock_rx_ctx); else if (roc_model_is_cn10k()) - rc = nix_cn10k_rss_reta_set(nix, group, reta, roc_nix->lock_rx_ctx); + rc = nix_cn10k_rss_reta_set(nix, group, reta, roc_nix->lock_rx_ctx, pf_func); else - rc = nix_rss_reta_set(nix, group, reta, roc_nix->lock_rx_ctx); + rc = nix_rss_reta_set(nix, group, reta, roc_nix->lock_rx_ctx, pf_func); if (rc) return rc; @@ -267,8 +273,13 @@ roc_nix_rss_reta_set(struct roc_nix *roc_nix, uint8_t group, } int -roc_nix_rss_reta_get(struct roc_nix *roc_nix, uint8_t group, - uint16_t reta[ROC_NIX_RSS_RETA_MAX]) +roc_nix_rss_reta_set(struct roc_nix *roc_nix, uint8_t group, uint16_t reta[ROC_NIX_RSS_RETA_MAX]) +{ + return nix_rss_reta_pffunc_set(roc_nix, group, reta, 0); +} + +int +roc_nix_rss_reta_get(struct roc_nix *roc_nix, uint8_t group, uint16_t reta[ROC_NIX_RSS_RETA_MAX]) { struct nix *nix = roc_nix_to_nix_priv(roc_nix); @@ -280,8 +291,8 @@ roc_nix_rss_reta_get(struct roc_nix *roc_nix, uint8_t group, } int -roc_nix_rss_flowkey_set(struct roc_nix *roc_nix, uint8_t *alg_idx, - uint32_t flowkey, uint8_t group, int mcam_index) +nix_rss_flowkey_pffunc_set(struct roc_nix *roc_nix, uint8_t *alg_idx, uint32_t flowkey, + uint8_t group, int mcam_index, uint16_t pf_func) { struct nix *nix = roc_nix_to_nix_priv(roc_nix); struct nix_rss_flowkey_cfg_rsp *rss_rsp; @@ -297,6 +308,9 @@ roc_nix_rss_flowkey_set(struct roc_nix *roc_nix, uint8_t *alg_idx, cfg = mbox_alloc_msg_nix_rss_flowkey_cfg(mbox); if (cfg == NULL) goto exit; + if (pf_func) + cfg->hdr.pcifunc = pf_func; + cfg->flowkey_cfg = flowkey; cfg->mcam_index = mcam_index; /* -1 indicates default group */ cfg->group = group; /* 0 is default group */ @@ -311,6 +325,13 @@ roc_nix_rss_flowkey_set(struct roc_nix *roc_nix, uint8_t *alg_idx, return rc; } +int +roc_nix_rss_flowkey_set(struct roc_nix *roc_nix, uint8_t *alg_idx, uint32_t flowkey, uint8_t group, + int mcam_index) +{ + return nix_rss_flowkey_pffunc_set(roc_nix, alg_idx, flowkey, group, mcam_index, 0); +} + int roc_nix_rss_default_setup(struct roc_nix *roc_nix, uint32_t flowkey) { diff --git a/drivers/common/cnxk/roc_npa.c b/drivers/common/cnxk/roc_npa.c index 934d7361a9..a33f9a8499 100644 --- a/drivers/common/cnxk/roc_npa.c +++ b/drivers/common/cnxk/roc_npa.c @@ -17,6 +17,12 @@ roc_npa_lf_init_cb_register(roc_npa_lf_init_cb_t cb) return 0; } +uint16_t +roc_npa_pf_func_get(void) +{ + return idev_npa_pffunc_get(); +} + void roc_npa_pool_op_range_set(uint64_t aura_handle, uint64_t start_iova, uint64_t end_iova) diff --git a/drivers/common/cnxk/roc_npa.h b/drivers/common/cnxk/roc_npa.h index fbf75b2fca..f7cb4460e7 100644 --- 
a/drivers/common/cnxk/roc_npa.h +++ b/drivers/common/cnxk/roc_npa.h @@ -820,6 +820,9 @@ int __roc_api roc_npa_aura_bp_configure(uint64_t aura_id, uint16_t bpid, uint8_t typedef int (*roc_npa_lf_init_cb_t)(struct plt_pci_device *pci_dev); int __roc_api roc_npa_lf_init_cb_register(roc_npa_lf_init_cb_t cb); +/* Utility functions */ +uint16_t __roc_api roc_npa_pf_func_get(void); + /* Debug */ int __roc_api roc_npa_ctx_dump(void); int __roc_api roc_npa_dump(void); diff --git a/drivers/common/cnxk/roc_npc.c b/drivers/common/cnxk/roc_npc.c index 8a951b6360..53f278c764 100644 --- a/drivers/common/cnxk/roc_npc.c +++ b/drivers/common/cnxk/roc_npc.c @@ -389,7 +389,8 @@ roc_npc_fini(struct roc_npc *roc_npc) struct npc *npc = roc_npc_to_npc_priv(roc_npc); int rc; - npc_aging_ctrl_thread_destroy(roc_npc); + if (!roc_npc->flow_age.aged_flows_get_thread_exit) + npc_aging_ctrl_thread_destroy(roc_npc); rc = npc_flow_free_all_resources(npc); if (rc) { @@ -1052,9 +1053,9 @@ npc_rss_free_grp_get(struct npc *npc, uint32_t *pos) } int -npc_rss_action_configure(struct roc_npc *roc_npc, - const struct roc_npc_action_rss *rss, uint8_t *alg_idx, - uint32_t *rss_grp, uint32_t mcam_id) +npc_rss_action_configure(struct roc_npc *roc_npc, const struct roc_npc_action_rss *rss, + uint8_t *alg_idx, uint32_t *rss_grp, uint32_t mcam_id, + uint16_t rss_repte_pf_func) { struct npc *npc = roc_npc_to_npc_priv(roc_npc); struct roc_nix *roc_nix = roc_npc->roc_nix; @@ -1097,7 +1098,7 @@ npc_rss_action_configure(struct roc_npc *roc_npc, if (rc < 0 || rss_grp_idx == 0) return -ENOSPC; - for (i = 0; i < rss->queue_num; i++) { + for (i = 0; (i < rss->queue_num) && !rss_repte_pf_func; i++) { if (rss->queue[i] >= nix->nb_rx_queues) { plt_err("queue id > max number of queues"); return -EINVAL; @@ -1125,10 +1126,9 @@ npc_rss_action_configure(struct roc_npc *roc_npc, rem = nix->reta_sz % rss->queue_num; if (rem) - memcpy(&reta[i * rss->queue_num], rss->queue, - rem * sizeof(uint16_t)); + memcpy(&reta[i * rss->queue_num], rss->queue, rem * sizeof(uint16_t)); - rc = roc_nix_rss_reta_set(roc_nix, *rss_grp, reta); + rc = nix_rss_reta_pffunc_set(roc_nix, *rss_grp, reta, rss_repte_pf_func); if (rc) { plt_err("Failed to init rss table rc = %d", rc); return rc; @@ -1136,8 +1136,8 @@ npc_rss_action_configure(struct roc_npc *roc_npc, flowkey_cfg = roc_npc->flowkey_cfg_state; - rc = roc_nix_rss_flowkey_set(roc_nix, &flowkey_algx, flowkey_cfg, - *rss_grp, mcam_id); + rc = nix_rss_flowkey_pffunc_set(roc_nix, &flowkey_algx, flowkey_cfg, *rss_grp, mcam_id, + rss_repte_pf_func); if (rc) { plt_err("Failed to set rss hash function rc = %d", rc); return rc; @@ -1169,7 +1169,8 @@ npc_rss_action_program(struct roc_npc *roc_npc, for (; actions->type != ROC_NPC_ACTION_TYPE_END; actions++) { if (actions->type == ROC_NPC_ACTION_TYPE_RSS) { rss = (const struct roc_npc_action_rss *)actions->conf; - rc = npc_rss_action_configure(npc, rss, &alg_idx, &rss_grp, flow->mcam_id); + rc = npc_rss_action_configure(npc, rss, &alg_idx, &rss_grp, flow->mcam_id, + actions->rss_repte_pf_func); if (rc) return rc; @@ -1677,6 +1678,7 @@ roc_npc_flow_create(struct roc_npc *roc_npc, const struct roc_npc_attr *attr, goto set_rss_failed; } roc_npc->rep_npc = NULL; + roc_npc->rep_act_pf_func = 0; if (flow->has_age_action) npc_age_flow_list_entry_add(roc_npc, flow); @@ -1699,6 +1701,7 @@ roc_npc_flow_create(struct roc_npc *roc_npc, const struct roc_npc_attr *attr, set_rss_failed: roc_npc->rep_npc = NULL; + roc_npc->rep_act_pf_func = 0; if (flow->use_pre_alloc == 0) { rc = 
roc_npc_mcam_free_entry(roc_npc, flow->mcam_id); if (rc != 0) { @@ -1711,6 +1714,7 @@ roc_npc_flow_create(struct roc_npc *roc_npc, const struct roc_npc_attr *attr, } err_exit: roc_npc->rep_npc = NULL; + roc_npc->rep_act_pf_func = 0; plt_free(flow); return NULL; } diff --git a/drivers/common/cnxk/roc_npc.h b/drivers/common/cnxk/roc_npc.h index 72aada84a1..bf8c65aa9c 100644 --- a/drivers/common/cnxk/roc_npc.h +++ b/drivers/common/cnxk/roc_npc.h @@ -203,6 +203,7 @@ enum roc_npc_action_type { struct roc_npc_action { enum roc_npc_action_type type; /**< Action type. */ const void *conf; /**< Pointer to action configuration object. */ + uint16_t rss_repte_pf_func; /**< Per flow tmp var for rss representee pffunc */ }; struct roc_npc_action_sample { diff --git a/drivers/common/cnxk/roc_npc_priv.h b/drivers/common/cnxk/roc_npc_priv.h index 069c625911..e08a34d146 100644 --- a/drivers/common/cnxk/roc_npc_priv.h +++ b/drivers/common/cnxk/roc_npc_priv.h @@ -484,7 +484,8 @@ int npc_process_ipv6_field_hash(const struct roc_npc_flow_item_ipv6 *ipv6_spec, const struct roc_npc_flow_item_ipv6 *ipv6_mask, struct npc_parse_state *pst, uint8_t type); int npc_rss_action_configure(struct roc_npc *roc_npc, const struct roc_npc_action_rss *rss, - uint8_t *alg_idx, uint32_t *rss_grp, uint32_t mcam_id); + uint8_t *alg_idx, uint32_t *rss_grp, uint32_t mcam_id, + uint16_t rss_repte_pf_func); int npc_rss_action_program(struct roc_npc *roc_npc, const struct roc_npc_action actions[], struct roc_npc_flow *flow); int npc_rss_group_free(struct npc *npc, struct roc_npc_flow *flow); diff --git a/drivers/common/cnxk/roc_priv.h b/drivers/common/cnxk/roc_priv.h index 254a2d3310..8732a05ac3 100644 --- a/drivers/common/cnxk/roc_priv.h +++ b/drivers/common/cnxk/roc_priv.h @@ -53,4 +53,6 @@ /* ML */ #include "roc_ml_priv.h" +/* RVU LF */ +#include "roc_rvu_lf_priv.h" #endif /* _ROC_PRIV_H_ */ diff --git a/drivers/common/cnxk/roc_rvu_lf.c b/drivers/common/cnxk/roc_rvu_lf.c new file mode 100644 index 0000000000..862a201135 --- /dev/null +++ b/drivers/common/cnxk/roc_rvu_lf.c @@ -0,0 +1,202 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(C) 2024 Marvell. 
+ */ + +#include "roc_api.h" +#include "roc_priv.h" + +int +roc_rvu_lf_dev_init(struct roc_rvu_lf *roc_rvu_lf) +{ + struct plt_pci_device *pci_dev; + struct dev *dev; + struct rvu_lf *rvu; + int rc; + + if (roc_rvu_lf == NULL || roc_rvu_lf->pci_dev == NULL) + return RVU_ERR_PARAM; + + rvu = roc_rvu_lf_to_rvu_priv(roc_rvu_lf); + pci_dev = roc_rvu_lf->pci_dev; + dev = &rvu->dev; + + if (rvu->dev.drv_inited) + return 0; + + if (dev->mbox_active) + goto skip_dev_init; + + memset(rvu, 0, sizeof(*rvu)); + + /* Initialize device */ + rc = dev_init(dev, pci_dev); + if (rc) { + plt_err("Failed to init roc device"); + goto fail; + } + +skip_dev_init: + dev->roc_rvu_lf = roc_rvu_lf; + rvu->pci_dev = pci_dev; + + roc_idev_rvu_lf_set(roc_rvu_lf); + rvu->dev.drv_inited = true; + + return 0; +fail: + return rc; +} + +int +roc_rvu_lf_dev_fini(struct roc_rvu_lf *roc_rvu_lf) +{ + struct rvu_lf *rvu = roc_rvu_lf_to_rvu_priv(roc_rvu_lf); + + if (rvu == NULL) + return NIX_ERR_PARAM; + + rvu->dev.drv_inited = false; + + roc_idev_rvu_lf_free(roc_rvu_lf); + + return dev_fini(&rvu->dev, rvu->pci_dev); +} + +uint16_t +roc_rvu_lf_pf_func_get(struct roc_rvu_lf *roc_rvu_lf) +{ + struct rvu_lf *rvu = roc_rvu_lf_to_rvu_priv(roc_rvu_lf); + struct dev *dev = &rvu->dev; + + return dev->pf_func; +} + +int +roc_rvu_lf_msg_id_range_set(struct roc_rvu_lf *roc_rvu_lf, uint16_t from, uint16_t to) +{ + struct rvu_lf *rvu = roc_rvu_lf_to_rvu_priv(roc_rvu_lf); + + if (from <= MBOX_MSG_GENERIC_MAX_ID || from > to) + return -EINVAL; + + rvu->msg_id_from = from; + rvu->msg_id_to = to; + + return 0; +} + +bool +roc_rvu_lf_msg_id_range_check(struct roc_rvu_lf *roc_rvu_lf, uint16_t msg_id) +{ + struct rvu_lf *rvu; + + if (roc_rvu_lf == NULL) + return 0; + + rvu = roc_rvu_lf_to_rvu_priv(roc_rvu_lf); + + if (msg_id > rvu->msg_id_from && msg_id < rvu->msg_id_to) + return 1; + + return 0; +} + +int +roc_rvu_lf_msg_process(struct roc_rvu_lf *roc_rvu_lf, uint16_t vf, uint16_t msg_id, + void *req_data, uint16_t req_len, void *rsp_data, uint16_t rsp_len) +{ + struct rvu_lf *rvu = roc_rvu_lf_to_rvu_priv(roc_rvu_lf); + struct mbox *mbox; + struct rvu_lf_msg *req; + struct rvu_lf_msg *rsp; + int rc = -ENOSPC; + int devid = 0; + + if (rvu->dev.vf == -1 && roc_rvu_lf_msg_id_range_check(roc_rvu_lf, msg_id)) { + /* This is PF context */ + if (vf >= rvu->dev.maxvf) + return -EINVAL; + devid = vf; + mbox = mbox_get(&rvu->dev.mbox_vfpf_up); + } else { + /* This is VF context */ + devid = 0; /* VF send all message to PF */ + mbox = mbox_get(rvu->dev.mbox); + } + req = (struct rvu_lf_msg *)mbox_alloc_msg_rsp(mbox, devid, + req_len + sizeof(struct rvu_lf_msg), + rsp_len + sizeof(struct rvu_lf_msg)); + if (!req) + goto fail; + mbox_memcpy(req->data, req_data, req_len); + req->hdr.sig = MBOX_REQ_SIG; + req->hdr.id = msg_id; + req->hdr.pcifunc = roc_rvu_lf_pf_func_get(roc_rvu_lf); + + if (rvu->dev.vf == -1) { + mbox_msg_send_up(mbox, devid); + rc = mbox_get_rsp(mbox, devid, (void *)&rsp); + if (rc) + goto fail; + } else { + rc = mbox_process_msg(mbox, (void *)&rsp); + if (rc) + goto fail; + } + if (rsp_len && rsp_data != NULL) + mbox_memcpy(rsp_data, rsp->data, rsp_len); +fail: + mbox_put(mbox); + return rc; +} + +int +roc_rvu_lf_irq_register(struct roc_rvu_lf *roc_rvu_lf, unsigned int irq, + roc_rvu_lf_intr_cb_fn cb, void *data) +{ + struct rvu_lf *rvu = roc_rvu_lf_to_rvu_priv(roc_rvu_lf); + struct plt_intr_handle *handle; + + handle = rvu->pci_dev->intr_handle; + + return dev_irq_register(handle, (plt_intr_callback_fn)cb, data, irq); +} + +int 
+roc_rvu_lf_irq_unregister(struct roc_rvu_lf *roc_rvu_lf, unsigned int irq, + roc_rvu_lf_intr_cb_fn cb, void *data) +{ + struct rvu_lf *rvu = roc_rvu_lf_to_rvu_priv(roc_rvu_lf); + struct plt_intr_handle *handle; + + handle = rvu->pci_dev->intr_handle; + + dev_irq_unregister(handle, (plt_intr_callback_fn)cb, data, irq); + + return 0; +} + +int +roc_rvu_lf_msg_handler_register(struct roc_rvu_lf *roc_rvu_lf, roc_rvu_lf_msg_handler_cb_fn cb) +{ + struct rvu_lf *rvu = roc_rvu_lf_to_rvu_priv(roc_rvu_lf); + struct dev *dev = &rvu->dev; + + if (cb == NULL) + return -EINVAL; + + dev->ops->msg_process_cb = (msg_process_cb_t)cb; + + return 0; +} + +int +roc_rvu_lf_msg_handler_unregister(struct roc_rvu_lf *roc_rvu_lf) +{ + struct rvu_lf *rvu = roc_rvu_lf_to_rvu_priv(roc_rvu_lf); + struct dev *dev = &rvu->dev; + + dev->ops->msg_process_cb = NULL; + + return 0; +} diff --git a/drivers/common/cnxk/roc_rvu_lf.h b/drivers/common/cnxk/roc_rvu_lf.h new file mode 100644 index 0000000000..8c71876cbc --- /dev/null +++ b/drivers/common/cnxk/roc_rvu_lf.h @@ -0,0 +1,46 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(C) 2024 Marvell. + */ + +#ifndef _ROC_RVU_LF_H_ +#define _ROC_RVU_LF_H_ + +#include "roc_platform.h" + +struct roc_rvu_lf { + TAILQ_ENTRY(roc_rvu_lf) next; + struct plt_pci_device *pci_dev; + uint8_t idx; +#define ROC_RVU_MEM_SZ (6 * 1024) + uint8_t reserved[ROC_RVU_MEM_SZ] __plt_cache_aligned; +}; + +TAILQ_HEAD(roc_rvu_lf_head, roc_rvu_lf); + +/* Dev */ +int __roc_api roc_rvu_lf_dev_init(struct roc_rvu_lf *roc_rvu_lf); +int __roc_api roc_rvu_lf_dev_fini(struct roc_rvu_lf *roc_rvu_lf); + +uint16_t __roc_api roc_rvu_lf_pf_func_get(struct roc_rvu_lf *roc_rvu_lf); + +int __roc_api roc_rvu_lf_msg_process(struct roc_rvu_lf *roc_rvu_lf, + uint16_t vf, uint16_t msg_id, + void *req_data, uint16_t req_len, + void *rsp_data, uint16_t rsp_len); + +int __roc_api roc_rvu_lf_msg_id_range_set(struct roc_rvu_lf *roc_rvu_lf, + uint16_t from, uint16_t to); +bool __roc_api roc_rvu_lf_msg_id_range_check(struct roc_rvu_lf *roc_rvu_lf, uint16_t msg_id); +typedef void (*roc_rvu_lf_intr_cb_fn)(void *cb_arg); +typedef int (*roc_rvu_lf_msg_handler_cb_fn)(uint16_t vf, uint16_t msg_id, + void *req, uint16_t req_len, + void **rsp, uint16_t *rsp_len); + +int __roc_api roc_rvu_lf_irq_register(struct roc_rvu_lf *roc_rvu_lf, unsigned int irq, + roc_rvu_lf_intr_cb_fn cb, void *cb_arg); +int __roc_api roc_rvu_lf_irq_unregister(struct roc_rvu_lf *roc_rvu_lf, unsigned int irq, + roc_rvu_lf_intr_cb_fn cb, void *cb_arg); +int __roc_api roc_rvu_lf_msg_handler_register(struct roc_rvu_lf *roc_rvu_lf, + roc_rvu_lf_msg_handler_cb_fn cb); +int __roc_api roc_rvu_lf_msg_handler_unregister(struct roc_rvu_lf *roc_rvu_lf); +#endif /* _ROC_RVU_LF_H_ */ diff --git a/drivers/common/cnxk/roc_rvu_lf_priv.h b/drivers/common/cnxk/roc_rvu_lf_priv.h new file mode 100644 index 0000000000..57bb713b21 --- /dev/null +++ b/drivers/common/cnxk/roc_rvu_lf_priv.h @@ -0,0 +1,37 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(C) 2024 Marvell. 
+ */ + +#ifndef _ROC_RVU_LF_PRIV_H_ +#define _ROC_RVU_LF_PRIV_H_ + +enum rvu_err_status { + RVU_ERR_PARAM = -1, + RVU_ERR_NO_MEM = -2, +}; + +struct rvu_lf { + struct plt_pci_device *pci_dev; + struct dev dev; + uint16_t msg_id_from; + uint16_t msg_id_to; +}; + +struct rvu_lf_msg { + struct mbox_msghdr hdr; + uint8_t data[]; +}; + +static inline struct rvu_lf * +roc_rvu_lf_to_rvu_priv(struct roc_rvu_lf *roc_rvu_lf) +{ + return (struct rvu_lf *)&roc_rvu_lf->reserved[0]; +} + +static inline struct roc_rvu_lf * +rvu_priv_to_roc_rvu_lf(struct rvu_lf *rvu_lf) +{ + return (struct roc_rvu_lf *)((char *)rvu_lf - offsetof(struct roc_rvu_lf, reserved)); +} + +#endif /* _ROC_RVU_LF_PRIV_H_ */ diff --git a/drivers/common/cnxk/roc_sso.c b/drivers/common/cnxk/roc_sso.c index 2e3b134bfc..4996329018 100644 --- a/drivers/common/cnxk/roc_sso.c +++ b/drivers/common/cnxk/roc_sso.c @@ -191,7 +191,7 @@ sso_rsrc_get(struct roc_sso *roc_sso) goto exit; } - roc_sso->max_hwgrp = rsrc_cnt->sso; + roc_sso->max_hwgrp = PLT_MIN(rsrc_cnt->sso, roc_sso->feat.hwgrps_per_pf); roc_sso->max_hws = rsrc_cnt->ssow; rc = 0; @@ -200,6 +200,37 @@ sso_rsrc_get(struct roc_sso *roc_sso) return rc; } +static int +sso_hw_info_get(struct roc_sso *roc_sso) +{ + struct dev *dev = &roc_sso_to_sso_priv(roc_sso)->dev; + struct mbox *mbox = mbox_get(dev->mbox); + struct sso_hw_info *rsp; + int rc; + + mbox_alloc_msg_sso_get_hw_info(mbox); + rc = mbox_process_msg(mbox, (void **)&rsp); + if (rc && rc != MBOX_MSG_INVALID) { + plt_err("Failed to get SSO HW info"); + rc = -EIO; + goto exit; + } + + if (rc == MBOX_MSG_INVALID) { + roc_sso->feat.hwgrps_per_pf = ROC_SSO_MAX_HWGRP_PER_PF; + } else { + mbox_memcpy(&roc_sso->feat, &rsp->feat, sizeof(roc_sso->feat)); + + if (!roc_sso->feat.hwgrps_per_pf) + roc_sso->feat.hwgrps_per_pf = ROC_SSO_MAX_HWGRP_PER_PF; + } + + rc = 0; +exit: + mbox_put(mbox); + return rc; +} + void sso_hws_link_modify(uint8_t hws, uintptr_t base, struct plt_bitmap *bmp, uint16_t hwgrp[], uint16_t n, uint8_t set, uint16_t enable) @@ -319,6 +350,12 @@ roc_sso_hwgrp_base_get(struct roc_sso *roc_sso, uint16_t hwgrp) return dev->bar2 + (RVU_BLOCK_ADDR_SSO << 20 | hwgrp << 12); } +uint16_t +roc_sso_pf_func_get(void) +{ + return idev_sso_pffunc_get(); +} + uint64_t roc_sso_ns_to_gw(uint64_t base, uint64_t ns) { @@ -463,9 +500,231 @@ roc_sso_hws_gwc_invalidate(struct roc_sso *roc_sso, uint8_t *hws, mbox_put(mbox); } +static void +sso_agq_op_wait(struct roc_sso *roc_sso, uint16_t hwgrp) +{ + uint64_t reg; + + reg = plt_read64(roc_sso_hwgrp_base_get(roc_sso, hwgrp) + SSO_LF_GGRP_AGGR_CTX_INSTOP); + while (reg & BIT_ULL(2)) { + plt_delay_us(100); + reg = plt_read64(roc_sso_hwgrp_base_get(roc_sso, hwgrp) + + SSO_LF_GGRP_AGGR_CTX_INSTOP); + } +} + +int +roc_sso_hwgrp_agq_alloc(struct roc_sso *roc_sso, uint16_t hwgrp, struct roc_sso_agq_data *data) +{ + struct sso *sso = roc_sso_to_sso_priv(roc_sso); + struct sso_aggr_setconfig *req; + struct sso_agq_ctx *ctx; + uint32_t cnt, off; + struct mbox *mbox; + uintptr_t ptr; + uint64_t reg; + int rc; + + if (sso->agg_mem[hwgrp] == 0) { + mbox = mbox_get(sso->dev.mbox); + req = mbox_alloc_msg_sso_aggr_setconfig(mbox); + if (req == NULL) { + mbox_process(mbox); + req = mbox_alloc_msg_sso_aggr_setconfig(mbox); + if (req == NULL) { + plt_err("Failed to allocate AGQ config mbox."); + mbox_put(mbox); + return -EIO; + } + } + + req->hwgrp = hwgrp; + req->npa_pf_func = idev_npa_pffunc_get(); + rc = mbox_process(mbox); + if (rc < 0) { + plt_err("Failed to set HWGRP AGQ config rc=%d", rc); + mbox_put(mbox); + 
return rc; + } + + mbox_put(mbox); + + sso->agg_mem[hwgrp] = + (uintptr_t)plt_zmalloc(SSO_AGGR_MIN_CTX * sizeof(struct sso_agq_ctx), + roc_model_optimal_align_sz()); + if (sso->agg_mem[hwgrp] == 0) + return -ENOMEM; + sso->agg_cnt[hwgrp] = SSO_AGGR_MIN_CTX; + sso->agg_used[hwgrp] = 0; + plt_wmb(); + plt_write64(sso->agg_mem[hwgrp], + roc_sso_hwgrp_base_get(roc_sso, hwgrp) + SSO_LF_GGRP_AGGR_CTX_BASE); + reg = (plt_log2_u32(SSO_AGGR_MIN_CTX) - 6) << 16; + reg |= (SSO_AGGR_DEF_TMO << 4) | 1; + plt_write64(reg, roc_sso_hwgrp_base_get(roc_sso, hwgrp) + SSO_LF_GGRP_AGGR_CFG); + } + + if (sso->agg_cnt[hwgrp] >= SSO_AGGR_MAX_CTX) + return -ENOSPC; + + if (sso->agg_cnt[hwgrp] == sso->agg_used[hwgrp]) { + ptr = sso->agg_mem[hwgrp]; + cnt = sso->agg_cnt[hwgrp] << 1; + sso->agg_mem[hwgrp] = (uintptr_t)plt_zmalloc(cnt * sizeof(struct sso_agq_ctx), + roc_model_optimal_align_sz()); + if (sso->agg_mem[hwgrp] == 0) { + sso->agg_mem[hwgrp] = ptr; + return -ENOMEM; + } + + memcpy((void *)sso->agg_mem[hwgrp], (void *)ptr, + sso->agg_cnt[hwgrp] * sizeof(struct sso_agq_ctx)); + plt_wmb(); + sso_agq_op_wait(roc_sso, hwgrp); + /* Base address has changed, evict old entries. */ + plt_write64(sso->agg_mem[hwgrp], + roc_sso_hwgrp_base_get(roc_sso, hwgrp) + SSO_LF_GGRP_AGGR_CTX_BASE); + reg = plt_read64(roc_sso_hwgrp_base_get(roc_sso, hwgrp) + SSO_LF_GGRP_AGGR_CFG); + reg &= ~GENMASK_ULL(19, 16); + reg |= (uint64_t)(plt_log2_u32(cnt) - 6) << 16; + plt_write64(reg, roc_sso_hwgrp_base_get(roc_sso, hwgrp) + SSO_LF_GGRP_AGGR_CFG); + reg = SSO_LF_AGGR_INSTOP_GLOBAL_EVICT << 4; + plt_write64(reg, + roc_sso_hwgrp_base_get(roc_sso, hwgrp) + SSO_LF_GGRP_AGGR_CTX_INSTOP); + sso_agq_op_wait(roc_sso, hwgrp); + plt_free((void *)ptr); + + sso->agg_cnt[hwgrp] = cnt; + off = sso->agg_used[hwgrp]; + } else { + ctx = (struct sso_agq_ctx *)sso->agg_mem[hwgrp]; + for (cnt = 0; cnt < sso->agg_cnt[hwgrp]; cnt++) { + if (!ctx[cnt].ena) + break; + } + if (cnt == sso->agg_cnt[hwgrp]) + return -EINVAL; + off = cnt; + } + + ctx = (struct sso_agq_ctx *)sso->agg_mem[hwgrp]; + ctx += off; + ctx->ena = 1; + ctx->tt = data->tt; + ctx->tag = data->tag; + ctx->swqe_tag = data->stag; + ctx->cnt_ena = data->cnt_ena; + ctx->xqe_type = data->xqe_type; + ctx->vtimewait = data->vwqe_wait_tmo; + ctx->vwqe_aura = data->vwqe_aura; + ctx->max_vsize_exp = data->vwqe_max_sz_exp - 2; + + plt_wmb(); + sso->agg_used[hwgrp]++; + + return 0; +} + +void +roc_sso_hwgrp_agq_free(struct roc_sso *roc_sso, uint16_t hwgrp, uint32_t agq_id) +{ + struct sso *sso = roc_sso_to_sso_priv(roc_sso); + struct sso_agq_ctx *ctx; + uint64_t reg; + + ctx = (struct sso_agq_ctx *)sso->agg_mem[hwgrp]; + ctx += agq_id; + + if (!ctx->ena) + return; + + reg = SSO_LF_AGGR_INSTOP_FLUSH << 4; + reg |= (uint64_t)(agq_id << 8); + + plt_write64(reg, roc_sso_hwgrp_base_get(roc_sso, hwgrp) + SSO_LF_GGRP_AGGR_CTX_INSTOP); + sso_agq_op_wait(roc_sso, hwgrp); + + memset(ctx, 0, sizeof(struct sso_agq_ctx)); + plt_wmb(); + sso->agg_used[hwgrp]--; + + /* Flush the context from CTX Cache */ + reg = SSO_LF_AGGR_INSTOP_EVICT << 4; + reg |= (uint64_t)(agq_id << 8); + + plt_write64(reg, roc_sso_hwgrp_base_get(roc_sso, hwgrp) + SSO_LF_GGRP_AGGR_CTX_INSTOP); + sso_agq_op_wait(roc_sso, hwgrp); +} + +void +roc_sso_hwgrp_agq_release(struct roc_sso *roc_sso, uint16_t hwgrp) +{ + struct sso *sso = roc_sso_to_sso_priv(roc_sso); + struct sso_aggr_setconfig *req; + struct sso_agq_ctx *ctx; + struct mbox *mbox; + uint32_t cnt; + int rc; + + if (!roc_sso->feat.eva_present) + return; + + plt_write64(0, 
roc_sso_hwgrp_base_get(roc_sso, hwgrp) + SSO_LF_GGRP_AGGR_CFG); + ctx = (struct sso_agq_ctx *)sso->agg_mem[hwgrp]; + for (cnt = 0; cnt < sso->agg_cnt[hwgrp]; cnt++) { + if (!ctx[cnt].ena) + continue; + roc_sso_hwgrp_agq_free(roc_sso, hwgrp, cnt); + } + + plt_write64(0, roc_sso_hwgrp_base_get(roc_sso, hwgrp) + SSO_LF_GGRP_AGGR_CTX_BASE); + plt_free((void *)sso->agg_mem[hwgrp]); + sso->agg_mem[hwgrp] = 0; + sso->agg_cnt[hwgrp] = 0; + sso->agg_used[hwgrp] = 0; + + mbox = mbox_get(sso->dev.mbox); + req = mbox_alloc_msg_sso_aggr_setconfig(mbox); + if (req == NULL) { + mbox_process(mbox); + req = mbox_alloc_msg_sso_aggr_setconfig(mbox); + if (req == NULL) { + plt_err("Failed to allocate AGQ config mbox."); + mbox_put(mbox); + return; + } + } + + req->hwgrp = hwgrp; + req->npa_pf_func = 0; + rc = mbox_process(mbox); + if (rc < 0) + plt_err("Failed to set HWGRP AGQ config rc=%d", rc); + mbox_put(mbox); +} + +uint32_t +roc_sso_hwgrp_agq_from_tag(struct roc_sso *roc_sso, uint16_t hwgrp, uint32_t tag_mask, + uint8_t xqe_type) +{ + struct sso *sso = roc_sso_to_sso_priv(roc_sso); + struct sso_agq_ctx *ctx; + uint32_t i; + + plt_rmb(); + ctx = (struct sso_agq_ctx *)sso->agg_mem[hwgrp]; + for (i = 0; i < sso->agg_used[hwgrp]; i++) { + if (!ctx[i].ena) + continue; + if (ctx[i].tag == tag_mask && ctx[i].xqe_type == xqe_type) + return i; + } + + return UINT32_MAX; +} + int -roc_sso_hwgrp_stats_get(struct roc_sso *roc_sso, uint8_t hwgrp, - struct roc_sso_hwgrp_stats *stats) +roc_sso_hwgrp_stats_get(struct roc_sso *roc_sso, uint16_t hwgrp, struct roc_sso_hwgrp_stats *stats) { struct sso *sso = roc_sso_to_sso_priv(roc_sso); struct sso_grp_stats *req_rsp; @@ -670,9 +929,8 @@ roc_sso_hwgrp_init_xaq_aura(struct roc_sso *roc_sso, uint32_t nb_xae) struct dev *dev = &sso->dev; int rc; - rc = sso_hwgrp_init_xaq_aura(dev, &roc_sso->xaq, nb_xae, - roc_sso->xae_waes, roc_sso->xaq_buf_size, - roc_sso->nb_hwgrp); + rc = sso_hwgrp_init_xaq_aura(dev, &roc_sso->xaq, nb_xae, roc_sso->feat.xaq_wq_entries, + roc_sso->feat.xaq_buf_size, roc_sso->nb_hwgrp); return rc; } @@ -834,7 +1092,10 @@ sso_update_msix_vec_count(struct roc_sso *roc_sso, uint16_t sso_vec_cnt) if (idev == NULL) return -ENODEV; - mbox_vec_cnt = RVU_PF_INT_VEC_AFPF_MBOX + 1; + if (roc_model_is_cn20k()) + mbox_vec_cnt = RVU_MBOX_PF_INT_VEC_AFPF_MBOX + 1; + else + mbox_vec_cnt = RVU_PF_INT_VEC_AFPF_MBOX + 1; /* Allocating vectors for the first time */ if (plt_intr_max_intr_get(pci_dev->intr_handle) == 0) { @@ -953,9 +1214,11 @@ roc_sso_rsrc_init(struct roc_sso *roc_sso, uint8_t nb_hws, uint16_t nb_hwgrp, ui goto hwgrp_alloc_fail; } - roc_sso->xaq_buf_size = rsp_hwgrp->xaq_buf_size; - roc_sso->xae_waes = rsp_hwgrp->xaq_wq_entries; - roc_sso->iue = rsp_hwgrp->in_unit_entries; + if (!roc_sso->feat.xaq_buf_size || !roc_sso->feat.xaq_wq_entries || !roc_sso->feat.iue) { + roc_sso->feat.xaq_buf_size = rsp_hwgrp->xaq_buf_size; + roc_sso->feat.xaq_wq_entries = rsp_hwgrp->xaq_wq_entries; + roc_sso->feat.iue = rsp_hwgrp->in_unit_entries; + } rc = sso_msix_fill(roc_sso, nb_hws, nb_hwgrp); if (rc < 0) { @@ -979,7 +1242,10 @@ roc_sso_rsrc_init(struct roc_sso *roc_sso, uint8_t nb_hws, uint16_t nb_hwgrp, ui } /* 2 error interrupt per TIM LF */ - sso_vec_cnt += 2 * nb_tim_lfs; + if (roc_model_is_cn20k()) + sso_vec_cnt += 3 * nb_tim_lfs; + else + sso_vec_cnt += 2 * nb_tim_lfs; rc = sso_update_msix_vec_count(roc_sso, sso_vec_cnt); if (rc < 0) { @@ -1014,10 +1280,14 @@ void roc_sso_rsrc_fini(struct roc_sso *roc_sso) { struct sso *sso = roc_sso_to_sso_priv(roc_sso); + uint32_t cnt; if 
(!roc_sso->nb_hws && !roc_sso->nb_hwgrp) return; + for (cnt = 0; cnt < roc_sso->nb_hwgrp; cnt++) + roc_sso_hwgrp_agq_release(roc_sso, cnt); + sso_unregister_irqs_priv(roc_sso, sso->pci_dev->intr_handle, roc_sso->nb_hws, roc_sso->nb_hwgrp); sso_lf_free(&sso->dev, SSO_LF_TYPE_HWS, roc_sso->nb_hws); @@ -1059,6 +1329,12 @@ roc_sso_dev_init(struct roc_sso *roc_sso) goto fail; } + rc = sso_hw_info_get(roc_sso); + if (rc < 0) { + plt_err("Failed to get SSO HW info"); + goto fail; + } + rc = sso_rsrc_get(roc_sso); if (rc < 0) { plt_err("Failed to get SSO resources"); diff --git a/drivers/common/cnxk/roc_sso.h b/drivers/common/cnxk/roc_sso.h index 4ac901762e..f73128087a 100644 --- a/drivers/common/cnxk/roc_sso.h +++ b/drivers/common/cnxk/roc_sso.h @@ -8,7 +8,7 @@ #include "hw/ssow.h" #define ROC_SSO_AW_PER_LMT_LINE_LOG2 3 -#define ROC_SSO_XAE_PER_XAQ 352 +#define ROC_SSO_MAX_HWGRP_PER_PF 256 struct roc_sso_hwgrp_qos { uint16_t hwgrp; @@ -47,6 +47,17 @@ struct roc_sso_xaq_data { void *mem; }; +struct roc_sso_agq_data { + uint8_t tt; + uint8_t cnt_ena; + uint8_t xqe_type; + uint16_t stag; + uint32_t tag; + uint32_t vwqe_max_sz_exp; + uint64_t vwqe_wait_tmo; + uint64_t vwqe_aura; +}; + struct roc_sso { struct plt_pci_device *pci_dev; /* Public data. */ @@ -57,9 +68,7 @@ struct roc_sso { uintptr_t lmt_base; struct roc_sso_xaq_data xaq; /* HW Const. */ - uint32_t xae_waes; - uint32_t xaq_buf_size; - uint32_t iue; + struct sso_feat_info feat; /* Private data. */ #define ROC_SSO_MEM_SZ (16 * 1024) uint8_t reserved[ROC_SSO_MEM_SZ] __plt_cache_aligned; @@ -102,11 +111,20 @@ int __roc_api roc_sso_hwgrp_stash_config(struct roc_sso *roc_sso, uint16_t nb_stash); void __roc_api roc_sso_hws_gwc_invalidate(struct roc_sso *roc_sso, uint8_t *hws, uint8_t nb_hws); +int __roc_api roc_sso_hwgrp_agq_alloc(struct roc_sso *roc_sso, uint16_t hwgrp, + struct roc_sso_agq_data *data); +void __roc_api roc_sso_hwgrp_agq_free(struct roc_sso *roc_sso, uint16_t hwgrp, uint32_t agq_id); +void __roc_api roc_sso_hwgrp_agq_release(struct roc_sso *roc_sso, uint16_t hwgrp); +uint32_t __roc_api roc_sso_hwgrp_agq_from_tag(struct roc_sso *roc_sso, uint16_t hwgrp, uint32_t tag, + uint8_t xqe_type); + +/* Utility function */ +uint16_t __roc_api roc_sso_pf_func_get(void); /* Debug */ void __roc_api roc_sso_dump(struct roc_sso *roc_sso, uint8_t nb_hws, uint16_t hwgrp, FILE *f); -int __roc_api roc_sso_hwgrp_stats_get(struct roc_sso *roc_sso, uint8_t hwgrp, +int __roc_api roc_sso_hwgrp_stats_get(struct roc_sso *roc_sso, uint16_t hwgrp, struct roc_sso_hwgrp_stats *stats); int __roc_api roc_sso_hws_stats_get(struct roc_sso *roc_sso, uint8_t hws, struct roc_sso_hws_stats *stats); diff --git a/drivers/common/cnxk/roc_sso_priv.h b/drivers/common/cnxk/roc_sso_priv.h index 21c59c57e6..d6dc6dedd3 100644 --- a/drivers/common/cnxk/roc_sso_priv.h +++ b/drivers/common/cnxk/roc_sso_priv.h @@ -13,6 +13,10 @@ struct sso_rsrc { struct sso { struct plt_pci_device *pci_dev; struct dev dev; + /* EVA memory area */ + uintptr_t agg_mem[MAX_RVU_BLKLF_CNT]; + uint32_t agg_used[MAX_RVU_BLKLF_CNT]; + uint32_t agg_cnt[MAX_RVU_BLKLF_CNT]; /* Interrupt handler args. 
*/ struct sso_rsrc hws_rsrc[MAX_RVU_BLKLF_CNT]; struct sso_rsrc hwgrp_rsrc[MAX_RVU_BLKLF_CNT]; diff --git a/drivers/common/cnxk/roc_tim.c b/drivers/common/cnxk/roc_tim.c index 83228fb2b6..a1461fedb1 100644 --- a/drivers/common/cnxk/roc_tim.c +++ b/drivers/common/cnxk/roc_tim.c @@ -5,6 +5,8 @@ #include "roc_api.h" #include "roc_priv.h" +#define LF_ENABLE_RETRY_CNT 8 + static int tim_fill_msix(struct roc_tim *roc_tim, uint16_t nb_ring) { @@ -86,8 +88,11 @@ tim_err_desc(int rc) case TIM_AF_RING_ALREADY_DISABLED: plt_err("Ring already stopped"); break; + case TIM_AF_LF_START_SYNC_FAIL: + plt_err("Ring start sync failed."); + break; default: - plt_err("Unknown Error."); + plt_err("Unknown Error: %d", rc); } } @@ -123,10 +128,12 @@ roc_tim_lf_enable(struct roc_tim *roc_tim, uint8_t ring_id, uint64_t *start_tsc, struct sso *sso = roc_sso_to_sso_priv(roc_tim->roc_sso); struct dev *dev = &sso->dev; struct mbox *mbox = mbox_get(dev->mbox); + uint8_t retry_cnt = LF_ENABLE_RETRY_CNT; struct tim_enable_rsp *rsp; struct tim_ring_req *req; int rc = -ENOSPC; +retry: req = mbox_alloc_msg_tim_enable_ring(mbox); if (req == NULL) goto fail; @@ -134,6 +141,9 @@ roc_tim_lf_enable(struct roc_tim *roc_tim, uint8_t ring_id, uint64_t *start_tsc, rc = mbox_process_msg(dev->mbox, (void **)&rsp); if (rc) { + if (rc == TIM_AF_LF_START_SYNC_FAIL && retry_cnt--) + goto retry; + tim_err_desc(rc); rc = -EIO; goto fail; @@ -183,10 +193,9 @@ roc_tim_lf_base_get(struct roc_tim *roc_tim, uint8_t ring_id) } int -roc_tim_lf_config(struct roc_tim *roc_tim, uint8_t ring_id, - enum roc_tim_clk_src clk_src, uint8_t ena_periodic, - uint8_t ena_dfb, uint32_t bucket_sz, uint32_t chunk_sz, - uint32_t interval, uint64_t intervalns, uint64_t clockfreq) +roc_tim_lf_config(struct roc_tim *roc_tim, uint8_t ring_id, enum roc_tim_clk_src clk_src, + uint8_t ena_periodic, uint8_t ena_dfb, uint32_t bucket_sz, uint32_t chunk_sz, + uint64_t interval, uint64_t intervalns, uint64_t clockfreq) { struct sso *sso = roc_sso_to_sso_priv(roc_tim->roc_sso); struct dev *dev = &sso->dev; @@ -204,7 +213,8 @@ roc_tim_lf_config(struct roc_tim *roc_tim, uint8_t ring_id, req->clocksource = clk_src; req->enableperiodic = ena_periodic; req->enabledontfreebuffer = ena_dfb; - req->interval = interval; + req->interval_lo = interval; + req->interval_hi = interval >> 32; req->intervalns = intervalns; req->clockfreq = clockfreq; req->gpioedge = TIM_GPIO_LTOH_TRANS; @@ -220,6 +230,41 @@ roc_tim_lf_config(struct roc_tim *roc_tim, uint8_t ring_id, return rc; } +int +roc_tim_lf_config_hwwqe(struct roc_tim *roc_tim, uint8_t ring_id, struct roc_tim_hwwqe_cfg *cfg) +{ + struct sso *sso = roc_sso_to_sso_priv(roc_tim->roc_sso); + struct dev *dev = &sso->dev; + struct mbox *mbox = mbox_get(dev->mbox); + struct tim_cfg_hwwqe_req *req; + int rc = -ENOSPC; + + req = mbox_alloc_msg_tim_config_hwwqe(mbox); + if (req == NULL) + goto fail; + req->ring = ring_id; + req->hwwqe_ena = cfg->hwwqe_ena; + req->grp_ena = cfg->grp_ena; + req->grp_tmo_cntr = cfg->grp_tmo_cyc; + req->flw_ctrl_ena = cfg->flw_ctrl_ena; + req->result_offset = cfg->result_offset; + req->event_count_offset = cfg->event_count_offset; + + req->wqe_rd_clr_ena = 1; + req->npa_tmo_cntr = TIM_NPA_TMO; + req->ins_min_gap = TIM_BUCKET_MIN_GAP; + + rc = mbox_process(mbox); + if (rc) { + tim_err_desc(rc); + rc = -EIO; + } + +fail: + mbox_put(mbox); + return rc; +} + int roc_tim_lf_interval(struct roc_tim *roc_tim, enum roc_tim_clk_src clk_src, uint64_t clockfreq, uint64_t *intervalns, @@ -353,6 +398,31 @@ 
tim_free_lf_count_get(struct dev *dev, uint16_t *nb_lfs) return 0; } +static int +tim_hw_info_get(struct roc_tim *roc_tim) +{ + struct dev *dev = &roc_sso_to_sso_priv(roc_tim->roc_sso)->dev; + struct mbox *mbox = mbox_get(dev->mbox); + struct tim_hw_info *rsp; + int rc; + + mbox_alloc_msg_tim_get_hw_info(mbox); + rc = mbox_process_msg(mbox, (void **)&rsp); + if (rc && rc != MBOX_MSG_INVALID) { + plt_err("Failed to get TIM HW info"); + rc = -EIO; + goto exit; + } + + if (rc != MBOX_MSG_INVALID) + mbox_memcpy(&roc_tim->feat, &rsp->feat, sizeof(roc_tim->feat)); + + rc = 0; +exit: + mbox_put(mbox); + return rc; +} + int roc_tim_init(struct roc_tim *roc_tim) { @@ -372,6 +442,12 @@ roc_tim_init(struct roc_tim *roc_tim) PLT_STATIC_ASSERT(sizeof(struct tim) <= TIM_MEM_SZ); nb_lfs = roc_tim->nb_lfs; + rc = tim_hw_info_get(roc_tim); + if (rc) { + plt_tim_dbg("Failed to get TIM HW info"); + return 0; + } + rc = tim_free_lf_count_get(dev, &nb_free_lfs); if (rc) { plt_tim_dbg("Failed to get TIM resource count"); diff --git a/drivers/common/cnxk/roc_tim.h b/drivers/common/cnxk/roc_tim.h index f9a9ad1887..2eb6e6962b 100644 --- a/drivers/common/cnxk/roc_tim.h +++ b/drivers/common/cnxk/roc_tim.h @@ -19,10 +19,20 @@ enum roc_tim_clk_src { ROC_TIM_CLK_SRC_INVALID, }; +struct roc_tim_hwwqe_cfg { + uint8_t grp_ena; + uint8_t hwwqe_ena; + uint8_t flw_ctrl_ena; + uint16_t grp_tmo_cyc; + uint16_t result_offset; + uint16_t event_count_offset; +}; + struct roc_tim { struct roc_sso *roc_sso; /* Public data. */ uint16_t nb_lfs; + struct tim_feat_info feat; /* Private data. */ #define TIM_MEM_SZ (1 * 1024) uint8_t reserved[TIM_MEM_SZ] __plt_cache_aligned; @@ -36,11 +46,11 @@ int __roc_api roc_tim_lf_enable(struct roc_tim *roc_tim, uint8_t ring_id, uint64_t *start_tsc, uint32_t *cur_bkt); int __roc_api roc_tim_lf_disable(struct roc_tim *roc_tim, uint8_t ring_id); int __roc_api roc_tim_lf_config(struct roc_tim *roc_tim, uint8_t ring_id, - enum roc_tim_clk_src clk_src, - uint8_t ena_periodic, uint8_t ena_dfb, - uint32_t bucket_sz, uint32_t chunk_sz, - uint32_t interval, uint64_t intervalns, - uint64_t clockfreq); + enum roc_tim_clk_src clk_src, uint8_t ena_periodic, uint8_t ena_dfb, + uint32_t bucket_sz, uint32_t chunk_sz, uint64_t interval, + uint64_t intervalns, uint64_t clockfreq); +int __roc_api roc_tim_lf_config_hwwqe(struct roc_tim *roc_tim, uint8_t ring_id, + struct roc_tim_hwwqe_cfg *cfg); int __roc_api roc_tim_lf_interval(struct roc_tim *roc_tim, enum roc_tim_clk_src clk_src, uint64_t clockfreq, uint64_t *intervalns, diff --git a/drivers/common/cnxk/roc_utils.c b/drivers/common/cnxk/roc_utils.c index 9af2ae9b69..bcc0d2de38 100644 --- a/drivers/common/cnxk/roc_utils.c +++ b/drivers/common/cnxk/roc_utils.c @@ -17,11 +17,13 @@ roc_error_msg_get(int errorcode) case NPC_ERR_PARAM: case SSO_ERR_PARAM: case MCS_ERR_PARAM: + case RVU_ERR_PARAM: case UTIL_ERR_PARAM: err_msg = "Invalid parameter"; break; case NIX_ERR_NO_MEM: case NPC_ERR_NO_MEM: + case RVU_ERR_NO_MEM: err_msg = "Out of memory"; break; case NIX_ERR_INVALID_RANGE: diff --git a/drivers/common/cnxk/version.map b/drivers/common/cnxk/version.map index 877333b80c..37b76c020e 100644 --- a/drivers/common/cnxk/version.map +++ b/drivers/common/cnxk/version.map @@ -90,6 +90,7 @@ INTERNAL { roc_dpi_enable; roc_dpi_wait_queue_idle; roc_error_msg_get; + roc_eswitch_is_repte_pfs_vf; roc_eswitch_nix_process_repte_notify_cb_register; roc_eswitch_nix_process_repte_notify_cb_unregister; roc_eswitch_nix_repte_stats; @@ -119,6 +120,9 @@ INTERNAL { roc_idev_nix_rx_chan_set; 
roc_idev_nix_rx_inject_get; roc_idev_nix_rx_inject_set; + roc_idev_rvu_lf_free; + roc_idev_rvu_lf_get; + roc_idev_rvu_lf_set; roc_ml_reg_read64; roc_ml_reg_write64; roc_ml_reg_read32; @@ -449,6 +453,7 @@ INTERNAL { roc_npa_dev_unlock; roc_npa_dump; roc_npa_lf_init_cb_register; + roc_npa_pf_func_get; roc_npa_pool_create; roc_npa_pool_destroy; roc_npa_pool_op_pc_reset; @@ -500,6 +505,10 @@ INTERNAL { roc_sso_dev_fini; roc_sso_dev_init; roc_sso_dump; + roc_sso_hwgrp_agq_alloc; + roc_sso_hwgrp_agq_free; + roc_sso_hwgrp_agq_from_tag; + roc_sso_hwgrp_agq_release; roc_sso_hwgrp_alloc_xaq; roc_sso_hwgrp_base_get; roc_sso_hwgrp_free_xaq_aura; @@ -516,6 +525,7 @@ INTERNAL { roc_sso_hws_gwc_invalidate; roc_sso_hws_unlink; roc_sso_ns_to_gw; + roc_sso_pf_func_get; roc_sso_rsrc_fini; roc_sso_rsrc_init; roc_tim_fini; @@ -523,6 +533,7 @@ INTERNAL { roc_tim_lf_alloc; roc_tim_lf_base_get; roc_tim_lf_config; + roc_tim_lf_config_hwwqe; roc_tim_lf_disable; roc_tim_lf_enable; roc_tim_lf_free; @@ -544,5 +555,16 @@ INTERNAL { roc_ree_rule_db_get; roc_ree_rule_db_len_get; roc_ree_rule_db_prog; + roc_rvu_lf_dev_fini; + roc_rvu_lf_dev_init; + roc_rvu_lf_irq_register; + roc_rvu_lf_irq_unregister; + roc_rvu_lf_msg_handler_register; + roc_rvu_lf_msg_handler_unregister; + roc_rvu_lf_msg_id_range_check; + roc_rvu_lf_msg_id_range_set; + roc_rvu_lf_msg_process; + roc_rvu_lf_pf_func_get; + local: *; }; diff --git a/drivers/common/dpaax/compat.h b/drivers/common/dpaax/compat.h index cbabc1588b..7c8d82c2b2 100644 --- a/drivers/common/dpaax/compat.h +++ b/drivers/common/dpaax/compat.h @@ -30,6 +30,7 @@ #include #include #include + #include #include #include @@ -37,6 +38,7 @@ #include #include #include +#include /* The following definitions are primarily to allow the single-source driver * interfaces to be included by arbitrary program code. Ie. 
for interfaces that @@ -142,8 +144,8 @@ static inline void out_be32(volatile void *__p, u32 val) #define hwsync() rte_rmb() #define lwsync() rte_wmb() -#define dcbt_ro(p) __builtin_prefetch(p, 0) -#define dcbt_rw(p) __builtin_prefetch(p, 1) +#define dcbt_ro(p) rte_prefetch0(p) +#define dcbt_rw(p) rte_prefetch0_write(p) #if defined(RTE_ARCH_ARM) #if defined(RTE_ARCH_64) diff --git a/drivers/common/dpaax/meson.build b/drivers/common/dpaax/meson.build index a162779116..db61b76ce3 100644 --- a/drivers/common/dpaax/meson.build +++ b/drivers/common/dpaax/meson.build @@ -1,5 +1,5 @@ # SPDX-License-Identifier: BSD-3-Clause -# Copyright(c) 2018 NXP +# Copyright 2018, 2024 NXP if not is_linux build = false @@ -16,3 +16,4 @@ endif if cc.has_argument('-Wno-pointer-arith') cflags += '-Wno-pointer-arith' endif +headers = files('rte_pmd_dpaax_qdma.h') diff --git a/drivers/common/dpaax/rte_pmd_dpaax_qdma.h b/drivers/common/dpaax/rte_pmd_dpaax_qdma.h new file mode 100644 index 0000000000..8f2fc83f8d --- /dev/null +++ b/drivers/common/dpaax/rte_pmd_dpaax_qdma.h @@ -0,0 +1,24 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright 2021-2024 NXP + */ + +#ifndef RTE_PMD_DPAAX_QDMA_H +#define RTE_PMD_DPAAX_QDMA_H + +#include + +#define RTE_DPAAX_QDMA_COPY_IDX_OFFSET 8 +#define RTE_DPAAX_QDMA_SG_IDX_ADDR_ALIGN \ + RTE_BIT64(RTE_DPAAX_QDMA_COPY_IDX_OFFSET) +#define RTE_DPAAX_QDMA_SG_IDX_ADDR_MASK \ + (RTE_DPAAX_QDMA_SG_IDX_ADDR_ALIGN - 1) +#define RTE_DPAAX_QDMA_SG_SUBMIT(idx_addr, flag) \ + (((uint64_t)idx_addr) | (flag)) + +#define RTE_DPAAX_QDMA_COPY_SUBMIT(idx, flag) \ + ((idx << RTE_DPAAX_QDMA_COPY_IDX_OFFSET) | (flag)) + +#define RTE_DPAAX_QDMA_JOB_SUBMIT_MAX 64 +#define RTE_DMA_CAPA_DPAAX_QDMA_FLAGS_INDEX RTE_BIT64(63) + +#endif /* RTE_PMD_DPAAX_QDMA_H */ diff --git a/drivers/common/idpf/base/README b/drivers/common/idpf/base/README index ff26f736ec..457b5a81f3 100644 --- a/drivers/common/idpf/base/README +++ b/drivers/common/idpf/base/README @@ -18,4 +18,4 @@ Updating the driver NOTE: The source code in this directory should not be modified apart from the following file(s): - idpf_osdep.h \ No newline at end of file + idpf_osdep.h diff --git a/drivers/common/idpf/base/meson.build b/drivers/common/idpf/base/meson.build index 649c44d0ae..f30ec7dfc2 100644 --- a/drivers/common/idpf/base/meson.build +++ b/drivers/common/idpf/base/meson.build @@ -6,8 +6,11 @@ sources += files( 'idpf_controlq_setup.c', ) -cflags += ['-Wno-unused-value'] -cflags += ['-Wno-unused-variable'] -cflags += ['-Wno-unused-parameter'] -cflags += ['-Wno-implicit-fallthrough'] -cflags += ['-Wno-strict-aliasing'] +error_cflags = [ + '-Wno-unused-variable', +] +foreach flag: error_cflags + if cc.has_argument(flag) + cflags += flag + endif +endforeach diff --git a/drivers/common/mlx5/mlx5_devx_cmds.c b/drivers/common/mlx5/mlx5_devx_cmds.c index 9710dcedd3..a75f011750 100644 --- a/drivers/common/mlx5/mlx5_devx_cmds.c +++ b/drivers/common/mlx5/mlx5_devx_cmds.c @@ -1027,6 +1027,7 @@ mlx5_devx_cmd_query_hca_attr(void *ctx, attr->log_max_qp = MLX5_GET(cmd_hca_cap, hcattr, log_max_qp); attr->log_max_cq_sz = MLX5_GET(cmd_hca_cap, hcattr, log_max_cq_sz); attr->log_max_qp_sz = MLX5_GET(cmd_hca_cap, hcattr, log_max_qp_sz); + attr->log_max_wq_sz = MLX5_GET(cmd_hca_cap, hcattr, log_max_wq_sz); attr->log_max_mrw_sz = MLX5_GET(cmd_hca_cap, hcattr, log_max_mrw_sz); attr->log_max_pd = MLX5_GET(cmd_hca_cap, hcattr, log_max_pd); attr->log_max_srq = MLX5_GET(cmd_hca_cap, hcattr, log_max_srq); diff --git a/drivers/common/mlx5/mlx5_devx_cmds.h 
b/drivers/common/mlx5/mlx5_devx_cmds.h index 6cf7999c46..f523bf8529 100644 --- a/drivers/common/mlx5/mlx5_devx_cmds.h +++ b/drivers/common/mlx5/mlx5_devx_cmds.h @@ -267,10 +267,11 @@ struct mlx5_hca_attr { struct mlx5_hca_flow_attr flow; struct mlx5_hca_flex_attr flex; struct mlx5_hca_crypto_mmo_attr crypto_mmo; - int log_max_qp_sz; - int log_max_cq_sz; - int log_max_qp; - int log_max_cq; + uint8_t log_max_wq_sz; + uint8_t log_max_qp_sz; + uint8_t log_max_cq_sz; + uint8_t log_max_qp; + uint8_t log_max_cq; uint32_t log_max_pd; uint32_t log_max_mrw_sz; uint32_t log_max_srq; diff --git a/drivers/common/nfp/nfp_common.c b/drivers/common/nfp/nfp_common.c index 40e1620c2e..0df8332dfb 100644 --- a/drivers/common/nfp/nfp_common.c +++ b/drivers/common/nfp/nfp_common.c @@ -25,7 +25,7 @@ nfp_reconfig_real(struct nfp_hw *hw, hw->qcp_cfg); if (hw->qcp_cfg == NULL) { - PMD_DRV_LOG(ERR, "Bad configuration queue pointer"); + PMD_DRV_LOG(ERR, "Bad configuration queue pointer."); return -ENXIO; } @@ -43,12 +43,12 @@ nfp_reconfig_real(struct nfp_hw *hw, break; if ((new & NFP_NET_CFG_UPDATE_ERR) != 0) { - PMD_DRV_LOG(ERR, "Reconfig error: %#08x", new); + PMD_DRV_LOG(ERR, "Reconfig error: %#08x.", new); return -1; } if (cnt >= NFP_NET_POLL_TIMEOUT) { - PMD_DRV_LOG(ERR, "Reconfig timeout for %#08x after %u ms", + PMD_DRV_LOG(ERR, "Reconfig timeout for %#08x after %u ms.", update, cnt); return -EIO; } @@ -56,7 +56,7 @@ nfp_reconfig_real(struct nfp_hw *hw, nanosleep(&wait, 0); /* waiting for a 1ms */ } - PMD_DRV_LOG(DEBUG, "Ack DONE"); + PMD_DRV_LOG(DEBUG, "Ack DONE."); return 0; } @@ -96,7 +96,7 @@ nfp_reconfig(struct nfp_hw *hw, rte_spinlock_unlock(&hw->reconfig_lock); if (ret != 0) { - PMD_DRV_LOG(ERR, "Error nfp reconfig: ctrl=%#08x update=%#08x", + PMD_DRV_LOG(ERR, "Error NFP reconfig: ctrl=%#08x update=%#08x.", ctrl, update); return -EIO; } @@ -140,7 +140,7 @@ nfp_ext_reconfig(struct nfp_hw *hw, rte_spinlock_unlock(&hw->reconfig_lock); if (ret != 0) { - PMD_DRV_LOG(ERR, "Error nfp ext reconfig: ctrl_ext=%#08x update=%#08x", + PMD_DRV_LOG(ERR, "Error NFP ext reconfig: ctrl_ext=%#08x update=%#08x.", ctrl_ext, update); return -EIO; } diff --git a/drivers/common/nfp/nfp_common_pci.c b/drivers/common/nfp/nfp_common_pci.c index 5c36052f9d..856f3917a2 100644 --- a/drivers/common/nfp/nfp_common_pci.c +++ b/drivers/common/nfp/nfp_common_pci.c @@ -191,7 +191,7 @@ nfp_drivers_probe(struct rte_pci_device *pci_dev, ret = driver->probe(pci_dev); if (ret < 0) { - PMD_DRV_LOG(ERR, "Failed to load driver %s", driver->name); + PMD_DRV_LOG(ERR, "Failed to load driver %s.", driver->name); return ret; } } @@ -206,11 +206,11 @@ nfp_common_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, enum nfp_class class; struct rte_device *eal_dev = &pci_dev->device; - PMD_DRV_LOG(INFO, "probe device %s.", eal_dev->name); + PMD_DRV_LOG(INFO, "Probe device %s.", eal_dev->name); class = nfp_parse_class_options(eal_dev->devargs); if (class == NFP_CLASS_INVALID) { - PMD_DRV_LOG(ERR, "Unsupported nfp class type: %s", + PMD_DRV_LOG(ERR, "Unsupported nfp class type: %s.", eal_dev->devargs->args); return -ENOTSUP; } diff --git a/drivers/common/nfp/nfp_platform.h b/drivers/common/nfp/nfp_platform.h index 1687942e41..0b02fcf1e8 100644 --- a/drivers/common/nfp/nfp_platform.h +++ b/drivers/common/nfp/nfp_platform.h @@ -8,6 +8,8 @@ #include +#include + #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d)) #define DMA_BIT_MASK(n) ((1ULL << (n)) - 1) @@ -21,7 +23,7 @@ #define GENMASK_ULL(h, l) \ ((~0ULL << (l)) & (~0ULL >> (BITS_PER_LONG_LONG - (h) - 
1))) -#define __bf_shf(x) (__builtin_ffsll(x) - 1) +#define __bf_shf(x) rte_bsf64(x) #define FIELD_GET(_mask, _reg) \ (__extension__ ({ \ diff --git a/drivers/crypto/cnxk/cn10k_cryptodev_ops.c b/drivers/crypto/cnxk/cn10k_cryptodev_ops.c index 88ea032bcb..dbebc5aef1 100644 --- a/drivers/crypto/cnxk/cn10k_cryptodev_ops.c +++ b/drivers/crypto/cnxk/cn10k_cryptodev_ops.c @@ -11,10 +11,7 @@ #include -#include "roc_cpt.h" -#include "roc_idev.h" -#include "roc_sso.h" -#include "roc_sso_dp.h" +#include "roc_api.h" #include "cn10k_cryptodev.h" #include "cn10k_cryptodev_event_dp.h" diff --git a/drivers/crypto/cnxk/cn9k_cryptodev_ops.c b/drivers/crypto/cnxk/cn9k_cryptodev_ops.c index ae00af5019..8d10bc9f9b 100644 --- a/drivers/crypto/cnxk/cn9k_cryptodev_ops.c +++ b/drivers/crypto/cnxk/cn9k_cryptodev_ops.c @@ -8,14 +8,7 @@ #include #include -#include "roc_cpt.h" -#if defined(__aarch64__) -#include "roc_io.h" -#else -#include "roc_io_generic.h" -#endif -#include "roc_sso.h" -#include "roc_sso_dp.h" +#include "roc_api.h" #include "cn9k_cryptodev.h" #include "cn9k_cryptodev_ops.h" diff --git a/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c b/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c index b34183d594..ec6577f64c 100644 --- a/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c +++ b/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c @@ -50,6 +50,7 @@ #define FSL_SUBSYSTEM_SEC 1 #define FSL_MC_DPSECI_DEVID 3 +#define DPAA2_DEFAULT_NAT_T_PORT 4500 #define NO_PREFETCH 0 #define DRIVER_DUMP_MODE "drv_dump_mode" @@ -65,6 +66,47 @@ enum dpaa2_sec_dump_levels { uint8_t cryptodev_driver_id; uint8_t dpaa2_sec_dp_dump = DPAA2_SEC_DP_ERR_DUMP; +static inline void +dpaa2_sec_dp_fd_dump(const struct qbman_fd *fd, uint16_t bpid, + struct rte_mbuf *mbuf, bool tx) +{ +#if (RTE_LOG_DEBUG <= RTE_LOG_DP_LEVEL) + char debug_str[1024]; + int offset; + + if (tx) { + offset = sprintf(debug_str, + "CIPHER SG: fdaddr =%" PRIx64 ", from %s pool ", + DPAA2_GET_FD_ADDR(fd), + bpid < MAX_BPID ? "SW" : "BMAN"); + if (bpid < MAX_BPID) { + offset += sprintf(&debug_str[offset], + "bpid = %d ", bpid); + } + } else { + offset = sprintf(debug_str, "Mbuf %p from %s pool ", + mbuf, DPAA2_GET_FD_IVP(fd) ? "SW" : "BMAN"); + if (!DPAA2_GET_FD_IVP(fd)) { + offset += sprintf(&debug_str[offset], "bpid = %d ", + DPAA2_GET_FD_BPID(fd)); + } + } + offset += sprintf(&debug_str[offset], + "private size = %d ", + mbuf->pool->private_data_size); + offset += sprintf(&debug_str[offset], + "addr %p, fdaddr =%" PRIx64 ", off =%d, len =%d", + mbuf->buf_addr, DPAA2_GET_FD_ADDR(fd), + DPAA2_GET_FD_OFFSET(fd), DPAA2_GET_FD_LEN(fd)); + DPAA2_SEC_DP_DEBUG("%s", debug_str); +#else + RTE_SET_USED(bpid); + RTE_SET_USED(tx); + RTE_SET_USED(mbuf); + RTE_SET_USED(fd); +#endif +} + static inline void free_fle(const struct qbman_fd *fd, struct dpaa2_sec_qp *qp) { @@ -1107,10 +1149,6 @@ build_cipher_sg_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op, struct rte_mbuf *mbuf; uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *, sess->iv.offset); -#if (RTE_LOG_DEBUG <= RTE_LOG_DP_LEVEL) - char debug_str[1024]; - int offset; -#endif data_len = sym_op->cipher.data.length; data_offset = sym_op->cipher.data.offset; @@ -1215,26 +1253,7 @@ build_cipher_sg_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op, DPAA2_SET_FD_LEN(fd, ip_fle->length); DPAA2_SET_FD_COMPOUND_FMT(fd); DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc)); - -#if (RTE_LOG_DEBUG <= RTE_LOG_DP_LEVEL) - offset = sprintf(debug_str, - "CIPHER SG: fdaddr =%" PRIx64 ", from %s pool ", - DPAA2_GET_FD_ADDR(fd), - bpid < MAX_BPID ? 
"SW" : "BMAN"); - if (bpid < MAX_BPID) { - offset += sprintf(&debug_str[offset], - "bpid = %d ", bpid); - } - offset += sprintf(&debug_str[offset], - "private size = %d ", - mbuf->pool->private_data_size); - offset += sprintf(&debug_str[offset], - "off =%d, len =%d", - DPAA2_GET_FD_OFFSET(fd), DPAA2_GET_FD_LEN(fd)); - DPAA2_SEC_DP_DEBUG("%s", debug_str); -#else - RTE_SET_USED(bpid); -#endif + dpaa2_sec_dp_fd_dump(fd, bpid, mbuf, true); return 0; } @@ -1251,10 +1270,6 @@ build_cipher_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op, uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *, sess->iv.offset); struct rte_mbuf *dst; -#if (RTE_LOG_DEBUG <= RTE_LOG_DP_LEVEL) - char debug_str[1024]; - int offset; -#endif data_len = sym_op->cipher.data.length; data_offset = sym_op->cipher.data.offset; @@ -1345,24 +1360,7 @@ build_cipher_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op, sge->length = data_len; DPAA2_SET_FLE_FIN(sge); DPAA2_SET_FLE_FIN(fle); - -#if (RTE_LOG_DEBUG <= RTE_LOG_DP_LEVEL) - offset = sprintf(debug_str, - "CIPHER: fdaddr =%" PRIx64 ", from %s pool ", - DPAA2_GET_FD_ADDR(fd), - bpid < MAX_BPID ? "SW" : "BMAN"); - if (bpid < MAX_BPID) { - offset += sprintf(&debug_str[offset], - "bpid = %d ", bpid); - } - offset += sprintf(&debug_str[offset], - "private size = %d ", - dst->pool->private_data_size); - offset += sprintf(&debug_str[offset], - "off =%d, len =%d", - DPAA2_GET_FD_OFFSET(fd), DPAA2_GET_FD_LEN(fd)); - DPAA2_SEC_DP_DEBUG("%s", debug_str); -#endif + dpaa2_sec_dp_fd_dump(fd, bpid, dst, true); return 0; } @@ -1595,10 +1593,6 @@ sec_fd_to_mbuf(const struct qbman_fd *fd, struct dpaa2_sec_qp *qp) struct qbman_fle *fle; struct rte_crypto_op *op; struct rte_mbuf *dst, *src; -#if (RTE_LOG_DEBUG <= RTE_LOG_DP_LEVEL) - char debug_str[1024]; - int offset; -#endif if (DPAA2_FD_GET_FORMAT(fd) == qbman_fd_single) return sec_simple_fd_to_mbuf(fd); @@ -1636,22 +1630,7 @@ sec_fd_to_mbuf(const struct qbman_fd *fd, struct dpaa2_sec_qp *qp) } dst->data_len = len; } - -#if (RTE_LOG_DEBUG <= RTE_LOG_DP_LEVEL) - offset = sprintf(debug_str, "Mbuf %p from %s pool ", - dst, DPAA2_GET_FD_IVP(fd) ? 
"SW" : "BMAN"); - if (!DPAA2_GET_FD_IVP(fd)) { - offset += sprintf(&debug_str[offset], "bpid = %d ", - DPAA2_GET_FD_BPID(fd)); - } - offset += sprintf(&debug_str[offset], - "private size = %d ", dst->pool->private_data_size); - offset += sprintf(&debug_str[offset], - "addr %p, fdaddr =%" PRIx64 ", off =%d, len =%d", - dst->buf_addr, DPAA2_GET_FD_ADDR(fd), - DPAA2_GET_FD_OFFSET(fd), DPAA2_GET_FD_LEN(fd)); - DPAA2_SEC_DP_DEBUG("%s", debug_str); -#endif + dpaa2_sec_dp_fd_dump(fd, 0, dst, false); /* free the fle memory */ if (likely(rte_pktmbuf_is_contiguous(src))) { @@ -1932,7 +1911,7 @@ dpaa2_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops, } } swp = DPAA2_PER_LCORE_PORTAL; - dq_storage = dpaa2_qp->rx_vq.q_storage->dq_storage[0]; + dq_storage = dpaa2_qp->rx_vq.q_storage[0]->dq_storage[0]; qbman_pull_desc_clear(&pulldesc); qbman_pull_desc_set_numframes(&pulldesc, @@ -2023,10 +2002,7 @@ dpaa2_sec_queue_pair_release(struct rte_cryptodev *dev, uint16_t queue_pair_id) PMD_INIT_FUNC_TRACE(); - if (qp->rx_vq.q_storage) { - dpaa2_free_dq_storage(qp->rx_vq.q_storage); - rte_free(qp->rx_vq.q_storage); - } + dpaa2_queue_storage_free(&qp->rx_vq, 1); rte_mempool_free(qp->fle_pool); rte_free(qp); @@ -2077,18 +2053,10 @@ dpaa2_sec_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id, qp->rx_vq.crypto_data = dev->data; qp->tx_vq.crypto_data = dev->data; - qp->rx_vq.q_storage = rte_malloc("sec dq storage", - sizeof(struct queue_storage_info_t), - RTE_CACHE_LINE_SIZE); - if (!qp->rx_vq.q_storage) { - DPAA2_SEC_ERR("malloc failed for q_storage"); - return -ENOMEM; - } - memset(qp->rx_vq.q_storage, 0, sizeof(struct queue_storage_info_t)); - - if (dpaa2_alloc_dq_storage(qp->rx_vq.q_storage)) { - DPAA2_SEC_ERR("Unable to allocate dequeue storage"); - return -ENOMEM; + retcode = dpaa2_queue_storage_alloc((&qp->rx_vq), 1); + if (retcode) { + dpaa2_queue_storage_free((&qp->rx_vq), 1); + return retcode; } dev->data->queue_pairs[qp_id] = qp; @@ -3164,6 +3132,7 @@ dpaa2_sec_set_ipsec_session(struct rte_cryptodev *dev, uint8_t hdr[48] = {}; struct rte_ipv4_hdr *ip4_hdr; struct rte_ipv6_hdr *ip6_hdr; + struct rte_udp_hdr *uh = NULL; struct ipsec_encap_pdb encap_pdb; flc->dhr = SEC_FLC_DHR_OUTBOUND; @@ -3235,29 +3204,10 @@ dpaa2_sec_set_ipsec_session(struct rte_cryptodev *dev, memcpy(&ip4_hdr->dst_addr, &ipsec_xform->tunnel.ipv4.dst_ip, sizeof(struct in_addr)); if (ipsec_xform->options.udp_encap) { - uint16_t sport, dport; - struct rte_udp_hdr *uh = - (struct rte_udp_hdr *) (hdr + - sizeof(struct rte_ipv4_hdr)); - - sport = ipsec_xform->udp.sport ? - ipsec_xform->udp.sport : 4500; - dport = ipsec_xform->udp.dport ? - ipsec_xform->udp.dport : 4500; - uh->src_port = rte_cpu_to_be_16(sport); - uh->dst_port = rte_cpu_to_be_16(dport); - uh->dgram_len = 0; - uh->dgram_cksum = 0; - ip4_hdr->next_proto_id = IPPROTO_UDP; - ip4_hdr->total_length = - rte_cpu_to_be_16( + ip4_hdr->total_length = rte_cpu_to_be_16( sizeof(struct rte_ipv4_hdr) + sizeof(struct rte_udp_hdr)); - encap_pdb.ip_hdr_len += - sizeof(struct rte_udp_hdr); - encap_pdb.options |= - PDBOPTS_ESP_NAT | PDBOPTS_ESP_NUC; } else { ip4_hdr->total_length = rte_cpu_to_be_16( @@ -3284,14 +3234,39 @@ dpaa2_sec_set_ipsec_session(struct rte_cryptodev *dev, ip6_hdr->payload_len = 0; ip6_hdr->hop_limits = ipsec_xform->tunnel.ipv6.hlimit ? ipsec_xform->tunnel.ipv6.hlimit : 0x40; - ip6_hdr->proto = (ipsec_xform->proto == - RTE_SECURITY_IPSEC_SA_PROTO_ESP) ? 
- IPPROTO_ESP : IPPROTO_AH; memcpy(&ip6_hdr->src_addr, &ipsec_xform->tunnel.ipv6.src_addr, 16); memcpy(&ip6_hdr->dst_addr, &ipsec_xform->tunnel.ipv6.dst_addr, 16); encap_pdb.ip_hdr_len = sizeof(struct rte_ipv6_hdr); + if (ipsec_xform->options.udp_encap) + ip6_hdr->proto = IPPROTO_UDP; + else + ip6_hdr->proto = (ipsec_xform->proto == + RTE_SECURITY_IPSEC_SA_PROTO_ESP) ? + IPPROTO_ESP : IPPROTO_AH; + } + if (ipsec_xform->options.udp_encap) { + uint16_t sport, dport; + + if (ipsec_xform->tunnel.type == RTE_SECURITY_IPSEC_TUNNEL_IPV4) + uh = (struct rte_udp_hdr *) (hdr + + sizeof(struct rte_ipv4_hdr)); + else + uh = (struct rte_udp_hdr *) (hdr + + sizeof(struct rte_ipv6_hdr)); + + sport = ipsec_xform->udp.sport ? + ipsec_xform->udp.sport : DPAA2_DEFAULT_NAT_T_PORT; + dport = ipsec_xform->udp.dport ? + ipsec_xform->udp.dport : DPAA2_DEFAULT_NAT_T_PORT; + uh->src_port = rte_cpu_to_be_16(sport); + uh->dst_port = rte_cpu_to_be_16(dport); + uh->dgram_len = 0; + uh->dgram_cksum = 0; + + encap_pdb.ip_hdr_len += sizeof(struct rte_udp_hdr); + encap_pdb.options |= PDBOPTS_ESP_NAT | PDBOPTS_ESP_NUC; } bufsize = cnstr_shdsc_ipsec_new_encap(priv->flc_desc[0].desc, @@ -3320,13 +3295,23 @@ dpaa2_sec_set_ipsec_session(struct rte_cryptodev *dev, if (ipsec_xform->tunnel.type == RTE_SECURITY_IPSEC_TUNNEL_IPV4) { - decap_pdb.options = sizeof(struct ip) << 16; + if (ipsec_xform->options.udp_encap) + decap_pdb.options = + (sizeof(struct ip) + sizeof(struct rte_udp_hdr)) << 16; + else + decap_pdb.options = sizeof(struct ip) << 16; if (ipsec_xform->options.copy_df) decap_pdb.options |= PDBHMO_ESP_DFV; if (ipsec_xform->options.dec_ttl) decap_pdb.options |= PDBHMO_ESP_DECAP_DTTL; } else { - decap_pdb.options = sizeof(struct rte_ipv6_hdr) << 16; + if (ipsec_xform->options.udp_encap) { + decap_pdb.options = + (sizeof(struct rte_ipv6_hdr) + + sizeof(struct rte_udp_hdr)) << 16; + } else { + decap_pdb.options = sizeof(struct rte_ipv6_hdr) << 16; + } } if (ipsec_xform->options.esn) { decap_pdb.options |= PDBOPTS_ESP_ESN; diff --git a/drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c b/drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c index 883584a6e2..fb0408f8ad 100644 --- a/drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c +++ b/drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c @@ -1,5 +1,5 @@ /* SPDX-License-Identifier: BSD-3-Clause - * Copyright 2021-2022 NXP + * Copyright 2021-2022, 2024 NXP */ #include @@ -853,7 +853,7 @@ dpaa2_sec_raw_dequeue_burst(void *qp_data, uint8_t *drv_ctx, } } swp = DPAA2_PER_LCORE_PORTAL; - dq_storage = dpaa2_qp->rx_vq.q_storage->dq_storage[0]; + dq_storage = dpaa2_qp->rx_vq.q_storage[0]->dq_storage[0]; qbman_pull_desc_clear(&pulldesc); qbman_pull_desc_set_numframes(&pulldesc, diff --git a/drivers/crypto/dpaa2_sec/mc/dpseci.c b/drivers/crypto/dpaa2_sec/mc/dpseci.c index 87e0defdc6..773b4648e0 100644 --- a/drivers/crypto/dpaa2_sec/mc/dpseci.c +++ b/drivers/crypto/dpaa2_sec/mc/dpseci.c @@ -1,7 +1,7 @@ /* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) * * Copyright 2013-2016 Freescale Semiconductor Inc. 
- * Copyright 2016 NXP + * Copyright 2016-2023 NXP * */ #include @@ -763,3 +763,92 @@ int dpseci_get_congestion_notification( return 0; } + + +/** + * dpseci_get_rx_queue_status() - Get queue status attributes + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPSECI object + * @queue_index: Select the queue_index + * @attr: Returned queue status attributes + * + * Return: '0' on success, error code otherwise + */ +int dpseci_get_rx_queue_status(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + uint32_t queue_index, + struct dpseci_queue_status *attr) +{ + struct dpseci_rsp_get_queue_status *rsp_params; + struct dpseci_cmd_get_queue_status *cmd_params; + struct mc_command cmd = { 0 }; + int err; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_RX_QUEUE_STATUS, + cmd_flags, + token); + cmd_params = (struct dpseci_cmd_get_queue_status *)cmd.params; + cmd_params->queue_index = cpu_to_le32(queue_index); + + /* send command to mc*/ + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + /* retrieve response parameters */ + rsp_params = (struct dpseci_rsp_get_queue_status *)cmd.params; + attr->fqid = le32_to_cpu(rsp_params->fqid); + attr->schedstate = (enum qbman_fq_schedstate_e)(le16_to_cpu(rsp_params->schedstate)); + attr->state_flags = le16_to_cpu(rsp_params->state_flags); + attr->frame_count = le32_to_cpu(rsp_params->frame_count); + attr->byte_count = le32_to_cpu(rsp_params->byte_count); + + return 0; +} + +/** + * dpseci_get_tx_queue_status() - Get queue status attributes + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPSECI object + * @queue_index: Select the queue_index + * @attr: Returned queue status attributes + * + * Return: '0' on success, error code otherwise + */ +int dpseci_get_tx_queue_status(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + uint32_t queue_index, + struct dpseci_queue_status *attr) +{ + struct dpseci_rsp_get_queue_status *rsp_params; + struct dpseci_cmd_get_queue_status *cmd_params; + struct mc_command cmd = { 0 }; + int err; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_TX_QUEUE_STATUS, + cmd_flags, + token); + cmd_params = (struct dpseci_cmd_get_queue_status *)cmd.params; + cmd_params->queue_index = cpu_to_le32(queue_index); + + /* send command to mc*/ + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + /* retrieve response parameters */ + rsp_params = (struct dpseci_rsp_get_queue_status *)cmd.params; + attr->fqid = le32_to_cpu(rsp_params->fqid); + attr->schedstate = (enum qbman_fq_schedstate_e)(le16_to_cpu(rsp_params->schedstate)); + attr->state_flags = le16_to_cpu(rsp_params->state_flags); + attr->frame_count = le32_to_cpu(rsp_params->frame_count); + attr->byte_count = le32_to_cpu(rsp_params->byte_count); + + return 0; +} diff --git a/drivers/crypto/dpaa2_sec/mc/fsl_dpseci.h b/drivers/crypto/dpaa2_sec/mc/fsl_dpseci.h index c295c04f24..e371abdd64 100644 --- a/drivers/crypto/dpaa2_sec/mc/fsl_dpseci.h +++ b/drivers/crypto/dpaa2_sec/mc/fsl_dpseci.h @@ -1,7 +1,7 @@ /* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) * * Copyright 2013-2016 Freescale Semiconductor Inc. 
- * Copyright 2016-2020 NXP + * Copyright 2016-2023 NXP * */ #ifndef __FSL_DPSECI_H @@ -429,4 +429,49 @@ int dpseci_get_congestion_notification( uint16_t token, struct dpseci_congestion_notification_cfg *cfg); +/* Available FQ's scheduling states */ +enum qbman_fq_schedstate_e { + qbman_fq_schedstate_oos = 0, + qbman_fq_schedstate_retired, + qbman_fq_schedstate_tentatively_scheduled, + qbman_fq_schedstate_truly_scheduled, + qbman_fq_schedstate_parked, + qbman_fq_schedstate_held_active, +}; + +/* FQ's force eligible pending bit */ +#define DPSECI_FQ_STATE_FORCE_ELIGIBLE 0x00000001 +/* FQ's XON/XOFF state, 0: XON, 1: XOFF */ +#define DPSECI_FQ_STATE_XOFF 0x00000002 +/* FQ's retirement pending bit */ +#define DPSECI_FQ_STATE_RETIREMENT_PENDING 0x00000004 +/* FQ's overflow error bit */ +#define DPSECI_FQ_STATE_OVERFLOW_ERROR 0x00000008 + +struct dpseci_queue_status { + uint32_t fqid; + /* FQ's scheduling states + * (available scheduling states are defined in qbman_fq_schedstate_e) + */ + enum qbman_fq_schedstate_e schedstate; + /* FQ's state flags (available flags are defined above) */ + uint16_t state_flags; + /* FQ's frame count */ + uint32_t frame_count; + /* FQ's byte count */ + uint32_t byte_count; +}; + +int dpseci_get_rx_queue_status(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + uint32_t queue_index, + struct dpseci_queue_status *attr); + +int dpseci_get_tx_queue_status(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + uint32_t queue_index, + struct dpseci_queue_status *attr); + #endif /* __FSL_DPSECI_H */ diff --git a/drivers/crypto/dpaa2_sec/mc/fsl_dpseci_cmd.h b/drivers/crypto/dpaa2_sec/mc/fsl_dpseci_cmd.h index af3518a0f3..065464b701 100644 --- a/drivers/crypto/dpaa2_sec/mc/fsl_dpseci_cmd.h +++ b/drivers/crypto/dpaa2_sec/mc/fsl_dpseci_cmd.h @@ -1,7 +1,7 @@ /* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) * * Copyright 2013-2016 Freescale Semiconductor Inc. 
- * Copyright 2016-2017 NXP + * Copyright 2016-2023 NXP * */ #ifndef _FSL_DPSECI_CMD_H @@ -9,7 +9,7 @@ /* DPSECI Version */ #define DPSECI_VER_MAJOR 5 -#define DPSECI_VER_MINOR 3 +#define DPSECI_VER_MINOR 4 /* Command versioning */ #define DPSECI_CMD_BASE_VERSION 1 @@ -46,6 +46,9 @@ #define DPSECI_CMDID_GET_OPR DPSECI_CMD_V1(0x19B) #define DPSECI_CMDID_SET_CONGESTION_NOTIFICATION DPSECI_CMD_V1(0x170) #define DPSECI_CMDID_GET_CONGESTION_NOTIFICATION DPSECI_CMD_V1(0x171) +#define DPSECI_CMDID_GET_RX_QUEUE_STATUS DPSECI_CMD_V1(0x172) +#define DPSECI_CMDID_GET_TX_QUEUE_STATUS DPSECI_CMD_V1(0x173) + /* Macros for accessing command fields smaller than 1byte */ #define DPSECI_MASK(field) \ @@ -251,5 +254,17 @@ struct dpseci_cmd_set_congestion_notification { uint32_t threshold_exit; }; +struct dpseci_cmd_get_queue_status { + uint32_t queue_index; +}; + +struct dpseci_rsp_get_queue_status { + uint32_t fqid; + uint16_t schedstate; + uint16_t state_flags; + uint32_t frame_count; + uint32_t byte_count; +}; + #pragma pack(pop) #endif /* _FSL_DPSECI_CMD_H */ diff --git a/drivers/crypto/dpaa_sec/dpaa_sec.c b/drivers/crypto/dpaa_sec/dpaa_sec.c index 225bf950e9..3fa88ca968 100644 --- a/drivers/crypto/dpaa_sec/dpaa_sec.c +++ b/drivers/crypto/dpaa_sec/dpaa_sec.c @@ -20,6 +20,7 @@ #include #include #include +#include #include #include #include @@ -46,6 +47,7 @@ #include #define DRIVER_DUMP_MODE "drv_dump_mode" +#define DPAA_DEFAULT_NAT_T_PORT 4500 /* DPAA_SEC_DP_DUMP levels */ enum dpaa_sec_dump_levels { @@ -2961,15 +2963,22 @@ dpaa_sec_set_ipsec_session(__rte_unused struct rte_cryptodev *dev, RTE_SECURITY_IPSEC_TUNNEL_IPV4) { session->ip4_hdr.ip_v = IPVERSION; session->ip4_hdr.ip_hl = 5; - session->ip4_hdr.ip_len = rte_cpu_to_be_16( - sizeof(session->ip4_hdr)); + if (ipsec_xform->options.udp_encap) + session->ip4_hdr.ip_len = rte_cpu_to_be_16( + sizeof(session->ip4_hdr) + sizeof(struct rte_udp_hdr)); + else + session->ip4_hdr.ip_len = rte_cpu_to_be_16( + sizeof(session->ip4_hdr)); session->ip4_hdr.ip_tos = ipsec_xform->tunnel.ipv4.dscp; session->ip4_hdr.ip_id = 0; session->ip4_hdr.ip_off = 0; session->ip4_hdr.ip_ttl = ipsec_xform->tunnel.ipv4.ttl; - session->ip4_hdr.ip_p = (ipsec_xform->proto == - RTE_SECURITY_IPSEC_SA_PROTO_ESP) ? - IPPROTO_ESP : IPPROTO_AH; + if (ipsec_xform->options.udp_encap) + session->ip4_hdr.ip_p = IPPROTO_UDP; + else + session->ip4_hdr.ip_p = (ipsec_xform->proto == + RTE_SECURITY_IPSEC_SA_PROTO_ESP) ? + IPPROTO_ESP : IPPROTO_AH; session->ip4_hdr.ip_sum = 0; session->ip4_hdr.ip_src = ipsec_xform->tunnel.ipv4.src_ip; @@ -2993,9 +3002,12 @@ dpaa_sec_set_ipsec_session(__rte_unused struct rte_cryptodev *dev, session->ip6_hdr.payload_len = 0; session->ip6_hdr.hop_limits = ipsec_xform->tunnel.ipv6.hlimit; - session->ip6_hdr.proto = (ipsec_xform->proto == - RTE_SECURITY_IPSEC_SA_PROTO_ESP) ? - IPPROTO_ESP : IPPROTO_AH; + if (ipsec_xform->options.udp_encap) + session->ip6_hdr.proto = IPPROTO_UDP; + else + session->ip6_hdr.proto = (ipsec_xform->proto == + RTE_SECURITY_IPSEC_SA_PROTO_ESP) ? 
+ IPPROTO_ESP : IPPROTO_AH; memcpy(&session->ip6_hdr.src_addr, &ipsec_xform->tunnel.ipv6.src_addr, 16); memcpy(&session->ip6_hdr.dst_addr, @@ -3011,19 +3023,69 @@ dpaa_sec_set_ipsec_session(__rte_unused struct rte_cryptodev *dev, PDBHMO_ESP_SNR; if (ipsec_xform->options.dec_ttl) session->encap_pdb.options |= PDBHMO_ESP_ENCAP_DTTL; - if (ipsec_xform->options.esn) - session->encap_pdb.options |= PDBOPTS_ESP_ESN; session->encap_pdb.spi = ipsec_xform->spi; + /* Initializing the sequence number to 1, Security + * engine will choose this sequence number for first packet + * Refer: RFC4303 section: 3.3.3.Sequence Number Generation + */ + session->encap_pdb.seq_num = 1; + if (ipsec_xform->options.esn) { + session->encap_pdb.options |= PDBOPTS_ESP_ESN; + session->encap_pdb.seq_num_ext_hi = conf->ipsec.esn.hi; + session->encap_pdb.seq_num = conf->ipsec.esn.low; + } + if (ipsec_xform->options.udp_encap) { + struct rte_udp_hdr *udp_hdr; + + if (ipsec_xform->tunnel.type == RTE_SECURITY_IPSEC_TUNNEL_IPV4) + udp_hdr = (struct rte_udp_hdr *)(&session->udp4.udp_hdr); + else + udp_hdr = (struct rte_udp_hdr *)(&session->udp6.udp_hdr); + + if (ipsec_xform->udp.sport) + udp_hdr->src_port = rte_cpu_to_be_16(ipsec_xform->udp.sport); + else + udp_hdr->src_port = rte_cpu_to_be_16(DPAA_DEFAULT_NAT_T_PORT); + if (ipsec_xform->udp.dport) + udp_hdr->dst_port = rte_cpu_to_be_16(ipsec_xform->udp.dport); + else + udp_hdr->dst_port = rte_cpu_to_be_16(DPAA_DEFAULT_NAT_T_PORT); + udp_hdr->dgram_len = 0; + udp_hdr->dgram_cksum = 0; + + session->encap_pdb.ip_hdr_len += sizeof(struct rte_udp_hdr); + session->encap_pdb.options |= PDBOPTS_ESP_NAT | PDBOPTS_ESP_NUC; + } + if (ipsec_xform->options.ecn) + session->encap_pdb.options |= PDBOPTS_ESP_TECN; } else if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS) { - if (ipsec_xform->tunnel.type == RTE_SECURITY_IPSEC_TUNNEL_IPV4) - session->decap_pdb.options = sizeof(struct ip) << 16; - else - session->decap_pdb.options = - sizeof(struct rte_ipv6_hdr) << 16; - if (ipsec_xform->options.esn) + if (ipsec_xform->tunnel.type == RTE_SECURITY_IPSEC_TUNNEL_IPV4) { + if (ipsec_xform->options.udp_encap) + session->decap_pdb.options = + (sizeof(struct ip) + sizeof(struct rte_udp_hdr)) << 16; + else + session->decap_pdb.options = sizeof(struct ip) << 16; + if (ipsec_xform->options.copy_df) + session->decap_pdb.options |= PDBHMO_ESP_DFV; + } else { + if (ipsec_xform->options.udp_encap) + session->decap_pdb.options = + (sizeof(struct rte_ipv6_hdr) + sizeof(struct rte_udp_hdr)) << 16; + else + session->decap_pdb.options = sizeof(struct rte_ipv6_hdr) << 16; + } + if (ipsec_xform->options.esn) { session->decap_pdb.options |= PDBOPTS_ESP_ESN; + session->decap_pdb.seq_num_ext_hi = conf->ipsec.esn.hi; + session->decap_pdb.seq_num = conf->ipsec.esn.low; + } + if (ipsec_xform->options.copy_dscp) + session->decap_pdb.options |= PDBHMO_ESP_DIFFSERV; + if (ipsec_xform->options.ecn) + session->decap_pdb.options |= PDBOPTS_ESP_TECN; + if (ipsec_xform->replay_win_sz) { uint32_t win_sz; win_sz = rte_align32pow2(ipsec_xform->replay_win_sz); diff --git a/drivers/crypto/dpaa_sec/dpaa_sec.h b/drivers/crypto/dpaa_sec/dpaa_sec.h index eff6dcf311..64d9e22159 100644 --- a/drivers/crypto/dpaa_sec/dpaa_sec.h +++ b/drivers/crypto/dpaa_sec/dpaa_sec.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: BSD-3-Clause * - * Copyright 2016-2023 NXP + * Copyright 2016-2024 NXP * */ @@ -143,6 +143,16 @@ typedef struct dpaa_sec_job* (*dpaa_sec_build_raw_dp_fd_t)(uint8_t *drv_ctx, void *userdata, struct qm_fd *fd); +struct 
dpaa_ipv4_udp { + struct ip ip4_hdr; + struct rte_udp_hdr udp_hdr; +}; + +struct dpaa_ipv6_udp { + struct rte_ipv6_hdr ip6_hdr; + struct rte_udp_hdr udp_hdr; +}; + typedef struct dpaa_sec_session_entry { struct sec_cdb cdb; /**< cmd block associated with qp */ struct dpaa_sec_qp *qp[MAX_DPAA_CORES]; @@ -191,6 +201,8 @@ typedef struct dpaa_sec_session_entry { union { struct ip ip4_hdr; struct rte_ipv6_hdr ip6_hdr; + struct dpaa_ipv4_udp udp4; + struct dpaa_ipv6_udp udp6; }; uint8_t auth_cipher_text; /**< Authenticate/cipher ordering */ @@ -989,7 +1001,14 @@ static const struct rte_security_capability dpaa_sec_security_cap[] = { .proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP, .mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL, .direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS, - .options = { 0 }, + .options = { + .copy_df = 1, + .copy_dscp = 1, + .dec_ttl = 1, + .ecn = 1, + .esn = 1, + .udp_encap = 1, + }, .replay_win_sz_max = 128 }, .crypto_capabilities = dpaa_sec_capabilities @@ -1001,7 +1020,14 @@ static const struct rte_security_capability dpaa_sec_security_cap[] = { .proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP, .mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL, .direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS, - .options = { 0 }, + .options = { + .copy_df = 1, + .copy_dscp = 1, + .dec_ttl = 1, + .ecn = 1, + .esn = 1, + .udp_encap = 1, + }, .replay_win_sz_max = 128 }, .crypto_capabilities = dpaa_sec_capabilities diff --git a/drivers/crypto/ipsec_mb/ipsec_mb_ops.c b/drivers/crypto/ipsec_mb/ipsec_mb_ops.c index ba899604d2..910efb1a97 100644 --- a/drivers/crypto/ipsec_mb/ipsec_mb_ops.c +++ b/drivers/crypto/ipsec_mb/ipsec_mb_ops.c @@ -11,8 +11,6 @@ #include "ipsec_mb_private.h" -#define IMB_MP_REQ_VER_STR "1.1.0" - /** Configure device */ int ipsec_mb_config(__rte_unused struct rte_cryptodev *dev, @@ -147,15 +145,10 @@ ipsec_mb_qp_release(struct rte_cryptodev *dev, uint16_t qp_id) if (rte_eal_process_type() == RTE_PROC_PRIMARY) { rte_ring_free(rte_ring_lookup(qp->name)); -#if IMB_VERSION(1, 1, 0) > IMB_VERSION_NUM - if (qp->mb_mgr) - free_mb_mgr(qp->mb_mgr); -#else if (qp->mb_mgr_mz) { rte_memzone_free(qp->mb_mgr_mz); qp->mb_mgr = NULL; } -#endif rte_free(qp); dev->data->queue_pairs[qp_id] = NULL; } else { /* secondary process */ @@ -211,7 +204,6 @@ static struct rte_ring RING_F_SP_ENQ | RING_F_SC_DEQ); } -#if IMB_VERSION(1, 1, 0) <= IMB_VERSION_NUM static IMB_MGR * ipsec_mb_alloc_mgr_from_memzone(const struct rte_memzone **mb_mgr_mz, const char *mb_mgr_mz_name) @@ -244,7 +236,6 @@ ipsec_mb_alloc_mgr_from_memzone(const struct rte_memzone **mb_mgr_mz, } return mb_mgr; } -#endif /** Setup a queue pair */ int @@ -260,12 +251,6 @@ ipsec_mb_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id, int ret; if (rte_eal_process_type() == RTE_PROC_SECONDARY) { -#if IMB_VERSION(1, 1, 0) > IMB_VERSION_NUM - IPSEC_MB_LOG(ERR, "The intel-ipsec-mb version (%s) does not support multiprocess," - "the minimum version required for this feature is %s.", - IMB_VERSION_STR, IMB_MP_REQ_VER_STR); - return -EINVAL; -#endif qp = dev->data->queue_pairs[qp_id]; if (qp == NULL) { IPSEC_MB_LOG(DEBUG, "Secondary process setting up device qp."); @@ -285,15 +270,11 @@ ipsec_mb_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id, return -ENOMEM; } -#if IMB_VERSION(1, 1, 0) > IMB_VERSION_NUM - qp->mb_mgr = alloc_init_mb_mgr(); -#else char mz_name[IPSEC_MB_MAX_MZ_NAME]; snprintf(mz_name, sizeof(mz_name), "IMB_MGR_DEV_%d_QP_%d", dev->data->dev_id, qp_id); qp->mb_mgr = ipsec_mb_alloc_mgr_from_memzone(&(qp->mb_mgr_mz), mz_name); -#endif if (qp->mb_mgr == NULL) { ret 
= -ENOMEM; goto qp_setup_cleanup; @@ -330,14 +311,9 @@ ipsec_mb_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id, return 0; qp_setup_cleanup: -#if IMB_VERSION(1, 1, 0) > IMB_VERSION_NUM - if (qp->mb_mgr) - free_mb_mgr(qp->mb_mgr); -#else if (rte_eal_process_type() == RTE_PROC_SECONDARY) return ret; rte_memzone_free(qp->mb_mgr_mz); -#endif rte_free(qp); return ret; } diff --git a/drivers/crypto/ipsec_mb/meson.build b/drivers/crypto/ipsec_mb/meson.build index 81631d3050..89ee03454b 100644 --- a/drivers/crypto/ipsec_mb/meson.build +++ b/drivers/crypto/ipsec_mb/meson.build @@ -7,7 +7,7 @@ if is_windows subdir_done() endif -IMB_required_ver = '1.0.0' +IMB_required_ver = '1.4.0' IMB_header = '#include' if arch_subdir == 'arm' IMB_header = '#include' diff --git a/drivers/crypto/ipsec_mb/pmd_aesni_mb.c b/drivers/crypto/ipsec_mb/pmd_aesni_mb.c index a275ff0fe2..05dc1a039f 100644 --- a/drivers/crypto/ipsec_mb/pmd_aesni_mb.c +++ b/drivers/crypto/ipsec_mb/pmd_aesni_mb.c @@ -15,51 +15,6 @@ struct aesni_mb_op_buf_data { uint32_t offset; }; -#if IMB_VERSION(1, 3, 0) >= IMB_VERSION_NUM -/** - * Calculate the authentication pre-computes - * - * @param one_block_hash Function pointer - * to calculate digest on ipad/opad - * @param ipad Inner pad output byte array - * @param opad Outer pad output byte array - * @param hkey Authentication key - * @param hkey_len Authentication key length - * @param blocksize Block size of selected hash algo - */ -static void -calculate_auth_precomputes(hash_one_block_t one_block_hash, - uint8_t *ipad, uint8_t *opad, - const uint8_t *hkey, uint16_t hkey_len, - uint16_t blocksize) -{ - uint32_t i, length; - - alignas(16) uint8_t ipad_buf[blocksize]; - alignas(16) uint8_t opad_buf[blocksize]; - - /* Setup inner and outer pads */ - memset(ipad_buf, HMAC_IPAD_VALUE, blocksize); - memset(opad_buf, HMAC_OPAD_VALUE, blocksize); - - /* XOR hash key with inner and outer pads */ - length = hkey_len > blocksize ? 
blocksize : hkey_len; - - for (i = 0; i < length; i++) { - ipad_buf[i] ^= hkey[i]; - opad_buf[i] ^= hkey[i]; - } - - /* Compute partial hashes */ - (*one_block_hash)(ipad_buf, ipad); - (*one_block_hash)(opad_buf, opad); - - /* Clean up stack */ - memset(ipad_buf, 0, blocksize); - memset(opad_buf, 0, blocksize); -} -#endif - static inline int is_aead_algo(IMB_HASH_ALG hash_alg, IMB_CIPHER_MODE cipher_mode) { @@ -74,10 +29,6 @@ aesni_mb_set_session_auth_parameters(IMB_MGR *mb_mgr, struct aesni_mb_session *sess, const struct rte_crypto_sym_xform *xform) { -#if IMB_VERSION(1, 3, 0) >= IMB_VERSION_NUM - hash_one_block_t hash_oneblock_fn = NULL; - unsigned int key_larger_block_size = 0; -#endif uint8_t hashed_key[HMAC_MAX_BLOCK_SIZE] = { 0 }; uint32_t auth_precompute = 1; @@ -216,13 +167,9 @@ aesni_mb_set_session_auth_parameters(IMB_MGR *mb_mgr, } } else if (xform->auth.key.length == 32) { sess->template_job.hash_alg = IMB_AUTH_ZUC256_EIA3_BITLEN; -#if IMB_VERSION(1, 2, 0) < IMB_VERSION_NUM if (sess->auth.req_digest_len != 4 && sess->auth.req_digest_len != 8 && sess->auth.req_digest_len != 16) { -#else - if (sess->auth.req_digest_len != 4) { -#endif IPSEC_MB_LOG(ERR, "Invalid digest size"); return -EINVAL; } @@ -273,24 +220,15 @@ aesni_mb_set_session_auth_parameters(IMB_MGR *mb_mgr, switch (xform->auth.algo) { case RTE_CRYPTO_AUTH_MD5_HMAC: sess->template_job.hash_alg = IMB_AUTH_MD5; -#if IMB_VERSION(1, 3, 0) >= IMB_VERSION_NUM - hash_oneblock_fn = mb_mgr->md5_one_block; -#endif break; case RTE_CRYPTO_AUTH_SHA1_HMAC: sess->template_job.hash_alg = IMB_AUTH_HMAC_SHA_1; -#if IMB_VERSION(1, 3, 0) >= IMB_VERSION_NUM - hash_oneblock_fn = mb_mgr->sha1_one_block; -#endif if (xform->auth.key.length > get_auth_algo_blocksize( IMB_AUTH_HMAC_SHA_1)) { IMB_SHA1(mb_mgr, xform->auth.key.data, xform->auth.key.length, hashed_key); -#if IMB_VERSION(1, 3, 0) >= IMB_VERSION_NUM - key_larger_block_size = 1; -#endif } break; case RTE_CRYPTO_AUTH_SHA1: @@ -299,18 +237,12 @@ aesni_mb_set_session_auth_parameters(IMB_MGR *mb_mgr, break; case RTE_CRYPTO_AUTH_SHA224_HMAC: sess->template_job.hash_alg = IMB_AUTH_HMAC_SHA_224; -#if IMB_VERSION(1, 3, 0) >= IMB_VERSION_NUM - hash_oneblock_fn = mb_mgr->sha224_one_block; -#endif if (xform->auth.key.length > get_auth_algo_blocksize( IMB_AUTH_HMAC_SHA_224)) { IMB_SHA224(mb_mgr, xform->auth.key.data, xform->auth.key.length, hashed_key); -#if IMB_VERSION(1, 3, 0) >= IMB_VERSION_NUM - key_larger_block_size = 1; -#endif } break; case RTE_CRYPTO_AUTH_SHA224: @@ -319,18 +251,12 @@ aesni_mb_set_session_auth_parameters(IMB_MGR *mb_mgr, break; case RTE_CRYPTO_AUTH_SHA256_HMAC: sess->template_job.hash_alg = IMB_AUTH_HMAC_SHA_256; -#if IMB_VERSION(1, 3, 0) >= IMB_VERSION_NUM - hash_oneblock_fn = mb_mgr->sha256_one_block; -#endif if (xform->auth.key.length > get_auth_algo_blocksize( IMB_AUTH_HMAC_SHA_256)) { IMB_SHA256(mb_mgr, xform->auth.key.data, xform->auth.key.length, hashed_key); -#if IMB_VERSION(1, 3, 0) >= IMB_VERSION_NUM - key_larger_block_size = 1; -#endif } break; case RTE_CRYPTO_AUTH_SHA256: @@ -339,18 +265,12 @@ aesni_mb_set_session_auth_parameters(IMB_MGR *mb_mgr, break; case RTE_CRYPTO_AUTH_SHA384_HMAC: sess->template_job.hash_alg = IMB_AUTH_HMAC_SHA_384; -#if IMB_VERSION(1, 3, 0) >= IMB_VERSION_NUM - hash_oneblock_fn = mb_mgr->sha384_one_block; -#endif if (xform->auth.key.length > get_auth_algo_blocksize( IMB_AUTH_HMAC_SHA_384)) { IMB_SHA384(mb_mgr, xform->auth.key.data, xform->auth.key.length, hashed_key); -#if IMB_VERSION(1, 3, 0) >= IMB_VERSION_NUM - key_larger_block_size = 
1; -#endif } break; case RTE_CRYPTO_AUTH_SHA384: @@ -359,18 +279,12 @@ aesni_mb_set_session_auth_parameters(IMB_MGR *mb_mgr, break; case RTE_CRYPTO_AUTH_SHA512_HMAC: sess->template_job.hash_alg = IMB_AUTH_HMAC_SHA_512; -#if IMB_VERSION(1, 3, 0) >= IMB_VERSION_NUM - hash_oneblock_fn = mb_mgr->sha512_one_block; -#endif if (xform->auth.key.length > get_auth_algo_blocksize( IMB_AUTH_HMAC_SHA_512)) { IMB_SHA512(mb_mgr, xform->auth.key.data, xform->auth.key.length, hashed_key); -#if IMB_VERSION(1, 3, 0) >= IMB_VERSION_NUM - key_larger_block_size = 1; -#endif } break; case RTE_CRYPTO_AUTH_SHA512: @@ -412,25 +326,9 @@ aesni_mb_set_session_auth_parameters(IMB_MGR *mb_mgr, return 0; /* Calculate Authentication precomputes */ -#if IMB_VERSION(1, 3, 0) < IMB_VERSION_NUM - imb_hmac_ipad_opad(mb_mgr, sess->template_job.hash_alg, - xform->auth.key.data, xform->auth.key.length, - sess->auth.pads.inner, sess->auth.pads.outer); -#else - if (key_larger_block_size) { - calculate_auth_precomputes(hash_oneblock_fn, - sess->auth.pads.inner, sess->auth.pads.outer, - hashed_key, - xform->auth.key.length, - get_auth_algo_blocksize(sess->template_job.hash_alg)); - } else { - calculate_auth_precomputes(hash_oneblock_fn, - sess->auth.pads.inner, sess->auth.pads.outer, - xform->auth.key.data, - xform->auth.key.length, - get_auth_algo_blocksize(sess->template_job.hash_alg)); - } -#endif + imb_hmac_ipad_opad(mb_mgr, sess->template_job.hash_alg, + xform->auth.key.data, xform->auth.key.length, + sess->auth.pads.inner, sess->auth.pads.outer); sess->template_job.u.HMAC._hashed_auth_key_xor_ipad = sess->auth.pads.inner; sess->template_job.u.HMAC._hashed_auth_key_xor_opad = @@ -915,11 +813,9 @@ aesni_mb_session_configure(IMB_MGR *mb_mgr, } } -#if IMB_VERSION(1, 3, 0) < IMB_VERSION_NUM sess->session_id = imb_set_session(mb_mgr, &sess->template_job); sess->pid = getpid(); RTE_PER_LCORE(pid) = sess->pid; -#endif return 0; } @@ -1052,9 +948,7 @@ aesni_mb_set_docsis_sec_session_parameters( goto error_exit; } -#if IMB_VERSION(1, 3, 0) < IMB_VERSION_NUM ipsec_sess->session_id = imb_set_session(mb_mgr, &ipsec_sess->template_job); -#endif error_exit: free_mb_mgr(mb_mgr); @@ -1309,7 +1203,6 @@ imb_lib_support_sgl_algo(IMB_CIPHER_MODE alg) return 0; } -#if IMB_VERSION(1, 2, 0) < IMB_VERSION_NUM static inline int single_sgl_job(IMB_JOB *job, struct rte_crypto_op *op, int oop, uint32_t offset, struct rte_mbuf *m_src, @@ -1394,7 +1287,6 @@ single_sgl_job(IMB_JOB *job, struct rte_crypto_op *op, job->sgl_io_segs = sgl_segs; return 0; } -#endif static inline int multi_sgl_job(IMB_JOB *job, struct rte_crypto_op *op, @@ -1464,9 +1356,7 @@ set_gcm_job(IMB_MGR *mb_mgr, IMB_JOB *job, const uint8_t sgl, job->msg_len_to_hash_in_bytes = 0; job->msg_len_to_cipher_in_bytes = 0; job->cipher_start_src_offset_in_bytes = 0; -#if IMB_VERSION(1, 3, 0) < IMB_VERSION_NUM imb_set_session(mb_mgr, job); -#endif } else { job->hash_start_src_offset_in_bytes = op->sym->aead.data.offset; @@ -1494,13 +1384,11 @@ set_gcm_job(IMB_MGR *mb_mgr, IMB_JOB *job, const uint8_t sgl, job->src = NULL; job->dst = NULL; -#if IMB_VERSION(1, 2, 0) < IMB_VERSION_NUM if (m_src->nb_segs <= MAX_NUM_SEGS) return single_sgl_job(job, op, oop, m_offset, m_src, m_dst, qp_data->sgl_segs); else -#endif return multi_sgl_job(job, op, oop, m_offset, m_src, m_dst, mb_mgr); } else { @@ -1590,10 +1478,6 @@ set_mb_job_params(IMB_JOB *job, struct ipsec_mb_qp *qp, uint8_t sgl = 0; uint8_t lb_sgl = 0; -#if IMB_VERSION(1, 3, 0) >= IMB_VERSION_NUM - (void) pid; -#endif - session = 
ipsec_mb_get_session_private(qp, op); if (session == NULL) { op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION; @@ -1603,12 +1487,10 @@ set_mb_job_params(IMB_JOB *job, struct ipsec_mb_qp *qp, const IMB_CIPHER_MODE cipher_mode = session->template_job.cipher_mode; -#if IMB_VERSION(1, 3, 0) < IMB_VERSION_NUM if (session->pid != pid) { memcpy(job, &session->template_job, sizeof(IMB_JOB)); imb_set_session(mb_mgr, job); } else if (job->session_id != session->session_id) -#endif memcpy(job, &session->template_job, sizeof(IMB_JOB)); if (!op->sym->m_dst) { @@ -1649,9 +1531,7 @@ set_mb_job_params(IMB_JOB *job, struct ipsec_mb_qp *qp, job->u.GCM.ctx = &qp_data->gcm_sgl_ctx; job->cipher_mode = IMB_CIPHER_GCM_SGL; job->hash_alg = IMB_AUTH_GCM_SGL; -#if IMB_VERSION(1, 3, 0) < IMB_VERSION_NUM imb_set_session(mb_mgr, job); -#endif } break; case IMB_AUTH_AES_GMAC_128: @@ -1676,9 +1556,7 @@ set_mb_job_params(IMB_JOB *job, struct ipsec_mb_qp *qp, job->u.CHACHA20_POLY1305.ctx = &qp_data->chacha_sgl_ctx; job->cipher_mode = IMB_CIPHER_CHACHA20_POLY1305_SGL; job->hash_alg = IMB_AUTH_CHACHA20_POLY1305_SGL; -#if IMB_VERSION(1, 3, 0) < IMB_VERSION_NUM imb_set_session(mb_mgr, job); -#endif } break; default: @@ -1874,13 +1752,11 @@ set_mb_job_params(IMB_JOB *job, struct ipsec_mb_qp *qp, if (lb_sgl) return handle_sgl_linear(job, op, m_offset, session); -#if IMB_VERSION(1, 2, 0) < IMB_VERSION_NUM if (m_src->nb_segs <= MAX_NUM_SEGS) return single_sgl_job(job, op, oop, m_offset, m_src, m_dst, qp_data->sgl_segs); else -#endif return multi_sgl_job(job, op, oop, m_offset, m_src, m_dst, mb_mgr); } @@ -2200,7 +2076,6 @@ set_job_null_op(IMB_JOB *job, struct rte_crypto_op *op) return job; } -#if IMB_VERSION(1, 2, 0) < IMB_VERSION_NUM uint16_t aesni_mb_dequeue_burst(void *queue_pair, struct rte_crypto_op **ops, uint16_t nb_ops) @@ -2333,144 +2208,7 @@ aesni_mb_dequeue_burst(void *queue_pair, struct rte_crypto_op **ops, return processed_jobs; } -#else - -/** - * Process a completed IMB_JOB job and keep processing jobs until - * get_completed_job return NULL - * - * @param qp Queue Pair to process - * @param mb_mgr IMB_MGR to use - * @param job IMB_JOB job - * @param ops crypto ops to fill - * @param nb_ops number of crypto ops - * - * @return - * - Number of processed jobs - */ -static unsigned -handle_completed_jobs(struct ipsec_mb_qp *qp, IMB_MGR *mb_mgr, - IMB_JOB *job, struct rte_crypto_op **ops, - uint16_t nb_ops) -{ - struct rte_crypto_op *op = NULL; - uint16_t processed_jobs = 0; - - while (job != NULL) { - op = post_process_mb_job(qp, job); - - if (op) { - ops[processed_jobs++] = op; - qp->stats.dequeued_count++; - } else { - qp->stats.dequeue_err_count++; - break; - } - if (processed_jobs == nb_ops) - break; - - job = IMB_GET_COMPLETED_JOB(mb_mgr); - } - - return processed_jobs; -} - -static inline uint16_t -flush_mb_mgr(struct ipsec_mb_qp *qp, IMB_MGR *mb_mgr, - struct rte_crypto_op **ops, uint16_t nb_ops) -{ - int processed_ops = 0; - - /* Flush the remaining jobs */ - IMB_JOB *job = IMB_FLUSH_JOB(mb_mgr); - - if (job) - processed_ops += handle_completed_jobs(qp, mb_mgr, job, - &ops[processed_ops], nb_ops - processed_ops); - - return processed_ops; -} - -uint16_t -aesni_mb_dequeue_burst(void *queue_pair, struct rte_crypto_op **ops, - uint16_t nb_ops) -{ - struct ipsec_mb_qp *qp = queue_pair; - IMB_MGR *mb_mgr = qp->mb_mgr; - struct rte_crypto_op *op; - IMB_JOB *job; - int retval, processed_jobs = 0; - pid_t pid = 0; - - if (unlikely(nb_ops == 0 || mb_mgr == NULL)) - return 0; - - uint8_t digest_idx = qp->digest_idx; - 
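With the minimum intel-ipsec-mb version raised to 1.4.0, the set_mb_job_params() path above relies on imb_set_session() plus a per-session pid instead of the removed IMB_VERSION guards. Below is a minimal sketch of that refresh logic, assuming the IMB_MGR/IMB_JOB API from intel-ipsec-mb.h and the field names used by the driver; the helper name itself is hypothetical and not part of the patch.

#include <string.h>
#include <unistd.h>
#include <intel-ipsec-mb.h>

/* Hypothetical helper: re-arm a job from a session template when the
 * template was registered by another process (different pid), or when the
 * job slot still carries a stale session_id from a different session.
 */
static void
refresh_job_from_template(IMB_MGR *mgr, IMB_JOB *job, IMB_JOB *tmpl,
		uint32_t *sess_id, pid_t sess_pid)
{
	if (sess_pid != getpid()) {
		/* Template registered in another process: re-register it. */
		memcpy(job, tmpl, sizeof(*job));
		*sess_id = imb_set_session(mgr, job);
	} else if (job->session_id != *sess_id) {
		/* Job slot last used by another session: refresh from template. */
		memcpy(job, tmpl, sizeof(*job));
	}
}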
do { - /* Get next free mb job struct from mb manager */ - job = IMB_GET_NEXT_JOB(mb_mgr); - if (unlikely(job == NULL)) { - /* if no free mb job structs we need to flush mb_mgr */ - processed_jobs += flush_mb_mgr(qp, mb_mgr, - &ops[processed_jobs], - nb_ops - processed_jobs); - - if (nb_ops == processed_jobs) - break; - - job = IMB_GET_NEXT_JOB(mb_mgr); - } - - /* - * Get next operation to process from ingress queue. - * There is no need to return the job to the IMB_MGR - * if there are no more operations to process, since the IMB_MGR - * can use that pointer again in next get_next calls. - */ - retval = rte_ring_dequeue(qp->ingress_queue, (void **)&op); - if (retval < 0) - break; - - if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) - retval = set_sec_mb_job_params(job, qp, op, - &digest_idx); - else - retval = set_mb_job_params(job, qp, op, - &digest_idx, mb_mgr, pid); - - if (unlikely(retval != 0)) { - qp->stats.dequeue_err_count++; - set_job_null_op(job, op); - } - - /* Submit job to multi-buffer for processing */ -#ifdef RTE_LIBRTE_PMD_AESNI_MB_DEBUG - job = IMB_SUBMIT_JOB(mb_mgr); -#else - job = IMB_SUBMIT_JOB_NOCHECK(mb_mgr); -#endif - /* - * If submit returns a processed job then handle it, - * before submitting subsequent jobs - */ - if (job) - processed_jobs += handle_completed_jobs(qp, mb_mgr, - job, &ops[processed_jobs], - nb_ops - processed_jobs); - - } while (processed_jobs < nb_ops); - - qp->digest_idx = digest_idx; - - if (processed_jobs < 1) - processed_jobs += flush_mb_mgr(qp, mb_mgr, - &ops[processed_jobs], - nb_ops - processed_jobs); - - return processed_jobs; -} -#endif static inline int check_crypto_sgl(union rte_crypto_sym_ofs so, const struct rte_crypto_sgl *sgl) { diff --git a/drivers/crypto/ipsec_mb/pmd_aesni_mb_priv.h b/drivers/crypto/ipsec_mb/pmd_aesni_mb_priv.h index 6120a2f62d..468a1f35eb 100644 --- a/drivers/crypto/ipsec_mb/pmd_aesni_mb_priv.h +++ b/drivers/crypto/ipsec_mb/pmd_aesni_mb_priv.h @@ -17,9 +17,7 @@ #define HMAC_IPAD_VALUE (0x36) #define HMAC_OPAD_VALUE (0x5C) -#if IMB_VERSION(1, 2, 0) < IMB_VERSION_NUM #define MAX_NUM_SEGS 16 -#endif int aesni_mb_session_configure(IMB_MGR * m __rte_unused, void *priv_sess, @@ -580,13 +578,8 @@ static const struct rte_cryptodev_capabilities aesni_mb_capabilities[] = { }, .digest_size = { .min = 4, -#if IMB_VERSION(1, 2, 0) < IMB_VERSION_NUM .max = 16, .increment = 4 -#else - .max = 4, - .increment = 0 -#endif }, .iv_size = { .min = 16, @@ -843,9 +836,7 @@ struct aesni_mb_qp_data { * by the driver when verifying a digest provided * by the user (using authentication verify operation) */ -#if IMB_VERSION(1, 2, 0) < IMB_VERSION_NUM struct IMB_SGL_IOV sgl_segs[MAX_NUM_SEGS]; -#endif union { struct gcm_context_data gcm_sgl_ctx; struct chacha20_poly1305_context_data chacha_sgl_ctx; diff --git a/drivers/crypto/openssl/rte_openssl_pmd.c b/drivers/crypto/openssl/rte_openssl_pmd.c index 9657b70c7a..b2442c7ebf 100644 --- a/drivers/crypto/openssl/rte_openssl_pmd.c +++ b/drivers/crypto/openssl/rte_openssl_pmd.c @@ -2,6 +2,7 @@ * Copyright(c) 2016-2017 Intel Corporation */ +#include #include #include #include @@ -99,22 +100,6 @@ digest_name_get(enum rte_crypto_auth_algorithm algo) static int cryptodev_openssl_remove(struct rte_vdev_device *vdev); -/*----------------------------------------------------------------------------*/ - -/** - * Increment counter by 1 - * Counter is 64 bit array, big-endian - */ -static void -ctr_inc(uint8_t *ctr) -{ - uint64_t *ctr64 = (uint64_t *)ctr; - - *ctr64 = __builtin_bswap64(*ctr64); - 
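The removed ctr_inc() bumped the 3DES-CTR counter through a uint64_t cast of a byte array; the replacement below keeps the counter in a single 64-bit variable and converts with the rte_byteorder helpers. A minimal sketch of that pattern (the helper name is illustrative):

#include <stdint.h>
#include <rte_byteorder.h>

/* Increment a 64-bit counter stored in big-endian (network) byte order. */
static inline uint64_t
be64_ctr_inc(uint64_t ctr_be)
{
	uint64_t ctr_cpu = rte_be_to_cpu_64(ctr_be);

	return rte_cpu_to_be_64(ctr_cpu + 1);
}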
(*ctr64)++; - *ctr64 = __builtin_bswap64(*ctr64); -} - /* *------------------------------------------------------------------------------ * Session Prepare @@ -692,7 +677,7 @@ openssl_set_session_auth_parameters(struct openssl_session *sess, else return -EINVAL; - rte_memcpy(algo_name, algo, strlen(algo) + 1); + strlcpy(algo_name, algo, sizeof(algo_name)); params[0] = OSSL_PARAM_construct_utf8_string( OSSL_MAC_PARAM_CIPHER, algo_name, 0); params[1] = OSSL_PARAM_construct_end(); @@ -1192,7 +1177,8 @@ static int process_openssl_cipher_des3ctr(struct rte_mbuf *mbuf_src, uint8_t *dst, int offset, uint8_t *iv, int srclen, EVP_CIPHER_CTX *ctx) { - uint8_t ebuf[8], ctr[8]; + uint8_t ebuf[8]; + uint64_t ctr; int unused, n; struct rte_mbuf *m; uint8_t *src; @@ -1208,15 +1194,19 @@ process_openssl_cipher_des3ctr(struct rte_mbuf *mbuf_src, uint8_t *dst, src = rte_pktmbuf_mtod_offset(m, uint8_t *, offset); l = rte_pktmbuf_data_len(m) - offset; - memcpy(ctr, iv, 8); + memcpy(&ctr, iv, 8); for (n = 0; n < srclen; n++) { if (n % 8 == 0) { + uint64_t cpu_ctr; + if (EVP_EncryptUpdate(ctx, (unsigned char *)&ebuf, &unused, (const unsigned char *)&ctr, 8) <= 0) goto process_cipher_des3ctr_err; - ctr_inc(ctr); + cpu_ctr = rte_be_to_cpu_64(ctr); + cpu_ctr++; + ctr = rte_cpu_to_be_64(cpu_ctr); } dst[n] = *(src++) ^ ebuf[n % 8]; diff --git a/drivers/crypto/qat/qat_asym.c b/drivers/crypto/qat/qat_asym.c index 9e97582e22..f5b56b2f71 100644 --- a/drivers/crypto/qat/qat_asym.c +++ b/drivers/crypto/qat/qat_asym.c @@ -277,6 +277,7 @@ modexp_collect(struct rte_crypto_asym_op *asym_op, rte_memcpy(modexp_result, cookie->output_array[0] + alg_bytesize - n.length, n.length); + asym_op->modex.result.length = alg_bytesize; HEXDUMP("ModExp result", cookie->output_array[0], alg_bytesize); return RTE_CRYPTO_OP_STATUS_SUCCESS; @@ -338,6 +339,7 @@ modinv_collect(struct rte_crypto_asym_op *asym_op, - n.length), cookie->output_array[0] + alg_bytesize - n.length, n.length); + asym_op->modinv.result.length = alg_bytesize; HEXDUMP("ModInv result", cookie->output_array[0], alg_bytesize); return RTE_CRYPTO_OP_STATUS_SUCCESS; @@ -1346,11 +1348,48 @@ session_set_rsa(struct qat_asym_session *qat_session, return ret; } -static void +static int session_set_ec(struct qat_asym_session *qat_session, struct rte_crypto_asym_xform *xform) { + uint8_t *pkey = xform->ec.pkey.data; + uint8_t *q_x = xform->ec.q.x.data; + uint8_t *q_y = xform->ec.q.y.data; + + qat_session->xform.ec.pkey.data = + rte_malloc(NULL, xform->ec.pkey.length, 0); + if (qat_session->xform.ec.pkey.length && + qat_session->xform.ec.pkey.data == NULL) + return -ENOMEM; + qat_session->xform.ec.q.x.data = rte_malloc(NULL, + xform->ec.q.x.length, 0); + if (qat_session->xform.ec.q.x.length && + qat_session->xform.ec.q.x.data == NULL) { + rte_free(qat_session->xform.ec.pkey.data); + return -ENOMEM; + } + qat_session->xform.ec.q.y.data = rte_malloc(NULL, + xform->ec.q.y.length, 0); + if (qat_session->xform.ec.q.y.length && + qat_session->xform.ec.q.y.data == NULL) { + rte_free(qat_session->xform.ec.pkey.data); + rte_free(qat_session->xform.ec.q.x.data); + return -ENOMEM; + } + + memcpy(qat_session->xform.ec.pkey.data, pkey, + xform->ec.pkey.length); + qat_session->xform.ec.pkey.length = xform->ec.pkey.length; + memcpy(qat_session->xform.ec.q.x.data, q_x, + xform->ec.q.x.length); + qat_session->xform.ec.q.x.length = xform->ec.q.x.length; + memcpy(qat_session->xform.ec.q.y.data, q_y, + xform->ec.q.y.length); + qat_session->xform.ec.q.y.length = xform->ec.q.y.length; 
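The EC session setup here duplicates the caller's private key and public-point buffers into driver-owned memory so the session no longer references application-owned xform data after configure returns. A minimal sketch of that duplication step, assuming DPDK's rte_crypto_param layout (data/length) from rte_crypto_asym.h; the helper name is hypothetical:

#include <errno.h>
#include <string.h>
#include <rte_malloc.h>
#include <rte_crypto_asym.h>

/* Hypothetical helper: copy a caller-provided parameter into
 * driver-owned memory; returns -ENOMEM on allocation failure.
 */
static int
dup_crypto_param(rte_crypto_param *dst, const rte_crypto_param *src)
{
	dst->data = rte_malloc(NULL, src->length, 0);
	if (src->length && dst->data == NULL)
		return -ENOMEM;
	memcpy(dst->data, src->data, src->length);
	dst->length = src->length;
	return 0;
}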
qat_session->xform.ec.curve_id = xform->ec.curve_id; + + return 0; + } int @@ -1386,7 +1425,7 @@ qat_asym_session_configure(struct rte_cryptodev *dev __rte_unused, case RTE_CRYPTO_ASYM_XFORM_ECDSA: case RTE_CRYPTO_ASYM_XFORM_ECPM: case RTE_CRYPTO_ASYM_XFORM_ECDH: - session_set_ec(qat_session, xform); + ret = session_set_ec(qat_session, xform); break; case RTE_CRYPTO_ASYM_XFORM_SM2: break; diff --git a/drivers/dma/cnxk/cnxk_dmadev.c b/drivers/dma/cnxk/cnxk_dmadev.c index 2d5307b22e..e7be3767b2 100644 --- a/drivers/dma/cnxk/cnxk_dmadev.c +++ b/drivers/dma/cnxk/cnxk_dmadev.c @@ -20,6 +20,10 @@ cnxk_dmadev_info_get(const struct rte_dma_dev *dev, struct rte_dma_info *dev_inf RTE_DMA_CAPA_DEV_TO_MEM | RTE_DMA_CAPA_DEV_TO_DEV | RTE_DMA_CAPA_OPS_COPY | RTE_DMA_CAPA_OPS_COPY_SG | RTE_DMA_CAPA_M2D_AUTO_FREE; + if (roc_feature_dpi_has_priority()) { + dev_info->dev_capa |= RTE_DMA_CAPA_PRI_POLICY_SP; + dev_info->nb_priorities = CN10K_DPI_MAX_PRI; + } dev_info->max_desc = CNXK_DPI_MAX_DESC; dev_info->min_desc = CNXK_DPI_MIN_DESC; dev_info->max_sges = CNXK_DPI_MAX_POINTER; @@ -107,6 +111,8 @@ cnxk_dmadev_configure(struct rte_dma_dev *dev, const struct rte_dma_conf *conf, */ cnxk_dmadev_vchan_free(dpivf, RTE_DMA_ALL_VCHAN); dpivf->num_vchans = conf->nb_vchans; + if (roc_feature_dpi_has_priority()) + dpivf->rdpi.priority = conf->priority; return 0; } diff --git a/drivers/dma/cnxk/cnxk_dmadev.h b/drivers/dma/cnxk/cnxk_dmadev.h index 15af1d64dc..39fd6afbe9 100644 --- a/drivers/dma/cnxk/cnxk_dmadev.h +++ b/drivers/dma/cnxk/cnxk_dmadev.h @@ -28,6 +28,7 @@ ((s).var - 1)) #define CNXK_DPI_MAX_DESC 32768 #define CNXK_DPI_MIN_DESC 2 +#define CN10K_DPI_MAX_PRI 2 #define CNXK_DPI_MAX_VCHANS_PER_QUEUE 4 #define CNXK_DPI_QUEUE_BUF_SIZE 16256 #define CNXK_DPI_QUEUE_BUF_SIZE_V2 130944 diff --git a/drivers/dma/dpaa/dpaa_qdma.c b/drivers/dma/dpaa/dpaa_qdma.c index 3d4fd818f8..a541398e48 100644 --- a/drivers/dma/dpaa/dpaa_qdma.c +++ b/drivers/dma/dpaa/dpaa_qdma.c @@ -1,76 +1,82 @@ /* SPDX-License-Identifier: BSD-3-Clause - * Copyright 2021 NXP + * Copyright 2021-2024 NXP */ #include #include +#include #include "dpaa_qdma.h" #include "dpaa_qdma_logs.h" +static uint32_t s_sg_max_entry_sz = 2000; +static bool s_hw_err_check; + +#define DPAA_DMA_ERROR_CHECK "dpaa_dma_err_check" + static inline void -qdma_desc_addr_set64(struct fsl_qdma_format *ccdf, u64 addr) +qdma_desc_addr_set64(struct fsl_qdma_comp_cmd_desc *ccdf, u64 addr) { ccdf->addr_hi = upper_32_bits(addr); ccdf->addr_lo = rte_cpu_to_le_32(lower_32_bits(addr)); } -static inline u64 -qdma_ccdf_get_queue(const struct fsl_qdma_format *ccdf) +static inline void +qdma_desc_sge_addr_set64(struct fsl_qdma_comp_sg_desc *sge, u64 addr) { - return ccdf->cfg8b_w1 & 0xff; + sge->addr_hi = upper_32_bits(addr); + sge->addr_lo = rte_cpu_to_le_32(lower_32_bits(addr)); } static inline int -qdma_ccdf_get_offset(const struct fsl_qdma_format *ccdf) +qdma_ccdf_get_queue(struct fsl_qdma_comp_cmd_desc *ccdf, + uint8_t *queue_idx) { - return (rte_le_to_cpu_32(ccdf->cfg) & QDMA_CCDF_MASK) - >> QDMA_CCDF_OFFSET; -} + uint64_t addr = ((uint64_t)ccdf->addr_hi) << 32 | ccdf->addr_lo; + + if (addr && queue_idx) + *queue_idx = ccdf->queue; + if (addr) { + ccdf->addr_hi = 0; + ccdf->addr_lo = 0; + return true; + } -static inline void -qdma_ccdf_set_format(struct fsl_qdma_format *ccdf, int offset) -{ - ccdf->cfg = rte_cpu_to_le_32(QDMA_CCDF_FOTMAT | offset); + return false; } static inline int -qdma_ccdf_get_status(const struct fsl_qdma_format *ccdf) +ilog2(int x) { - return 
(rte_le_to_cpu_32(ccdf->status) & QDMA_CCDF_MASK) - >> QDMA_CCDF_STATUS; -} + int log = 0; -static inline void -qdma_ccdf_set_ser(struct fsl_qdma_format *ccdf, int status) -{ - ccdf->status = rte_cpu_to_le_32(QDMA_CCDF_SER | status); + x >>= 1; + + while (x) { + log++; + x >>= 1; + } + return log; } -static inline void -qdma_csgf_set_len(struct fsl_qdma_format *csgf, int len) +static inline int +ilog2_qsize(uint32_t q_size) { - csgf->cfg = rte_cpu_to_le_32(len & QDMA_SG_LEN_MASK); + return (ilog2(q_size) - ilog2(64)); } -static inline void -qdma_csgf_set_f(struct fsl_qdma_format *csgf, int len) +static inline int +ilog2_qthld(uint32_t q_thld) { - csgf->cfg = rte_cpu_to_le_32(QDMA_SG_FIN | (len & QDMA_SG_LEN_MASK)); + return (ilog2(q_thld) - ilog2(16)); } static inline int -ilog2(int x) +fsl_qdma_queue_bd_in_hw(struct fsl_qdma_queue *fsl_queue) { - int log = 0; - - x >>= 1; + struct rte_dma_stats *stats = &fsl_queue->stats; - while (x) { - log++; - x >>= 1; - } - return log; + return (stats->submitted - stats->completed); } static u32 @@ -97,12 +103,12 @@ qdma_writel_be(u32 val, void *addr) QDMA_OUT_BE(addr, val); } -static void -*dma_pool_alloc(int size, int aligned, dma_addr_t *phy_addr) +static void * +dma_pool_alloc(char *nm, int size, int aligned, dma_addr_t *phy_addr) { void *virt_addr; - virt_addr = rte_malloc("dma pool alloc", size, aligned); + virt_addr = rte_zmalloc(nm, size, aligned); if (!virt_addr) return NULL; @@ -111,268 +117,225 @@ static void return virt_addr; } -static void -dma_pool_free(void *addr) -{ - rte_free(addr); -} - -static void -fsl_qdma_free_chan_resources(struct fsl_qdma_chan *fsl_chan) -{ - struct fsl_qdma_queue *fsl_queue = fsl_chan->queue; - struct fsl_qdma_engine *fsl_qdma = fsl_chan->qdma; - struct fsl_qdma_comp *comp_temp, *_comp_temp; - int id; - - if (--fsl_queue->count) - goto finally; - - id = (fsl_qdma->block_base - fsl_queue->block_base) / - fsl_qdma->block_offset; - - while (rte_atomic32_read(&wait_task[id]) == 1) - rte_delay_us(QDMA_DELAY); - - list_for_each_entry_safe(comp_temp, _comp_temp, - &fsl_queue->comp_used, list) { - list_del(&comp_temp->list); - dma_pool_free(comp_temp->virt_addr); - dma_pool_free(comp_temp->desc_virt_addr); - rte_free(comp_temp); - } - - list_for_each_entry_safe(comp_temp, _comp_temp, - &fsl_queue->comp_free, list) { - list_del(&comp_temp->list); - dma_pool_free(comp_temp->virt_addr); - dma_pool_free(comp_temp->desc_virt_addr); - rte_free(comp_temp); - } - -finally: - fsl_qdma->desc_allocated--; -} - -static void -fsl_qdma_comp_fill_memcpy(struct fsl_qdma_comp *fsl_comp, - dma_addr_t dst, dma_addr_t src, u32 len) -{ - struct fsl_qdma_format *csgf_src, *csgf_dest; - - /* Note: command table (fsl_comp->virt_addr) is getting filled - * directly in cmd descriptors of queues while enqueuing the descriptor - * please refer fsl_qdma_enqueue_desc - * frame list table (virt_addr) + 1) and source, - * destination descriptor table - * (fsl_comp->desc_virt_addr and fsl_comp->desc_virt_addr+1) move to - * the control path to fsl_qdma_pre_request_enqueue_comp_sd_desc - */ - csgf_src = (struct fsl_qdma_format *)fsl_comp->virt_addr + 2; - csgf_dest = (struct fsl_qdma_format *)fsl_comp->virt_addr + 3; - - /* Status notification is enqueued to status queue. */ - qdma_desc_addr_set64(csgf_src, src); - qdma_csgf_set_len(csgf_src, len); - qdma_desc_addr_set64(csgf_dest, dst); - qdma_csgf_set_len(csgf_dest, len); - /* This entry is the last entry. 
*/ - qdma_csgf_set_f(csgf_dest, len); -} - /* * Pre-request command descriptor and compound S/G for enqueue. */ static int -fsl_qdma_pre_request_enqueue_comp_sd_desc( - struct fsl_qdma_queue *queue, - int size, int aligned) +fsl_qdma_pre_comp_sd_desc(struct fsl_qdma_queue *queue) { - struct fsl_qdma_comp *comp_temp, *_comp_temp; + struct fsl_qdma_engine *fsl_qdma = queue->engine; struct fsl_qdma_sdf *sdf; struct fsl_qdma_ddf *ddf; - struct fsl_qdma_format *csgf_desc; - int i; - - for (i = 0; i < (int)(queue->n_cq + COMMAND_QUEUE_OVERFLOW); i++) { - comp_temp = rte_zmalloc("qdma: comp temp", - sizeof(*comp_temp), 0); - if (!comp_temp) - return -ENOMEM; - - comp_temp->virt_addr = - dma_pool_alloc(size, aligned, &comp_temp->bus_addr); - if (!comp_temp->virt_addr) { - rte_free(comp_temp); + struct fsl_qdma_comp_cmd_desc *ccdf; + uint16_t i, j; + struct fsl_qdma_cmpd_ft *ft; + + for (i = 0; i < queue->n_cq; i++) { + dma_addr_t phy_ft = 0; + + queue->ft[i] = dma_pool_alloc(NULL, + sizeof(struct fsl_qdma_cmpd_ft), + RTE_CACHE_LINE_SIZE, &phy_ft); + if (!queue->ft[i]) + goto fail; + if (((uint64_t)queue->ft[i]) & + (RTE_CACHE_LINE_SIZE - 1)) { + DPAA_QDMA_ERR("FD[%d] addr(%p) not cache aligned", + i, queue->ft[i]); + rte_free(queue->ft[i]); + queue->ft[i] = NULL; goto fail; } - - comp_temp->desc_virt_addr = - dma_pool_alloc(size, aligned, &comp_temp->desc_bus_addr); - if (!comp_temp->desc_virt_addr) { - rte_free(comp_temp->virt_addr); - rte_free(comp_temp); + if (((uint64_t)(&queue->ft[i]->desc_ssge[0])) & + (RTE_CACHE_LINE_SIZE - 1)) { + DPAA_QDMA_ERR("FD[%d] SGE addr(%p) not cache aligned", + i, &queue->ft[i]->desc_ssge[0]); + rte_free(queue->ft[i]); + queue->ft[i] = NULL; goto fail; } - - memset(comp_temp->virt_addr, 0, FSL_QDMA_COMMAND_BUFFER_SIZE); - memset(comp_temp->desc_virt_addr, 0, - FSL_QDMA_DESCRIPTOR_BUFFER_SIZE); - - csgf_desc = (struct fsl_qdma_format *)comp_temp->virt_addr + 1; - sdf = (struct fsl_qdma_sdf *)comp_temp->desc_virt_addr; - ddf = (struct fsl_qdma_ddf *)comp_temp->desc_virt_addr + 1; + queue->ft[i]->phy_ssge = phy_ft + + offsetof(struct fsl_qdma_cmpd_ft, desc_ssge); + queue->ft[i]->phy_dsge = phy_ft + + offsetof(struct fsl_qdma_cmpd_ft, desc_dsge); + queue->ft[i]->phy_df = phy_ft + + offsetof(struct fsl_qdma_cmpd_ft, df); + + ft = queue->ft[i]; + sdf = &ft->df.sdf; + ddf = &ft->df.ddf; /* Compound Command Descriptor(Frame List Table) */ - qdma_desc_addr_set64(csgf_desc, comp_temp->desc_bus_addr); + qdma_desc_sge_addr_set64(&ft->desc_buf, ft->phy_df); /* It must be 32 as Compound S/G Descriptor */ - qdma_csgf_set_len(csgf_desc, 32); + ft->desc_buf.length = sizeof(struct fsl_qdma_df); + /* Descriptor Buffer */ - sdf->cmd = rte_cpu_to_le_32(FSL_QDMA_CMD_RWTTYPE << - FSL_QDMA_CMD_RWTTYPE_OFFSET); - ddf->cmd = rte_cpu_to_le_32(FSL_QDMA_CMD_RWTTYPE << - FSL_QDMA_CMD_RWTTYPE_OFFSET); - ddf->cmd |= rte_cpu_to_le_32(FSL_QDMA_CMD_LWC << - FSL_QDMA_CMD_LWC_OFFSET); - - list_add_tail(&comp_temp->list, &queue->comp_free); + sdf->srttype = FSL_QDMA_CMD_RWTTYPE; +#ifdef RTE_DMA_DPAA_ERRATA_ERR050265 + sdf->prefetch = 1; +#endif + ddf->dwttype = FSL_QDMA_CMD_RWTTYPE; + ddf->lwc = FSL_QDMA_CMD_LWC; + + ccdf = &queue->cq[i]; + qdma_desc_addr_set64(ccdf, phy_ft); + ccdf->format = FSL_QDMA_COMP_SG_FORMAT; + if (!fsl_qdma->is_silent) + ccdf->ser = 1; + ccdf->queue = queue->queue_id; } + queue->ci = 0; return 0; fail: - list_for_each_entry_safe(comp_temp, _comp_temp, - &queue->comp_free, list) { - list_del(&comp_temp->list); - rte_free(comp_temp->virt_addr); - 
rte_free(comp_temp->desc_virt_addr); - rte_free(comp_temp); - } + for (j = 0; j < i; j++) + rte_free(queue->ft[j]); return -ENOMEM; } -/* - * Request a command descriptor for enqueue. - */ -static struct fsl_qdma_comp * -fsl_qdma_request_enqueue_desc(struct fsl_qdma_chan *fsl_chan) +static int +fsl_qdma_alloc_queue_resources(struct fsl_qdma_engine *fsl_qdma, + int queue_id, int block_id) { - struct fsl_qdma_queue *queue = fsl_chan->queue; - struct fsl_qdma_comp *comp_temp; - - if (!list_empty(&queue->comp_free)) { - comp_temp = list_first_entry(&queue->comp_free, - struct fsl_qdma_comp, - list); - list_del(&comp_temp->list); - return comp_temp; + struct fsl_qdma_queue *cmd_queue; + uint32_t queue_size; + char nm[RTE_MEMZONE_NAMESIZE]; + + cmd_queue = &fsl_qdma->cmd_queues[block_id][queue_id]; + cmd_queue->engine = fsl_qdma; + + queue_size = sizeof(struct fsl_qdma_comp_cmd_desc) * + QDMA_QUEUE_SIZE; + + sprintf(nm, "Command queue_%d_%d", + block_id, queue_id); + cmd_queue->cq = dma_pool_alloc(nm, queue_size, + queue_size, &cmd_queue->bus_addr); + if (!cmd_queue->cq) { + DPAA_QDMA_ERR("%s alloc failed!", nm); + return -ENOMEM; } - return NULL; -} - -static struct fsl_qdma_queue -*fsl_qdma_alloc_queue_resources(struct fsl_qdma_engine *fsl_qdma) -{ - struct fsl_qdma_queue *queue_head, *queue_temp; - int len, i, j; - int queue_num; - int blocks; - unsigned int queue_size[FSL_QDMA_QUEUE_MAX]; - - queue_num = fsl_qdma->n_queues; - blocks = fsl_qdma->num_blocks; - - len = sizeof(*queue_head) * queue_num * blocks; - queue_head = rte_zmalloc("qdma: queue head", len, 0); - if (!queue_head) - return NULL; + cmd_queue->block_vir = fsl_qdma->block_base + + FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, block_id); + cmd_queue->n_cq = QDMA_QUEUE_SIZE; + cmd_queue->queue_id = queue_id; + cmd_queue->block_id = block_id; + cmd_queue->pending_start = 0; + cmd_queue->pending_num = 0; + cmd_queue->complete_start = 0; + + sprintf(nm, "Compound Table_%d_%d", + block_id, queue_id); + cmd_queue->ft = rte_zmalloc(nm, + sizeof(void *) * QDMA_QUEUE_SIZE, 0); + if (!cmd_queue->ft) { + DPAA_QDMA_ERR("%s zmalloc failed!", nm); + rte_free(cmd_queue->cq); + return -ENOMEM; + } + sprintf(nm, "Pending_desc_%d_%d", + block_id, queue_id); + cmd_queue->pending_desc = rte_zmalloc(nm, + sizeof(struct fsl_qdma_desc) * FSL_QDMA_MAX_DESC_NUM, 0); + if (!cmd_queue->pending_desc) { + DPAA_QDMA_ERR("%s zmalloc failed!", nm); + rte_free(cmd_queue->ft); + rte_free(cmd_queue->cq); + return -ENOMEM; + } + sprintf(nm, "complete-burst_ring_%d_%d", + block_id, queue_id); + cmd_queue->complete_burst = rte_ring_create(nm, + QDMA_QUEUE_SIZE * 2, 0, + RING_F_SP_ENQ | RING_F_SC_DEQ); + if (!cmd_queue->complete_burst) { + DPAA_QDMA_ERR("%s create failed!", nm); + rte_free(cmd_queue->pending_desc); + rte_free(cmd_queue->ft); + rte_free(cmd_queue->cq); + return -ENOMEM; + } + sprintf(nm, "complete-desc_ring_%d_%d", + block_id, queue_id); + cmd_queue->complete_desc = rte_ring_create(nm, + FSL_QDMA_MAX_DESC_NUM * 2, 0, + RING_F_SP_ENQ | RING_F_SC_DEQ); + if (!cmd_queue->complete_desc) { + DPAA_QDMA_ERR("%s create failed!", nm); + rte_ring_free(cmd_queue->complete_burst); + rte_free(cmd_queue->pending_desc); + rte_free(cmd_queue->ft); + rte_free(cmd_queue->cq); + return -ENOMEM; + } + sprintf(nm, "complete-pool-desc_ring_%d_%d", + block_id, queue_id); + cmd_queue->complete_pool = rte_ring_create(nm, + FSL_QDMA_MAX_DESC_NUM * 2, 0, + RING_F_SP_ENQ | RING_F_SC_DEQ); + if (!cmd_queue->complete_pool) { + DPAA_QDMA_ERR("%s create failed!", nm); + 
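Each command queue above is backed by single-producer/single-consumer rte_rings used for completion bookkeeping. A minimal sketch of that creation pattern, assuming only the standard rte_ring API (the wrapper name and sizing are illustrative):

#include <rte_ring.h>
#include <rte_lcore.h>

/* Create a power-of-two sized ring restricted to one enqueuer and one
 * dequeuer, matching the RING_F_SP_ENQ | RING_F_SC_DEQ usage above.
 */
static struct rte_ring *
create_spsc_ring(const char *name, unsigned int count)
{
	return rte_ring_create(name, count, rte_socket_id(),
			RING_F_SP_ENQ | RING_F_SC_DEQ);
}

With the SP/SC flags the ring skips the multi-producer/multi-consumer synchronization path, which fits a per-queue ownership model where only one lcore enqueues and one dequeues.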
rte_ring_free(cmd_queue->complete_desc); + rte_ring_free(cmd_queue->complete_burst); + rte_free(cmd_queue->pending_desc); + rte_free(cmd_queue->ft); + rte_free(cmd_queue->cq); + return -ENOMEM; + } - for (i = 0; i < FSL_QDMA_QUEUE_MAX; i++) - queue_size[i] = QDMA_QUEUE_SIZE; + memset(&cmd_queue->stats, 0, sizeof(struct rte_dma_stats)); + cmd_queue->pending_max = FSL_QDMA_MAX_DESC_NUM; - for (j = 0; j < blocks; j++) { - for (i = 0; i < queue_num; i++) { - if (queue_size[i] > FSL_QDMA_CIRCULAR_DESC_SIZE_MAX || - queue_size[i] < FSL_QDMA_CIRCULAR_DESC_SIZE_MIN) { - DPAA_QDMA_ERR("Get wrong queue-sizes."); - goto fail; - } - queue_temp = queue_head + i + (j * queue_num); - - queue_temp->cq = - dma_pool_alloc(sizeof(struct fsl_qdma_format) * - queue_size[i], - sizeof(struct fsl_qdma_format) * - queue_size[i], &queue_temp->bus_addr); - - if (!queue_temp->cq) - goto fail; - - memset(queue_temp->cq, 0x0, queue_size[i] * - sizeof(struct fsl_qdma_format)); - - queue_temp->block_base = fsl_qdma->block_base + - FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, j); - queue_temp->n_cq = queue_size[i]; - queue_temp->id = i; - queue_temp->count = 0; - queue_temp->pending = 0; - queue_temp->virt_head = queue_temp->cq; - queue_temp->stats = (struct rte_dma_stats){0}; - } - } - return queue_head; + return 0; +} -fail: - for (j = 0; j < blocks; j++) { - for (i = 0; i < queue_num; i++) { - queue_temp = queue_head + i + (j * queue_num); - dma_pool_free(queue_temp->cq); - } - } - rte_free(queue_head); +static void +fsl_qdma_free_cmdq_res(struct fsl_qdma_queue *queue) +{ + rte_free(queue->ft); + rte_free(queue->cq); + rte_free(queue->pending_desc); + rte_ring_free(queue->complete_burst); + rte_ring_free(queue->complete_desc); + rte_ring_free(queue->complete_pool); +} - return NULL; +static void +fsl_qdma_free_stq_res(struct fsl_qdma_status_queue *queue) +{ + rte_free(queue->cq); } -static struct -fsl_qdma_queue *fsl_qdma_prep_status_queue(void) +static int +fsl_qdma_prep_status_queue(struct fsl_qdma_engine *fsl_qdma, + uint32_t block_id) { - struct fsl_qdma_queue *status_head; - unsigned int status_size; + struct fsl_qdma_status_queue *status; + uint32_t status_size; - status_size = QDMA_STATUS_SIZE; - if (status_size > FSL_QDMA_CIRCULAR_DESC_SIZE_MAX || - status_size < FSL_QDMA_CIRCULAR_DESC_SIZE_MIN) { - DPAA_QDMA_ERR("Get wrong status_size."); - return NULL; - } + status = &fsl_qdma->stat_queues[block_id]; + status->engine = fsl_qdma; - status_head = rte_zmalloc("qdma: status head", sizeof(*status_head), 0); - if (!status_head) - return NULL; + status_size = QDMA_STATUS_SIZE * + sizeof(struct fsl_qdma_comp_cmd_desc); - /* - * Buffer for queue command - */ - status_head->cq = dma_pool_alloc(sizeof(struct fsl_qdma_format) * - status_size, - sizeof(struct fsl_qdma_format) * - status_size, - &status_head->bus_addr); - - if (!status_head->cq) { - rte_free(status_head); - return NULL; - } + status->cq = dma_pool_alloc(NULL, status_size, + status_size, &status->bus_addr); - memset(status_head->cq, 0x0, status_size * - sizeof(struct fsl_qdma_format)); - status_head->n_cq = status_size; - status_head->virt_head = status_head->cq; + if (!status->cq) + return -ENOMEM; - return status_head; + memset(status->cq, 0x0, status_size); + status->n_cq = QDMA_STATUS_SIZE; + status->complete = 0; + status->block_id = block_id; + status->block_vir = fsl_qdma->block_base + + FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, block_id); + + return 0; } static int @@ -420,59 +383,41 @@ fsl_qdma_halt(struct fsl_qdma_engine *fsl_qdma) return 0; } -static int 
-fsl_qdma_queue_transfer_complete(struct fsl_qdma_engine *fsl_qdma, - void *block, int id, const uint16_t nb_cpls, - uint16_t *last_idx, - enum rte_dma_status_code *status) +static void +fsl_qdma_data_validation(struct fsl_qdma_desc *desc[], + uint8_t num, struct fsl_qdma_queue *fsl_queue) { - struct fsl_qdma_queue *fsl_queue = fsl_qdma->queue; - struct fsl_qdma_queue *fsl_status = fsl_qdma->status[id]; - struct fsl_qdma_queue *temp_queue; - struct fsl_qdma_format *status_addr; - struct fsl_qdma_comp *fsl_comp = NULL; - u32 reg, i; - int count = 0; - - while (count < nb_cpls) { - reg = qdma_readl_be(block + FSL_QDMA_BSQSR); - if (reg & FSL_QDMA_BSQSR_QE_BE) - return count; - - status_addr = fsl_status->virt_head; - - i = qdma_ccdf_get_queue(status_addr) + - id * fsl_qdma->n_queues; - temp_queue = fsl_queue + i; - fsl_comp = list_first_entry(&temp_queue->comp_used, - struct fsl_qdma_comp, - list); - list_del(&fsl_comp->list); - - reg = qdma_readl_be(block + FSL_QDMA_BSQMR); - reg |= FSL_QDMA_BSQMR_DI_BE; - - qdma_desc_addr_set64(status_addr, 0x0); - fsl_status->virt_head++; - if (fsl_status->virt_head == fsl_status->cq + fsl_status->n_cq) - fsl_status->virt_head = fsl_status->cq; - qdma_writel_be(reg, block + FSL_QDMA_BSQMR); - *last_idx = fsl_comp->index; - if (status != NULL) - status[count] = RTE_DMA_STATUS_SUCCESSFUL; - - list_add_tail(&fsl_comp->list, &temp_queue->comp_free); - count++; - + uint32_t i, j; + uint8_t *v_src, *v_dst; + char err_msg[512]; + int offset; + + + offset = sprintf(err_msg, "Fatal TC%d/queue%d: ", + fsl_queue->block_id, + fsl_queue->queue_id); + for (i = 0; i < num; i++) { + v_src = rte_mem_iova2virt(desc[i]->src); + v_dst = rte_mem_iova2virt(desc[i]->dst); + for (j = 0; j < desc[i]->len; j++) { + if (v_src[j] != v_dst[j]) { + sprintf(&err_msg[offset], + "job[%"PRIu64"]:src(%p)[%d](%d)!=dst(%p)[%d](%d)", + desc[i]->flag, v_src, j, v_src[j], + v_dst, j, v_dst[j]); + DPAA_QDMA_ERR("%s, stop validating!", + err_msg); + return; + } + } } - return count; } static int fsl_qdma_reg_init(struct fsl_qdma_engine *fsl_qdma) { - struct fsl_qdma_queue *fsl_queue = fsl_qdma->queue; struct fsl_qdma_queue *temp; + struct fsl_qdma_status_queue *temp_stat; void *ctrl = fsl_qdma->ctrl_base; void *block; u32 i, j; @@ -489,8 +434,8 @@ fsl_qdma_reg_init(struct fsl_qdma_engine *fsl_qdma) for (j = 0; j < fsl_qdma->num_blocks; j++) { block = fsl_qdma->block_base + FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, j); - for (i = 0; i < fsl_qdma->n_queues; i++) { - temp = fsl_queue + i + (j * fsl_qdma->n_queues); + for (i = 0; i < QDMA_QUEUES; i++) { + temp = &fsl_qdma->cmd_queues[j][i]; /* * Initialize Command Queue registers to * point to the first @@ -510,8 +455,9 @@ fsl_qdma_reg_init(struct fsl_qdma_engine *fsl_qdma) /* Initialize the queue mode. 
*/ reg = FSL_QDMA_BCQMR_EN; - reg |= FSL_QDMA_BCQMR_CD_THLD(ilog2(temp->n_cq) - 4); - reg |= FSL_QDMA_BCQMR_CQ_SIZE(ilog2(temp->n_cq) - 6); + reg |= FSL_QDMA_BCQMR_CD_THLD(ilog2_qthld(temp->n_cq)); + reg |= FSL_QDMA_BCQMR_CQ_SIZE(ilog2_qsize(temp->n_cq)); + temp->le_cqmr = reg; qdma_writel(reg, block + FSL_QDMA_BCQMR(i)); } @@ -531,18 +477,15 @@ fsl_qdma_reg_init(struct fsl_qdma_engine *fsl_qdma) * Enqueue Pointer Address Registers */ - qdma_writel( - upper_32_bits(fsl_qdma->status[j]->bus_addr), - block + FSL_QDMA_SQEEPAR); - qdma_writel( - lower_32_bits(fsl_qdma->status[j]->bus_addr), - block + FSL_QDMA_SQEPAR); - qdma_writel( - upper_32_bits(fsl_qdma->status[j]->bus_addr), - block + FSL_QDMA_SQEDPAR); - qdma_writel( - lower_32_bits(fsl_qdma->status[j]->bus_addr), - block + FSL_QDMA_SQDPAR); + temp_stat = &fsl_qdma->stat_queues[j]; + qdma_writel(upper_32_bits(temp_stat->bus_addr), + block + FSL_QDMA_SQEEPAR); + qdma_writel(lower_32_bits(temp_stat->bus_addr), + block + FSL_QDMA_SQEPAR); + qdma_writel(upper_32_bits(temp_stat->bus_addr), + block + FSL_QDMA_SQEDPAR); + qdma_writel(lower_32_bits(temp_stat->bus_addr), + block + FSL_QDMA_SQDPAR); /* Desiable status queue interrupt. */ qdma_writel(0x0, block + FSL_QDMA_BCQIER(0)); @@ -551,7 +494,7 @@ fsl_qdma_reg_init(struct fsl_qdma_engine *fsl_qdma) /* Initialize the status queue mode. */ reg = FSL_QDMA_BSQMR_EN; - val = ilog2(fsl_qdma->status[j]->n_cq) - 6; + val = ilog2_qsize(temp_stat->n_cq); reg |= FSL_QDMA_BSQMR_CQ_SIZE(val); qdma_writel(reg, block + FSL_QDMA_BSQMR); } @@ -563,159 +506,455 @@ fsl_qdma_reg_init(struct fsl_qdma_engine *fsl_qdma) return 0; } -static void * -fsl_qdma_prep_memcpy(void *fsl_chan, dma_addr_t dst, - dma_addr_t src, size_t len, - void *call_back, - void *param) +static uint16_t +dpaa_qdma_block_dequeue(struct fsl_qdma_engine *fsl_qdma, + uint8_t block_id) { - struct fsl_qdma_comp *fsl_comp; + struct fsl_qdma_status_queue *stat_queue; + struct fsl_qdma_queue *cmd_queue; + struct fsl_qdma_comp_cmd_desc *cq; + uint16_t start, count = 0; + uint8_t qid = 0; + uint32_t reg; + int ret; + uint8_t *block; + uint16_t *dq_complete; + struct fsl_qdma_desc *desc[FSL_QDMA_SG_MAX_ENTRY]; - fsl_comp = - fsl_qdma_request_enqueue_desc((struct fsl_qdma_chan *)fsl_chan); - if (!fsl_comp) - return NULL; + stat_queue = &fsl_qdma->stat_queues[block_id]; + cq = stat_queue->cq; + start = stat_queue->complete; - fsl_comp->qchan = fsl_chan; - fsl_comp->call_back_func = call_back; - fsl_comp->params = param; + block = fsl_qdma->block_base + + FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, block_id); - fsl_qdma_comp_fill_memcpy(fsl_comp, dst, src, len); - return (void *)fsl_comp; + do { + reg = qdma_readl_be(block + FSL_QDMA_BSQSR); + if (reg & FSL_QDMA_BSQSR_QE_BE) + break; + + qdma_writel_be(FSL_QDMA_BSQMR_DI, block + FSL_QDMA_BSQMR); + ret = qdma_ccdf_get_queue(&cq[start], &qid); + if (ret == true) { + cmd_queue = &fsl_qdma->cmd_queues[block_id][qid]; + + ret = rte_ring_dequeue(cmd_queue->complete_burst, + (void **)&dq_complete); + if (ret) { + DPAA_QDMA_ERR("DQ desc number failed!"); + break; + } + + ret = rte_ring_dequeue_bulk(cmd_queue->complete_desc, + (void **)desc, *dq_complete, NULL); + if (ret != (*dq_complete)) { + DPAA_QDMA_ERR("DQ %d descs failed!(%d)", + *dq_complete, ret); + break; + } + + fsl_qdma_data_validation(desc, *dq_complete, cmd_queue); + + ret = rte_ring_enqueue_bulk(cmd_queue->complete_pool, + (void **)desc, (*dq_complete), NULL); + if (ret != (*dq_complete)) { + DPAA_QDMA_ERR("Failed desc eq %d!=%d to %s", + ret, 
*dq_complete, + cmd_queue->complete_pool->name); + break; + } + + cmd_queue->complete_start = + (cmd_queue->complete_start + (*dq_complete)) & + (cmd_queue->pending_max - 1); + cmd_queue->stats.completed++; + + start++; + if (unlikely(start == stat_queue->n_cq)) + start = 0; + count++; + } else { + DPAA_QDMA_ERR("Block%d not empty but dq-queue failed!", + block_id); + break; + } + } while (1); + stat_queue->complete = start; + + return count; } static int -fsl_qdma_enqueue_desc(struct fsl_qdma_chan *fsl_chan, - struct fsl_qdma_comp *fsl_comp, - uint64_t flags) +fsl_qdma_enqueue_desc_to_ring(struct fsl_qdma_queue *fsl_queue, + uint16_t num) { - struct fsl_qdma_queue *fsl_queue = fsl_chan->queue; - void *block = fsl_queue->block_base; - struct fsl_qdma_format *ccdf; - u32 reg; + struct fsl_qdma_engine *fsl_qdma = fsl_queue->engine; + uint16_t i, idx, start, dq; + int ret, dq_cnt; - /* retrieve and store the register value in big endian - * to avoid bits swap - */ - reg = qdma_readl_be(block + - FSL_QDMA_BCQSR(fsl_queue->id)); - if (reg & (FSL_QDMA_BCQSR_QF_XOFF_BE)) - return -1; + if (fsl_qdma->is_silent) + return 0; - /* filling descriptor command table */ - ccdf = (struct fsl_qdma_format *)fsl_queue->virt_head; - qdma_desc_addr_set64(ccdf, fsl_comp->bus_addr + 16); - qdma_ccdf_set_format(ccdf, qdma_ccdf_get_offset(fsl_comp->virt_addr)); - qdma_ccdf_set_ser(ccdf, qdma_ccdf_get_status(fsl_comp->virt_addr)); - fsl_comp->index = fsl_queue->virt_head - fsl_queue->cq; - fsl_queue->virt_head++; + fsl_queue->desc_in_hw[fsl_queue->ci] = num; +eq_again: + ret = rte_ring_enqueue(fsl_queue->complete_burst, + &fsl_queue->desc_in_hw[fsl_queue->ci]); + if (ret) { + DPAA_QDMA_DP_DEBUG("%s: Queue is full, try dequeue first", + __func__); + DPAA_QDMA_DP_DEBUG("%s: submitted:%"PRIu64", completed:%"PRIu64"", + __func__, fsl_queue->stats.submitted, + fsl_queue->stats.completed); + dq_cnt = 0; +dq_again: + dq = dpaa_qdma_block_dequeue(fsl_queue->engine, + fsl_queue->block_id); + dq_cnt++; + if (dq > 0) { + goto eq_again; + } else { + if (dq_cnt < 100) + goto dq_again; + DPAA_QDMA_ERR("%s: Dq block%d failed!", + __func__, fsl_queue->block_id); + } + return ret; + } + start = fsl_queue->pending_start; + for (i = 0; i < num; i++) { + idx = (start + i) & (fsl_queue->pending_max - 1); + ret = rte_ring_enqueue(fsl_queue->complete_desc, + &fsl_queue->pending_desc[idx]); + if (ret) { + DPAA_QDMA_ERR("Descriptors eq failed!"); + return ret; + } + } + + return 0; +} + +static int +fsl_qdma_enqueue_overflow(struct fsl_qdma_queue *fsl_queue) +{ + int overflow = 0; + uint32_t reg; + uint16_t blk_drain, check_num, drain_num; + uint8_t *block = fsl_queue->block_vir; + const struct rte_dma_stats *st = &fsl_queue->stats; + struct fsl_qdma_engine *fsl_qdma = fsl_queue->engine; + + check_num = 0; +overflow_check: + if (fsl_qdma->is_silent || unlikely(s_hw_err_check)) { + reg = qdma_readl_be(block + + FSL_QDMA_BCQSR(fsl_queue->queue_id)); + overflow = (reg & FSL_QDMA_BCQSR_QF_XOFF_BE) ? + 1 : 0; + } else { + overflow = (fsl_qdma_queue_bd_in_hw(fsl_queue) >= + QDMA_QUEUE_CR_WM) ? 
1 : 0; + } - if (fsl_queue->virt_head == fsl_queue->cq + fsl_queue->n_cq) - fsl_queue->virt_head = fsl_queue->cq; + if (likely(!overflow)) { + return 0; + } else if (fsl_qdma->is_silent) { + check_num++; + if (check_num >= 10000) { + DPAA_QDMA_WARN("Waiting for HW complete in silent mode"); + check_num = 0; + } + goto overflow_check; + } - list_add_tail(&fsl_comp->list, &fsl_queue->comp_used); + DPAA_QDMA_DP_DEBUG("TC%d/Q%d submitted(%"PRIu64")-completed(%"PRIu64") >= %d", + fsl_queue->block_id, fsl_queue->queue_id, + st->submitted, st->completed, QDMA_QUEUE_CR_WM); + drain_num = 0; + +drain_again: + blk_drain = dpaa_qdma_block_dequeue(fsl_qdma, + fsl_queue->block_id); + if (!blk_drain) { + drain_num++; + if (drain_num >= 10000) { + DPAA_QDMA_WARN("TC%d failed drain, Q%d's %"PRIu64" bd in HW.", + fsl_queue->block_id, fsl_queue->queue_id, + st->submitted - st->completed); + drain_num = 0; + } + goto drain_again; + } + check_num++; + if (check_num >= 1000) { + DPAA_QDMA_WARN("TC%d failed check, Q%d's %"PRIu64" bd in HW.", + fsl_queue->block_id, fsl_queue->queue_id, + st->submitted - st->completed); + check_num = 0; + } + goto overflow_check; + + return 0; +} - if (flags == RTE_DMA_OP_FLAG_SUBMIT) { - reg = qdma_readl_be(block + FSL_QDMA_BCQMR(fsl_queue->id)); - reg |= FSL_QDMA_BCQMR_EI_BE; - qdma_writel_be(reg, block + FSL_QDMA_BCQMR(fsl_queue->id)); - fsl_queue->stats.submitted++; +static int +fsl_qdma_enqueue_desc_single(struct fsl_qdma_queue *fsl_queue, + dma_addr_t dst, dma_addr_t src, size_t len) +{ + uint8_t *block = fsl_queue->block_vir; + struct fsl_qdma_comp_sg_desc *csgf_src, *csgf_dest; + struct fsl_qdma_cmpd_ft *ft; + int ret; +#ifdef RTE_DMA_DPAA_ERRATA_ERR050757 + struct fsl_qdma_sdf *sdf; +#endif + + ret = fsl_qdma_enqueue_overflow(fsl_queue); + if (unlikely(ret)) + return ret; + + ft = fsl_queue->ft[fsl_queue->ci]; + +#ifdef RTE_DMA_DPAA_ERRATA_ERR050757 + sdf = &ft->df.sdf; + sdf->srttype = FSL_QDMA_CMD_RWTTYPE; +#ifdef RTE_DMA_DPAA_ERRATA_ERR050265 + sdf->prefetch = 1; +#endif + if (len > FSL_QDMA_CMD_SS_ERR050757_LEN) { + sdf->ssen = 1; + sdf->sss = FSL_QDMA_CMD_SS_ERR050757_LEN; + sdf->ssd = FSL_QDMA_CMD_SS_ERR050757_LEN; } else { - fsl_queue->pending++; + sdf->ssen = 0; + sdf->sss = 0; + sdf->ssd = 0; } - return fsl_comp->index; +#endif + csgf_src = &ft->desc_sbuf; + csgf_dest = &ft->desc_dbuf; + qdma_desc_sge_addr_set64(csgf_src, src); + csgf_src->length = len; + csgf_src->extion = 0; + qdma_desc_sge_addr_set64(csgf_dest, dst); + csgf_dest->length = len; + csgf_dest->extion = 0; + /* This entry is the last entry. 
*/ + csgf_dest->final = 1; + + ret = fsl_qdma_enqueue_desc_to_ring(fsl_queue, 1); + if (ret) + return ret; + fsl_queue->ci = (fsl_queue->ci + 1) & (fsl_queue->n_cq - 1); + + qdma_writel(fsl_queue->le_cqmr | FSL_QDMA_BCQMR_EI, + block + FSL_QDMA_BCQMR(fsl_queue->queue_id)); + fsl_queue->stats.submitted++; + + return 0; } static int -fsl_qdma_alloc_chan_resources(struct fsl_qdma_chan *fsl_chan) +fsl_qdma_enqueue_desc_sg(struct fsl_qdma_queue *fsl_queue) { - struct fsl_qdma_queue *fsl_queue = fsl_chan->queue; - struct fsl_qdma_engine *fsl_qdma = fsl_chan->qdma; + uint8_t *block = fsl_queue->block_vir; + struct fsl_qdma_comp_sg_desc *csgf_src, *csgf_dest; + struct fsl_qdma_cmpd_ft *ft; + uint32_t total_len; + uint16_t start, idx, num, i, next_idx; int ret; +#ifdef RTE_DMA_DPAA_ERRATA_ERR050757 + struct fsl_qdma_sdf *sdf; +#endif + +eq_sg: + total_len = 0; + start = fsl_queue->pending_start; + if (fsl_queue->pending_desc[start].len > s_sg_max_entry_sz || + fsl_queue->pending_num == 1) { + ret = fsl_qdma_enqueue_desc_single(fsl_queue, + fsl_queue->pending_desc[start].dst, + fsl_queue->pending_desc[start].src, + fsl_queue->pending_desc[start].len); + if (!ret) { + fsl_queue->pending_start = + (start + 1) & (fsl_queue->pending_max - 1); + fsl_queue->pending_num--; + } + if (fsl_queue->pending_num > 0) + goto eq_sg; - if (fsl_queue->count++) - goto finally; + return ret; + } - INIT_LIST_HEAD(&fsl_queue->comp_free); - INIT_LIST_HEAD(&fsl_queue->comp_used); + ret = fsl_qdma_enqueue_overflow(fsl_queue); + if (unlikely(ret)) + return ret; - ret = fsl_qdma_pre_request_enqueue_comp_sd_desc(fsl_queue, - FSL_QDMA_COMMAND_BUFFER_SIZE, 64); - if (ret) { - DPAA_QDMA_ERR( - "failed to alloc dma buffer for comp descriptor"); - goto exit; + if (fsl_queue->pending_num > FSL_QDMA_SG_MAX_ENTRY) + num = FSL_QDMA_SG_MAX_ENTRY; + else + num = fsl_queue->pending_num; + + ft = fsl_queue->ft[fsl_queue->ci]; + csgf_src = &ft->desc_sbuf; + csgf_dest = &ft->desc_dbuf; + + qdma_desc_sge_addr_set64(csgf_src, ft->phy_ssge); + csgf_src->extion = 1; + qdma_desc_sge_addr_set64(csgf_dest, ft->phy_dsge); + csgf_dest->extion = 1; + /* This entry is the last entry. 
*/ + csgf_dest->final = 1; + for (i = 0; i < num; i++) { + idx = (start + i) & (fsl_queue->pending_max - 1); + qdma_desc_sge_addr_set64(&ft->desc_ssge[i], + fsl_queue->pending_desc[idx].src); + ft->desc_ssge[i].length = fsl_queue->pending_desc[idx].len; + ft->desc_ssge[i].final = 0; + qdma_desc_sge_addr_set64(&ft->desc_dsge[i], + fsl_queue->pending_desc[idx].dst); + ft->desc_dsge[i].length = fsl_queue->pending_desc[idx].len; + ft->desc_dsge[i].final = 0; + total_len += fsl_queue->pending_desc[idx].len; + if ((i + 1) != num) { + next_idx = (idx + 1) & (fsl_queue->pending_max - 1); + if (fsl_queue->pending_desc[next_idx].len > + s_sg_max_entry_sz) { + num = i + 1; + break; + } + } } -finally: - return fsl_qdma->desc_allocated++; + ft->desc_ssge[num - 1].final = 1; + ft->desc_dsge[num - 1].final = 1; + csgf_src->length = total_len; + csgf_dest->length = total_len; +#ifdef RTE_DMA_DPAA_ERRATA_ERR050757 + sdf = &ft->df.sdf; + sdf->srttype = FSL_QDMA_CMD_RWTTYPE; +#ifdef RTE_DMA_DPAA_ERRATA_ERR050265 + sdf->prefetch = 1; +#endif + if (total_len > FSL_QDMA_CMD_SS_ERR050757_LEN) { + sdf->ssen = 1; + sdf->sss = FSL_QDMA_CMD_SS_ERR050757_LEN; + sdf->ssd = FSL_QDMA_CMD_SS_ERR050757_LEN; + } else { + sdf->ssen = 0; + sdf->sss = 0; + sdf->ssd = 0; + } +#endif + ret = fsl_qdma_enqueue_desc_to_ring(fsl_queue, num); + if (ret) + return ret; -exit: - return -ENOMEM; + fsl_queue->ci = (fsl_queue->ci + 1) & (fsl_queue->n_cq - 1); + + qdma_writel(fsl_queue->le_cqmr | FSL_QDMA_BCQMR_EI, + block + FSL_QDMA_BCQMR(fsl_queue->queue_id)); + fsl_queue->stats.submitted++; + + fsl_queue->pending_start = + (start + num) & (fsl_queue->pending_max - 1); + fsl_queue->pending_num -= num; + if (fsl_queue->pending_num > 0) + goto eq_sg; + + return 0; } static int -dpaa_info_get(const struct rte_dma_dev *dev, struct rte_dma_info *dev_info, - uint32_t info_sz) +fsl_qdma_enqueue_desc(struct fsl_qdma_queue *fsl_queue) { -#define DPAADMA_MAX_DESC 64 -#define DPAADMA_MIN_DESC 64 + uint16_t start = fsl_queue->pending_start; + int ret; - RTE_SET_USED(dev); - RTE_SET_USED(info_sz); + if (fsl_queue->pending_num == 1) { + ret = fsl_qdma_enqueue_desc_single(fsl_queue, + fsl_queue->pending_desc[start].dst, + fsl_queue->pending_desc[start].src, + fsl_queue->pending_desc[start].len); + if (!ret) { + fsl_queue->pending_start = + (start + 1) & (fsl_queue->pending_max - 1); + fsl_queue->pending_num = 0; + } + return ret; + } + + return fsl_qdma_enqueue_desc_sg(fsl_queue); +} + +static int +dpaa_qdma_info_get(const struct rte_dma_dev *dev, + struct rte_dma_info *dev_info, __rte_unused uint32_t info_sz) +{ + struct fsl_qdma_engine *fsl_qdma = dev->data->dev_private; dev_info->dev_capa = RTE_DMA_CAPA_MEM_TO_MEM | - RTE_DMA_CAPA_MEM_TO_DEV | - RTE_DMA_CAPA_DEV_TO_DEV | - RTE_DMA_CAPA_DEV_TO_MEM | - RTE_DMA_CAPA_SILENT | - RTE_DMA_CAPA_OPS_COPY; - dev_info->max_vchans = 1; - dev_info->max_desc = DPAADMA_MAX_DESC; - dev_info->min_desc = DPAADMA_MIN_DESC; + RTE_DMA_CAPA_SILENT | RTE_DMA_CAPA_OPS_COPY | + RTE_DMA_CAPA_OPS_COPY_SG; + dev_info->dev_capa |= DPAA_QDMA_FLAGS_INDEX; + dev_info->max_vchans = fsl_qdma->n_queues; + dev_info->max_desc = FSL_QDMA_MAX_DESC_NUM; + dev_info->min_desc = QDMA_QUEUE_SIZE; + dev_info->max_sges = FSL_QDMA_SG_MAX_ENTRY; return 0; } static int -dpaa_get_channel(struct fsl_qdma_engine *fsl_qdma, uint16_t vchan) +dpaa_get_channel(struct fsl_qdma_engine *fsl_qdma, + uint16_t vchan) { - u32 i, start, end; - int ret; - - start = fsl_qdma->free_block_id * QDMA_QUEUES; - fsl_qdma->free_block_id++; + int ret, i, j, found = 0; + 
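	/*
	 * Illustrative sketch (not part of the patch): given the channel_id
	 * assignment in dpaa_qdma_init() (block-first, k = queue * QDMA_BLOCKS
	 * + block), the linear scan below is equivalent to the direct mapping
	 * shown by this hypothetical helper:
	 */
	static inline struct fsl_qdma_queue *
	example_vchan_to_queue(struct fsl_qdma_engine *qdma, uint16_t vchan)
	{
		/* consecutive vchans spread across blocks first, then queues */
		return &qdma->cmd_queues[vchan % QDMA_BLOCKS][vchan / QDMA_BLOCKS];
	}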
struct fsl_qdma_queue *fsl_queue = fsl_qdma->chan[vchan]; - end = start + 1; - for (i = start; i < end; i++) { - struct fsl_qdma_chan *fsl_chan = &fsl_qdma->chans[i]; + if (fsl_queue) { + found = 1; + goto queue_found; + } - if (fsl_chan->free) { - fsl_chan->free = false; - ret = fsl_qdma_alloc_chan_resources(fsl_chan); - if (ret) - return ret; + for (i = 0; i < QDMA_BLOCKS; i++) { + for (j = 0; j < QDMA_QUEUES; j++) { + fsl_queue = &fsl_qdma->cmd_queues[i][j]; - fsl_qdma->vchan_map[vchan] = i; - return 0; + if (fsl_queue->channel_id == vchan) { + found = 1; + fsl_qdma->chan[vchan] = fsl_queue; + goto queue_found; + } } } - return -1; -} +queue_found: + if (!found) + return -ENXIO; -static void -dma_release(void *fsl_chan) -{ - ((struct fsl_qdma_chan *)fsl_chan)->free = true; - fsl_qdma_free_chan_resources((struct fsl_qdma_chan *)fsl_chan); + if (fsl_queue->used) + return 0; + + ret = fsl_qdma_pre_comp_sd_desc(fsl_queue); + if (ret) + return ret; + + fsl_queue->used = 1; + fsl_qdma->block_queues[fsl_queue->block_id]++; + + return 0; } static int -dpaa_qdma_configure(__rte_unused struct rte_dma_dev *dmadev, - __rte_unused const struct rte_dma_conf *dev_conf, - __rte_unused uint32_t conf_sz) +dpaa_qdma_configure(struct rte_dma_dev *dmadev, + const struct rte_dma_conf *dev_conf, + __rte_unused uint32_t conf_sz) { + struct fsl_qdma_engine *fsl_qdma = dmadev->data->dev_private; + + fsl_qdma->is_silent = dev_conf->enable_silent; return 0; } @@ -745,148 +984,263 @@ dpaa_qdma_queue_setup(struct rte_dma_dev *dmadev, static int dpaa_qdma_submit(void *dev_private, uint16_t vchan) { - struct fsl_qdma_engine *fsl_qdma = (struct fsl_qdma_engine *)dev_private; - struct fsl_qdma_chan *fsl_chan = - &fsl_qdma->chans[fsl_qdma->vchan_map[vchan]]; - struct fsl_qdma_queue *fsl_queue = fsl_chan->queue; - void *block = fsl_queue->block_base; - u32 reg; + struct fsl_qdma_engine *fsl_qdma = dev_private; + struct fsl_qdma_queue *fsl_queue = fsl_qdma->chan[vchan]; - while (fsl_queue->pending) { - reg = qdma_readl_be(block + FSL_QDMA_BCQMR(fsl_queue->id)); - reg |= FSL_QDMA_BCQMR_EI_BE; - qdma_writel_be(reg, block + FSL_QDMA_BCQMR(fsl_queue->id)); - fsl_queue->pending--; - fsl_queue->stats.submitted++; - } + if (!fsl_queue->pending_num) + return 0; - return 0; + return fsl_qdma_enqueue_desc(fsl_queue); } static int dpaa_qdma_enqueue(void *dev_private, uint16_t vchan, - rte_iova_t src, rte_iova_t dst, - uint32_t length, uint64_t flags) + rte_iova_t src, rte_iova_t dst, + uint32_t length, uint64_t flags) { - struct fsl_qdma_engine *fsl_qdma = (struct fsl_qdma_engine *)dev_private; - struct fsl_qdma_chan *fsl_chan = - &fsl_qdma->chans[fsl_qdma->vchan_map[vchan]]; + struct fsl_qdma_engine *fsl_qdma = dev_private; + struct fsl_qdma_queue *fsl_queue = fsl_qdma->chan[vchan]; + uint16_t start = fsl_queue->pending_start; + uint8_t pending = fsl_queue->pending_num; + uint16_t idx; int ret; - void *fsl_comp = NULL; + if (pending >= fsl_queue->pending_max) { + DPAA_QDMA_ERR("Too many pending jobs(%d) on queue%d", + pending, vchan); + return -ENOSPC; + } + idx = (start + pending) & (fsl_queue->pending_max - 1); - fsl_comp = fsl_qdma_prep_memcpy(fsl_chan, - (dma_addr_t)dst, (dma_addr_t)src, - length, NULL, NULL); - if (!fsl_comp) { - DPAA_QDMA_DP_DEBUG("fsl_comp is NULL"); - return -1; + fsl_queue->pending_desc[idx].src = src; + fsl_queue->pending_desc[idx].dst = dst; + fsl_queue->pending_desc[idx].flag = + DPAA_QDMA_IDX_FROM_FLAG(flags); + fsl_queue->pending_desc[idx].len = length; + fsl_queue->pending_num++; + + if (!(flags & 
RTE_DMA_OP_FLAG_SUBMIT)) + return idx; + + ret = fsl_qdma_enqueue_desc(fsl_queue); + if (!ret) + return fsl_queue->pending_start; + + return ret; +} + +static int +dpaa_qdma_copy_sg(void *dev_private, + uint16_t vchan, + const struct rte_dma_sge *src, + const struct rte_dma_sge *dst, + uint16_t nb_src, uint16_t nb_dst, + uint64_t flags) +{ + int ret; + uint16_t i, start, idx; + struct fsl_qdma_engine *fsl_qdma = dev_private; + struct fsl_qdma_queue *fsl_queue = fsl_qdma->chan[vchan]; + const uint16_t *idx_addr = NULL; + + if (unlikely(nb_src != nb_dst)) { + DPAA_QDMA_ERR("%s: nb_src(%d) != nb_dst(%d) on queue%d", + __func__, nb_src, nb_dst, vchan); + return -EINVAL; + } + + if ((fsl_queue->pending_num + nb_src) > FSL_QDMA_SG_MAX_ENTRY) { + DPAA_QDMA_ERR("Too many pending jobs on queue%d", + vchan); + return -ENOSPC; + } + start = fsl_queue->pending_start + fsl_queue->pending_num; + start = start & (fsl_queue->pending_max - 1); + idx = start; + + idx_addr = DPAA_QDMA_IDXADDR_FROM_SG_FLAG(flags); + + for (i = 0; i < nb_src; i++) { + if (unlikely(src[i].length != dst[i].length)) { + DPAA_QDMA_ERR("src.len(%d) != dst.len(%d)", + src[i].length, dst[i].length); + return -EINVAL; + } + idx = (start + i) & (fsl_queue->pending_max - 1); + fsl_queue->pending_desc[idx].src = src[i].addr; + fsl_queue->pending_desc[idx].dst = dst[i].addr; + fsl_queue->pending_desc[idx].len = dst[i].length; + fsl_queue->pending_desc[idx].flag = idx_addr[i]; } - ret = fsl_qdma_enqueue_desc(fsl_chan, fsl_comp, flags); + fsl_queue->pending_num += nb_src; + + if (!(flags & RTE_DMA_OP_FLAG_SUBMIT)) + return idx; + + ret = fsl_qdma_enqueue_desc(fsl_queue); + if (!ret) + return fsl_queue->pending_start; return ret; } +static int +dpaa_qdma_err_handle(struct fsl_qdma_err_reg *reg) +{ + struct fsl_qdma_err_reg local; + size_t i, offset = 0; + char err_msg[512]; + + local.dedr_be = rte_read32(®->dedr_be); + if (!local.dedr_be) + return 0; + offset = sprintf(err_msg, "ERR detected:"); + if (local.dedr.ere) { + offset += sprintf(&err_msg[offset], + " ere(Enqueue rejection error)"); + } + if (local.dedr.dde) { + offset += sprintf(&err_msg[offset], + " dde(Destination descriptor error)"); + } + if (local.dedr.sde) { + offset += sprintf(&err_msg[offset], + " sde(Source descriptor error)"); + } + if (local.dedr.cde) { + offset += sprintf(&err_msg[offset], + " cde(Command descriptor error)"); + } + if (local.dedr.wte) { + offset += sprintf(&err_msg[offset], + " wte(Write transaction error)"); + } + if (local.dedr.rte) { + offset += sprintf(&err_msg[offset], + " rte(Read transaction error)"); + } + if (local.dedr.me) { + offset += sprintf(&err_msg[offset], + " me(Multiple errors of the same type)"); + } + DPAA_QDMA_ERR("%s", err_msg); + for (i = 0; i < FSL_QDMA_DECCD_ERR_NUM; i++) { + local.deccd_le[FSL_QDMA_DECCD_ERR_NUM - 1 - i] = + QDMA_IN(®->deccd_le[i]); + } + local.deccqidr_be = rte_read32(®->deccqidr_be); + local.decbr = rte_read32(®->decbr); + + offset = sprintf(err_msg, "ERR command:"); + offset += sprintf(&err_msg[offset], + " status: %02x, ser: %d, offset:%d, fmt: %02x", + local.err_cmd.status, local.err_cmd.ser, + local.err_cmd.offset, local.err_cmd.format); + offset += sprintf(&err_msg[offset], + " address: 0x%"PRIx64", queue: %d, dd: %02x", + (uint64_t)local.err_cmd.addr_hi << 32 | + local.err_cmd.addr_lo, + local.err_cmd.queue, local.err_cmd.dd); + DPAA_QDMA_ERR("%s", err_msg); + DPAA_QDMA_ERR("ERR command block: %d, queue: %d", + local.deccqidr.block, local.deccqidr.queue); + + rte_write32(local.dedr_be, ®->dedr_be); + + 
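	/*
	 * Illustrative sketch (not part of the patch): based on
	 * DPAA_QDMA_IDX_FROM_FLAG() as used in dpaa_qdma_enqueue() above, a
	 * caller can carry its own job index in the upper flag bits of
	 * rte_dma_copy() and get it back through last_idx[] on completion.
	 * "example_encode_copy_flags" is a hypothetical helper; it assumes
	 * rte_dmadev.h and the driver's DPAA_QDMA_COPY_IDX_OFFSET are visible.
	 */
	static inline uint64_t
	example_encode_copy_flags(uint16_t app_job_idx, bool submit)
	{
		/* upper bits carry the caller's index, low bits stay free for
		 * standard flags such as RTE_DMA_OP_FLAG_SUBMIT
		 */
		uint64_t flags = (uint64_t)app_job_idx << DPAA_QDMA_COPY_IDX_OFFSET;

		if (submit)
			flags |= RTE_DMA_OP_FLAG_SUBMIT;
		return flags;
	}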
return -EIO; +} + static uint16_t dpaa_qdma_dequeue_status(void *dev_private, uint16_t vchan, - const uint16_t nb_cpls, uint16_t *last_idx, - enum rte_dma_status_code *st) + const uint16_t nb_cpls, uint16_t *last_idx, + enum rte_dma_status_code *st) { - struct fsl_qdma_engine *fsl_qdma = (struct fsl_qdma_engine *)dev_private; - int id = (int)((fsl_qdma->vchan_map[vchan]) / QDMA_QUEUES); - void *block; - int intr; + struct fsl_qdma_engine *fsl_qdma = dev_private; + int err; + struct fsl_qdma_queue *fsl_queue = fsl_qdma->chan[vchan]; void *status = fsl_qdma->status_base; - struct fsl_qdma_chan *fsl_chan = - &fsl_qdma->chans[fsl_qdma->vchan_map[vchan]]; - struct fsl_qdma_queue *fsl_queue = fsl_chan->queue; - - intr = qdma_readl_be(status + FSL_QDMA_DEDR); - if (intr) { - DPAA_QDMA_ERR("DMA transaction error! %x", intr); - intr = qdma_readl(status + FSL_QDMA_DECFDW0R); - DPAA_QDMA_INFO("reg FSL_QDMA_DECFDW0R %x", intr); - intr = qdma_readl(status + FSL_QDMA_DECFDW1R); - DPAA_QDMA_INFO("reg FSL_QDMA_DECFDW1R %x", intr); - intr = qdma_readl(status + FSL_QDMA_DECFDW2R); - DPAA_QDMA_INFO("reg FSL_QDMA_DECFDW2R %x", intr); - intr = qdma_readl(status + FSL_QDMA_DECFDW3R); - DPAA_QDMA_INFO("reg FSL_QDMA_DECFDW3R %x", intr); - intr = qdma_readl(status + FSL_QDMA_DECFQIDR); - DPAA_QDMA_INFO("reg FSL_QDMA_DECFQIDR %x", intr); - intr = qdma_readl(status + FSL_QDMA_DECBR); - DPAA_QDMA_INFO("reg FSL_QDMA_DECBR %x", intr); - qdma_writel(0xffffffff, - status + FSL_QDMA_DEDR); - intr = qdma_readl(status + FSL_QDMA_DEDR); - fsl_queue->stats.errors++; + struct fsl_qdma_desc *desc_complete[nb_cpls]; + uint16_t i, dq_num; + + if (unlikely(fsl_qdma->is_silent)) { + DPAA_QDMA_WARN("Can't dq in silent mode"); + + return 0; } - block = fsl_qdma->block_base + - FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, id); + dq_num = dpaa_qdma_block_dequeue(fsl_qdma, + fsl_queue->block_id); + DPAA_QDMA_DP_DEBUG("%s: block dq(%d)", + __func__, dq_num); - intr = fsl_qdma_queue_transfer_complete(fsl_qdma, block, id, nb_cpls, - last_idx, st); - fsl_queue->stats.completed += intr; + dq_num = rte_ring_dequeue_burst(fsl_queue->complete_pool, + (void **)desc_complete, nb_cpls, NULL); + for (i = 0; i < dq_num; i++) + last_idx[i] = desc_complete[i]->flag; - return intr; -} + if (st) { + for (i = 0; i < dq_num; i++) + st[i] = RTE_DMA_STATUS_SUCCESSFUL; + } + + if (s_hw_err_check) { + err = dpaa_qdma_err_handle(status + + FSL_QDMA_ERR_REG_STATUS_OFFSET); + if (err) + fsl_queue->stats.errors++; + } + return dq_num; +} static uint16_t dpaa_qdma_dequeue(void *dev_private, - uint16_t vchan, const uint16_t nb_cpls, - uint16_t *last_idx, bool *has_error) + uint16_t vchan, const uint16_t nb_cpls, + uint16_t *last_idx, bool *has_error) { - struct fsl_qdma_engine *fsl_qdma = (struct fsl_qdma_engine *)dev_private; - int id = (int)((fsl_qdma->vchan_map[vchan]) / QDMA_QUEUES); - void *block; - int intr; + struct fsl_qdma_engine *fsl_qdma = dev_private; + int err; + struct fsl_qdma_queue *fsl_queue = fsl_qdma->chan[vchan]; void *status = fsl_qdma->status_base; - struct fsl_qdma_chan *fsl_chan = - &fsl_qdma->chans[fsl_qdma->vchan_map[vchan]]; - struct fsl_qdma_queue *fsl_queue = fsl_chan->queue; - - intr = qdma_readl_be(status + FSL_QDMA_DEDR); - if (intr) { - DPAA_QDMA_ERR("DMA transaction error! 
%x", intr); - intr = qdma_readl(status + FSL_QDMA_DECFDW0R); - DPAA_QDMA_INFO("reg FSL_QDMA_DECFDW0R %x", intr); - intr = qdma_readl(status + FSL_QDMA_DECFDW1R); - DPAA_QDMA_INFO("reg FSL_QDMA_DECFDW1R %x", intr); - intr = qdma_readl(status + FSL_QDMA_DECFDW2R); - DPAA_QDMA_INFO("reg FSL_QDMA_DECFDW2R %x", intr); - intr = qdma_readl(status + FSL_QDMA_DECFDW3R); - DPAA_QDMA_INFO("reg FSL_QDMA_DECFDW3R %x", intr); - intr = qdma_readl(status + FSL_QDMA_DECFQIDR); - DPAA_QDMA_INFO("reg FSL_QDMA_DECFQIDR %x", intr); - intr = qdma_readl(status + FSL_QDMA_DECBR); - DPAA_QDMA_INFO("reg FSL_QDMA_DECBR %x", intr); - qdma_writel(0xffffffff, - status + FSL_QDMA_DEDR); - intr = qdma_readl(status + FSL_QDMA_DEDR); - *has_error = true; - fsl_queue->stats.errors++; - } + struct fsl_qdma_desc *desc_complete[nb_cpls]; + uint16_t i, dq_num; - block = fsl_qdma->block_base + - FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, id); + if (unlikely(fsl_qdma->is_silent)) { + DPAA_QDMA_WARN("Can't dq in silent mode"); - intr = fsl_qdma_queue_transfer_complete(fsl_qdma, block, id, nb_cpls, - last_idx, NULL); - fsl_queue->stats.completed += intr; + return 0; + } - return intr; + *has_error = false; + dq_num = dpaa_qdma_block_dequeue(fsl_qdma, + fsl_queue->block_id); + DPAA_QDMA_DP_DEBUG("%s: block dq(%d)", + __func__, dq_num); + + dq_num = rte_ring_dequeue_burst(fsl_queue->complete_pool, + (void **)desc_complete, nb_cpls, NULL); + for (i = 0; i < dq_num; i++) + last_idx[i] = desc_complete[i]->flag; + + if (s_hw_err_check) { + err = dpaa_qdma_err_handle(status + + FSL_QDMA_ERR_REG_STATUS_OFFSET); + if (err) { + if (has_error) + *has_error = true; + fsl_queue->stats.errors++; + } + } + + return dq_num; } static int -dpaa_qdma_stats_get(const struct rte_dma_dev *dmadev, uint16_t vchan, - struct rte_dma_stats *rte_stats, uint32_t size) +dpaa_qdma_stats_get(const struct rte_dma_dev *dmadev, + uint16_t vchan, struct rte_dma_stats *rte_stats, uint32_t size) { struct fsl_qdma_engine *fsl_qdma = dmadev->data->dev_private; - struct fsl_qdma_chan *fsl_chan = - &fsl_qdma->chans[fsl_qdma->vchan_map[vchan]]; - struct fsl_qdma_queue *fsl_queue = fsl_chan->queue; + struct fsl_qdma_queue *fsl_queue = fsl_qdma->chan[vchan]; struct rte_dma_stats *stats = &fsl_queue->stats; if (size < sizeof(rte_stats)) @@ -903,17 +1257,24 @@ static int dpaa_qdma_stats_reset(struct rte_dma_dev *dmadev, uint16_t vchan) { struct fsl_qdma_engine *fsl_qdma = dmadev->data->dev_private; - struct fsl_qdma_chan *fsl_chan = - &fsl_qdma->chans[fsl_qdma->vchan_map[vchan]]; - struct fsl_qdma_queue *fsl_queue = fsl_chan->queue; + struct fsl_qdma_queue *fsl_queue = fsl_qdma->chan[vchan]; - fsl_queue->stats = (struct rte_dma_stats){0}; + memset(&fsl_queue->stats, 0, sizeof(struct rte_dma_stats)); return 0; } +static uint16_t +dpaa_qdma_burst_capacity(const void *dev_private, uint16_t vchan) +{ + const struct fsl_qdma_engine *fsl_qdma = dev_private; + struct fsl_qdma_queue *fsl_queue = fsl_qdma->chan[vchan]; + + return fsl_queue->pending_max - fsl_queue->pending_num; +} + static struct rte_dma_dev_ops dpaa_qdma_ops = { - .dev_info_get = dpaa_info_get, + .dev_info_get = dpaa_qdma_info_get, .dev_configure = dpaa_qdma_configure, .dev_start = dpaa_qdma_start, .dev_close = dpaa_qdma_close, @@ -922,94 +1283,126 @@ static struct rte_dma_dev_ops dpaa_qdma_ops = { .stats_reset = dpaa_qdma_stats_reset, }; +static int +check_devargs_handler(__rte_unused const char *key, const char *value, + __rte_unused void *opaque) +{ + if (strcmp(value, "1")) + return -1; + + return 0; +} + +static int 
+dpaa_get_devargs(struct rte_devargs *devargs, const char *key) +{ + struct rte_kvargs *kvlist; + + if (!devargs) + return 0; + + kvlist = rte_kvargs_parse(devargs->args, NULL); + if (!kvlist) + return 0; + + if (!rte_kvargs_count(kvlist, key)) { + rte_kvargs_free(kvlist); + return 0; + } + + if (rte_kvargs_process(kvlist, key, + check_devargs_handler, NULL) < 0) { + rte_kvargs_free(kvlist); + return 0; + } + rte_kvargs_free(kvlist); + + return 1; +} + static int dpaa_qdma_init(struct rte_dma_dev *dmadev) { struct fsl_qdma_engine *fsl_qdma = dmadev->data->dev_private; - struct fsl_qdma_chan *fsl_chan; uint64_t phys_addr; - unsigned int len; int ccsr_qdma_fd; int regs_size; int ret; - u32 i; + uint32_t i, j, k; - fsl_qdma->desc_allocated = 0; - fsl_qdma->n_chans = VIRT_CHANNELS; - fsl_qdma->n_queues = QDMA_QUEUES; - fsl_qdma->num_blocks = QDMA_BLOCKS; - fsl_qdma->block_offset = QDMA_BLOCK_OFFSET; - - len = sizeof(*fsl_chan) * fsl_qdma->n_chans; - fsl_qdma->chans = rte_zmalloc("qdma: fsl chans", len, 0); - if (!fsl_qdma->chans) - return -1; - - len = sizeof(struct fsl_qdma_queue *) * fsl_qdma->num_blocks; - fsl_qdma->status = rte_zmalloc("qdma: fsl status", len, 0); - if (!fsl_qdma->status) { - rte_free(fsl_qdma->chans); - return -1; + if (dpaa_get_devargs(dmadev->device->devargs, DPAA_DMA_ERROR_CHECK)) { + s_hw_err_check = true; + DPAA_QDMA_INFO("Enable DMA error checks"); } - for (i = 0; i < fsl_qdma->num_blocks; i++) { - rte_atomic32_init(&wait_task[i]); - fsl_qdma->status[i] = fsl_qdma_prep_status_queue(); - if (!fsl_qdma->status[i]) - goto err; - } + fsl_qdma->n_queues = QDMA_QUEUES * QDMA_BLOCKS; + fsl_qdma->num_blocks = QDMA_BLOCKS; + fsl_qdma->block_offset = QDMA_BLOCK_OFFSET; ccsr_qdma_fd = open("/dev/mem", O_RDWR); if (unlikely(ccsr_qdma_fd < 0)) { DPAA_QDMA_ERR("Can not open /dev/mem for qdma CCSR map"); - goto err; + return ccsr_qdma_fd; } - regs_size = fsl_qdma->block_offset * (fsl_qdma->num_blocks + 2); + regs_size = fsl_qdma->block_offset * fsl_qdma->num_blocks; + regs_size += (QDMA_CTRL_REGION_SIZE + QDMA_STATUS_REGION_SIZE); phys_addr = QDMA_CCSR_BASE; - fsl_qdma->ctrl_base = mmap(NULL, regs_size, PROT_READ | - PROT_WRITE, MAP_SHARED, - ccsr_qdma_fd, phys_addr); + fsl_qdma->reg_base = mmap(NULL, regs_size, + PROT_READ | PROT_WRITE, MAP_SHARED, + ccsr_qdma_fd, phys_addr); close(ccsr_qdma_fd); - if (fsl_qdma->ctrl_base == MAP_FAILED) { - DPAA_QDMA_ERR("Can not map CCSR base qdma: Phys: %08" PRIx64 - "size %d", phys_addr, regs_size); - goto err; + if (fsl_qdma->reg_base == MAP_FAILED) { + DPAA_QDMA_ERR("Map qdma reg: Phys(0x%"PRIx64"), size(%d)", + phys_addr, regs_size); + return -ENOMEM; } - fsl_qdma->status_base = fsl_qdma->ctrl_base + QDMA_BLOCK_OFFSET; - fsl_qdma->block_base = fsl_qdma->status_base + QDMA_BLOCK_OFFSET; - - fsl_qdma->queue = fsl_qdma_alloc_queue_resources(fsl_qdma); - if (!fsl_qdma->queue) { - munmap(fsl_qdma->ctrl_base, regs_size); - goto err; + fsl_qdma->ctrl_base = + fsl_qdma->reg_base + QDMA_CTRL_REGION_OFFSET; + fsl_qdma->status_base = + fsl_qdma->reg_base + QDMA_STATUS_REGION_OFFSET; + fsl_qdma->block_base = + fsl_qdma->status_base + QDMA_STATUS_REGION_SIZE; + + for (i = 0; i < QDMA_BLOCKS; i++) { + ret = fsl_qdma_prep_status_queue(fsl_qdma, i); + if (ret) + goto mem_free; } - for (i = 0; i < fsl_qdma->n_chans; i++) { - struct fsl_qdma_chan *fsl_chan = &fsl_qdma->chans[i]; - - fsl_chan->qdma = fsl_qdma; - fsl_chan->queue = fsl_qdma->queue + i % (fsl_qdma->n_queues * - fsl_qdma->num_blocks); - fsl_chan->free = true; + k = 0; + for (i = 0; i < 
QDMA_QUEUES; i++) { + for (j = 0; j < QDMA_BLOCKS; j++) { + ret = fsl_qdma_alloc_queue_resources(fsl_qdma, i, j); + if (ret) + goto mem_free; + fsl_qdma->cmd_queues[j][i].channel_id = k; + k++; + } } ret = fsl_qdma_reg_init(fsl_qdma); if (ret) { DPAA_QDMA_ERR("Can't Initialize the qDMA engine."); - munmap(fsl_qdma->ctrl_base, regs_size); - goto err; + goto mem_free; } return 0; -err: - rte_free(fsl_qdma->chans); - rte_free(fsl_qdma->status); +mem_free: + for (i = 0; i < fsl_qdma->num_blocks; i++) + fsl_qdma_free_stq_res(&fsl_qdma->stat_queues[i]); - return -1; + for (i = 0; i < fsl_qdma->num_blocks; i++) { + for (j = 0; j < QDMA_QUEUES; j++) + fsl_qdma_free_cmdq_res(&fsl_qdma->cmd_queues[i][j]); + } + + munmap(fsl_qdma->ctrl_base, regs_size); + + return ret; } static int @@ -1032,9 +1425,11 @@ dpaa_qdma_probe(__rte_unused struct rte_dpaa_driver *dpaa_drv, dmadev->device = &dpaa_dev->device; dmadev->fp_obj->dev_private = dmadev->data->dev_private; dmadev->fp_obj->copy = dpaa_qdma_enqueue; + dmadev->fp_obj->copy_sg = dpaa_qdma_copy_sg; dmadev->fp_obj->submit = dpaa_qdma_submit; dmadev->fp_obj->completed = dpaa_qdma_dequeue; dmadev->fp_obj->completed_status = dpaa_qdma_dequeue_status; + dmadev->fp_obj->burst_capacity = dpaa_qdma_burst_capacity; /* Invoke PMD device initialization function */ ret = dpaa_qdma_init(dmadev); @@ -1052,17 +1447,20 @@ dpaa_qdma_remove(struct rte_dpaa_device *dpaa_dev) { struct rte_dma_dev *dmadev = dpaa_dev->dmadev; struct fsl_qdma_engine *fsl_qdma = dmadev->data->dev_private; - int i = 0, max = QDMA_QUEUES * QDMA_BLOCKS; + uint32_t i, j, regs_size; + + regs_size = fsl_qdma->block_offset * fsl_qdma->num_blocks; + regs_size += (QDMA_CTRL_REGION_SIZE + QDMA_STATUS_REGION_SIZE); - for (i = 0; i < max; i++) { - struct fsl_qdma_chan *fsl_chan = &fsl_qdma->chans[i]; + for (i = 0; i < QDMA_BLOCKS; i++) + fsl_qdma_free_stq_res(&fsl_qdma->stat_queues[i]); - if (fsl_chan->free == false) - dma_release(fsl_chan); + for (i = 0; i < QDMA_BLOCKS; i++) { + for (j = 0; j < QDMA_QUEUES; j++) + fsl_qdma_free_cmdq_res(&fsl_qdma->cmd_queues[i][j]); } - rte_free(fsl_qdma->status); - rte_free(fsl_qdma->chans); + munmap(fsl_qdma->ctrl_base, regs_size); (void)rte_dma_pmd_release(dpaa_dev->device.name); @@ -1078,4 +1476,5 @@ static struct rte_dpaa_driver rte_dpaa_qdma_pmd = { }; RTE_PMD_REGISTER_DPAA(dpaa_qdma, rte_dpaa_qdma_pmd); +RTE_PMD_REGISTER_PARAM_STRING(dpaa_qdma, DPAA_DMA_ERROR_CHECK "="); RTE_LOG_REGISTER_DEFAULT(dpaa_qdma_logtype, INFO); diff --git a/drivers/dma/dpaa/dpaa_qdma.h b/drivers/dma/dpaa/dpaa_qdma.h index 7e9e76e21a..91eaf1455a 100644 --- a/drivers/dma/dpaa/dpaa_qdma.h +++ b/drivers/dma/dpaa/dpaa_qdma.h @@ -1,5 +1,5 @@ /* SPDX-License-Identifier: BSD-3-Clause - * Copyright 2021 NXP + * Copyright 2021-2024 NXP */ #ifndef _DPAA_QDMA_H_ @@ -11,7 +11,6 @@ #define BIT(nr) (1UL << (nr)) #endif -#define CORE_NUMBER 4 #define RETRIES 5 #ifndef GENMASK @@ -20,6 +19,19 @@ (((~0UL) << (l)) & (~0UL >> (BITS_PER_LONG - 1 - (h)))) #endif +#define QDMA_CTRL_REGION_OFFSET 0 +#define QDMA_CTRL_REGION_SIZE 0x10000 +#define QDMA_STATUS_REGION_OFFSET \ + (QDMA_CTRL_REGION_OFFSET + QDMA_CTRL_REGION_SIZE) +#define QDMA_STATUS_REGION_SIZE 0x10000 + +#define DPAA_QDMA_FLAGS_INDEX RTE_BIT64(63) +#define DPAA_QDMA_COPY_IDX_OFFSET 8 +#define DPAA_QDMA_SG_IDX_ADDR_ALIGN \ + RTE_BIT64(DPAA_QDMA_COPY_IDX_OFFSET) +#define DPAA_QDMA_SG_IDX_ADDR_MASK \ + (DPAA_QDMA_SG_IDX_ADDR_ALIGN - 1) + #define FSL_QDMA_DMR 0x0 #define FSL_QDMA_DSR 0x4 #define FSL_QDMA_DEDR 0xe04 @@ -54,53 +66,41 @@ #define 
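/*
 * Note (not part of the patch): with the region defines above,
 * QDMA_BLOCK_OFFSET = 0x10000 and QDMA_BLOCKS = 4, dpaa_qdma_init() maps a
 * single 0x60000-byte window at QDMA_CCSR_BASE: control registers at offset
 * 0x0 (QDMA_CTRL_REGION_OFFSET), status/error registers at 0x10000
 * (QDMA_STATUS_REGION_OFFSET), and block 'j' registers at
 * 0x20000 + j * QDMA_BLOCK_OFFSET.
 */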
FSL_QDMA_QUEUE_MAX 8 #define FSL_QDMA_BCQMR_EN 0x80000000 -#define FSL_QDMA_BCQMR_EI_BE 0x40 +#define FSL_QDMA_BCQMR_EI 0x40000000 + #define FSL_QDMA_BCQMR_CD_THLD(x) ((x) << 20) #define FSL_QDMA_BCQMR_CQ_SIZE(x) ((x) << 16) #define FSL_QDMA_BCQSR_QF_XOFF_BE 0x1000100 #define FSL_QDMA_BSQMR_EN 0x80000000 -#define FSL_QDMA_BSQMR_DI_BE 0x40 #define FSL_QDMA_BSQMR_CQ_SIZE(x) ((x) << 16) +#define FSL_QDMA_BSQMR_DI 0xc0 #define FSL_QDMA_BSQSR_QE_BE 0x200 #define FSL_QDMA_DMR_DQD 0x40000000 #define FSL_QDMA_DSR_DB 0x80000000 -#define FSL_QDMA_COMMAND_BUFFER_SIZE 64 -#define FSL_QDMA_DESCRIPTOR_BUFFER_SIZE 32 #define FSL_QDMA_CIRCULAR_DESC_SIZE_MIN 64 #define FSL_QDMA_CIRCULAR_DESC_SIZE_MAX 16384 #define FSL_QDMA_QUEUE_NUM_MAX 8 +#define FSL_QDMA_COMP_SG_FORMAT 0x1 + #define FSL_QDMA_CMD_RWTTYPE 0x4 #define FSL_QDMA_CMD_LWC 0x2 -#define FSL_QDMA_CMD_RWTTYPE_OFFSET 28 -#define FSL_QDMA_CMD_LWC_OFFSET 16 - -#define QDMA_CCDF_STATUS 20 -#define QDMA_CCDF_OFFSET 20 -#define QDMA_CCDF_MASK GENMASK(28, 20) -#define QDMA_CCDF_FOTMAT BIT(29) -#define QDMA_CCDF_SER BIT(30) - -#define QDMA_SG_FIN BIT(30) -#define QDMA_SG_LEN_MASK GENMASK(29, 0) - -#define COMMAND_QUEUE_OVERFLOW 10 +#define FSL_QDMA_CMD_SS_ERR050757_LEN 128 /* qdma engine attribute */ -#define QDMA_QUEUE_SIZE 64 -#define QDMA_STATUS_SIZE 64 -#define QDMA_CCSR_BASE 0x8380000 -#define VIRT_CHANNELS 32 -#define QDMA_BLOCK_OFFSET 0x10000 -#define QDMA_BLOCKS 4 -#define QDMA_QUEUES 8 -#define QDMA_DELAY 1000 +#define QDMA_QUEUE_SIZE FSL_QDMA_CIRCULAR_DESC_SIZE_MIN +#define QDMA_STATUS_SIZE QDMA_QUEUE_SIZE +#define QDMA_CCSR_BASE 0x8380000 +#define QDMA_BLOCK_OFFSET 0x10000 +#define QDMA_BLOCKS 4 +#define QDMA_QUEUES 8 +#define QDMA_QUEUE_CR_WM 32 #define QDMA_BIG_ENDIAN 1 #ifdef QDMA_BIG_ENDIAN @@ -118,89 +118,191 @@ #define FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma_engine, x) \ (((fsl_qdma_engine)->block_offset) * (x)) -typedef void (*dma_call_back)(void *params); - /* qDMA Command Descriptor Formats */ -struct fsl_qdma_format { - __le32 status; /* ser, status */ - __le32 cfg; /* format, offset */ - union { - struct { - __le32 addr_lo; /* low 32-bits of 40-bit address */ - u8 addr_hi; /* high 8-bits of 40-bit address */ - u8 __reserved1[2]; - u8 cfg8b_w1; /* dd, queue */ - }; - __le64 data; - }; -}; +struct fsl_qdma_comp_cmd_desc { + uint8_t status; + uint32_t rsv0:22; + uint32_t ser:1; + uint32_t rsv1:21; + uint32_t offset:9; + uint32_t format:3; + uint32_t addr_lo; + uint8_t addr_hi; + uint16_t rsv3; + uint8_t queue:3; + uint8_t rsv4:3; + uint8_t dd:2; +} __rte_packed; + +struct fsl_qdma_comp_sg_desc { + uint32_t offset:13; + uint32_t rsv0:19; + uint32_t length:30; + uint32_t final:1; + uint32_t extion:1; + uint32_t addr_lo; + uint8_t addr_hi; + uint32_t rsv1:24; +} __rte_packed; -/* qDMA Source Descriptor Format */ struct fsl_qdma_sdf { - __le32 rev3; - __le32 cfg; /* rev4, bit[0-11] - ssd, bit[12-23] sss */ - __le32 rev5; - __le32 cmd; -}; + uint32_t rsv0; + uint32_t ssd:12; + uint32_t sss:12; + uint32_t rsv1:8; + uint32_t rsv2; + + uint32_t rsv3:17; + uint32_t prefetch:1; + uint32_t rsv4:1; + uint32_t ssen:1; + uint32_t rthrotl:4; + uint32_t sqos:3; + uint32_t ns:1; + uint32_t srttype:4; +} __rte_packed; -/* qDMA Destination Descriptor Format */ struct fsl_qdma_ddf { - __le32 rev1; - __le32 cfg; /* rev2, bit[0-11] - dsd, bit[12-23] - dss */ - __le32 rev3; - __le32 cmd; + uint32_t rsv0; + uint32_t dsd:12; + uint32_t dss:12; + uint32_t rsv1:8; + uint32_t rsv2; + + uint16_t rsv3; + uint32_t lwc:2; + uint32_t rsv4:1; + uint32_t dsen:1; + uint32_t 
wthrotl:4; + uint32_t dqos:3; + uint32_t ns:1; + uint32_t dwttype:4; +} __rte_packed; + +struct fsl_qdma_df { + struct fsl_qdma_sdf sdf; + struct fsl_qdma_ddf ddf; +}; + +#define FSL_QDMA_SG_MAX_ENTRY 64 +#define FSL_QDMA_MAX_DESC_NUM (FSL_QDMA_SG_MAX_ENTRY * QDMA_QUEUE_SIZE) +struct fsl_qdma_cmpd_ft { + struct fsl_qdma_comp_sg_desc desc_buf; + struct fsl_qdma_comp_sg_desc desc_sbuf; + struct fsl_qdma_comp_sg_desc desc_dbuf; + uint64_t cache_align[2]; + struct fsl_qdma_comp_sg_desc desc_ssge[FSL_QDMA_SG_MAX_ENTRY]; + struct fsl_qdma_comp_sg_desc desc_dsge[FSL_QDMA_SG_MAX_ENTRY]; + struct fsl_qdma_df df; + uint64_t phy_ssge; + uint64_t phy_dsge; + uint64_t phy_df; +} __rte_packed; + +#define FSL_QDMA_ERR_REG_STATUS_OFFSET 0xe00 + +struct fsl_qdma_dedr_reg { + uint32_t me:1; + uint32_t rsv0:1; + uint32_t rte:1; + uint32_t wte:1; + uint32_t cde:1; + uint32_t sde:1; + uint32_t dde:1; + uint32_t ere:1; + uint32_t rsv1:24; }; -struct fsl_qdma_chan { - struct fsl_qdma_engine *qdma; - struct fsl_qdma_queue *queue; - bool free; - struct list_head list; +struct fsl_qdma_deccqidr_reg { + uint32_t rsv:27; + uint32_t block:2; + uint32_t queue:3; +}; + +#define FSL_QDMA_DECCD_ERR_NUM \ + (sizeof(struct fsl_qdma_comp_cmd_desc) / sizeof(uint32_t)) + +struct fsl_qdma_err_reg { + uint32_t deier; + union { + rte_be32_t dedr_be; + struct fsl_qdma_dedr_reg dedr; + }; + uint32_t rsv0[2]; + union { + rte_le32_t deccd_le[FSL_QDMA_DECCD_ERR_NUM]; + struct fsl_qdma_comp_cmd_desc err_cmd; + }; + uint32_t rsv1[4]; + union { + rte_be32_t deccqidr_be; + struct fsl_qdma_deccqidr_reg deccqidr; + }; + rte_be32_t decbr; +}; + +#define DPAA_QDMA_IDXADDR_FROM_SG_FLAG(flag) \ + ((void *)(uintptr_t)((flag) - ((flag) & DPAA_QDMA_SG_IDX_ADDR_MASK))) + +#define DPAA_QDMA_IDX_FROM_FLAG(flag) \ + ((flag) >> DPAA_QDMA_COPY_IDX_OFFSET) + +struct fsl_qdma_desc { + rte_iova_t src; + rte_iova_t dst; + uint64_t flag; + uint64_t len; }; struct fsl_qdma_queue { - struct fsl_qdma_format *virt_head; - struct list_head comp_used; - struct list_head comp_free; - dma_addr_t bus_addr; - u32 n_cq; - u32 id; - u32 count; - u32 pending; - struct fsl_qdma_format *cq; - void *block_base; - struct rte_dma_stats stats; + int used; + struct fsl_qdma_cmpd_ft **ft; + uint16_t ci; + struct rte_ring *complete_burst; + struct rte_ring *complete_desc; + struct rte_ring *complete_pool; + uint16_t n_cq; + uint8_t block_id; + uint8_t queue_id; + uint8_t channel_id; + void *block_vir; + uint32_t le_cqmr; + struct fsl_qdma_comp_cmd_desc *cq; + uint16_t desc_in_hw[QDMA_QUEUE_SIZE]; + struct rte_dma_stats stats; + struct fsl_qdma_desc *pending_desc; + uint16_t pending_max; + uint16_t pending_start; + uint16_t pending_num; + uint16_t complete_start; + dma_addr_t bus_addr; + void *engine; }; -struct fsl_qdma_comp { - dma_addr_t bus_addr; - dma_addr_t desc_bus_addr; - void *virt_addr; - int index; - void *desc_virt_addr; - struct fsl_qdma_chan *qchan; - dma_call_back call_back_func; - void *params; - struct list_head list; +struct fsl_qdma_status_queue { + uint16_t n_cq; + uint16_t complete; + uint8_t block_id; + void *block_vir; + struct fsl_qdma_comp_cmd_desc *cq; + struct rte_dma_stats stats; + dma_addr_t bus_addr; + void *engine; }; struct fsl_qdma_engine { - int desc_allocated; - void *ctrl_base; - void *status_base; - void *block_base; - u32 n_chans; - u32 n_queues; - int error_irq; - struct fsl_qdma_queue *queue; - struct fsl_qdma_queue **status; - struct fsl_qdma_chan *chans; - u32 num_blocks; - u8 free_block_id; - u32 vchan_map[4]; - int block_offset; + void 
*reg_base; + void *ctrl_base; + void *status_base; + void *block_base; + uint32_t n_queues; + uint8_t block_queues[QDMA_BLOCKS]; + struct fsl_qdma_queue cmd_queues[QDMA_BLOCKS][QDMA_QUEUES]; + struct fsl_qdma_status_queue stat_queues[QDMA_BLOCKS]; + struct fsl_qdma_queue *chan[QDMA_BLOCKS * QDMA_QUEUES]; + uint32_t num_blocks; + int block_offset; + int is_silent; }; -static rte_atomic32_t wait_task[CORE_NUMBER]; - #endif /* _DPAA_QDMA_H_ */ diff --git a/drivers/dma/dpaa2/dpaa2_qdma.c b/drivers/dma/dpaa2/dpaa2_qdma.c index 5780e49297..3c9a7b5485 100644 --- a/drivers/dma/dpaa2/dpaa2_qdma.c +++ b/drivers/dma/dpaa2/dpaa2_qdma.c @@ -1,5 +1,5 @@ /* SPDX-License-Identifier: BSD-3-Clause - * Copyright 2018-2022 NXP + * Copyright 2018-2024 NXP */ #include @@ -10,253 +10,392 @@ #include -#include "rte_pmd_dpaa2_qdma.h" +#include +#include "dpaa2_hw_dpio.h" #include "dpaa2_qdma.h" #include "dpaa2_qdma_logs.h" -#define DPAA2_QDMA_PREFETCH "prefetch" +#define DPAA2_QDMA_FLE_PRE_POPULATE "fle_pre_populate" +#define DPAA2_QDMA_DESC_DEBUG "desc_debug" +#define DPAA2_QDMA_USING_SHORT_FD "short_fd" -uint32_t dpaa2_coherent_no_alloc_cache; -uint32_t dpaa2_coherent_alloc_cache; +static uint32_t dpaa2_coherent_no_alloc_cache; +static uint32_t dpaa2_coherent_alloc_cache; -static inline int -qdma_populate_fd_pci(phys_addr_t src, phys_addr_t dest, - uint32_t len, struct qbman_fd *fd, - struct rte_dpaa2_qdma_rbp *rbp, int ser) +static struct fsl_mc_io s_proc_mc_reg; + +static int +check_devargs_handler(__rte_unused const char *key, const char *value, + __rte_unused void *opaque) { - fd->simple_pci.saddr_lo = lower_32_bits((uint64_t) (src)); - fd->simple_pci.saddr_hi = upper_32_bits((uint64_t) (src)); + if (strcmp(value, "1")) + return -1; - fd->simple_pci.len_sl = len; + return 0; +} - fd->simple_pci.bmt = 1; - fd->simple_pci.fmt = 3; - fd->simple_pci.sl = 1; - fd->simple_pci.ser = ser; +static int +dpaa2_qdma_get_devargs(struct rte_devargs *devargs, const char *key) +{ + struct rte_kvargs *kvlist; - fd->simple_pci.sportid = rbp->sportid; /*pcie 3 */ - fd->simple_pci.srbp = rbp->srbp; - if (rbp->srbp) - fd->simple_pci.rdttype = 0; - else - fd->simple_pci.rdttype = dpaa2_coherent_alloc_cache; + if (!devargs) + return 0; - /*dest is pcie memory */ - fd->simple_pci.dportid = rbp->dportid; /*pcie 3 */ - fd->simple_pci.drbp = rbp->drbp; - if (rbp->drbp) - fd->simple_pci.wrttype = 0; - else - fd->simple_pci.wrttype = dpaa2_coherent_no_alloc_cache; + kvlist = rte_kvargs_parse(devargs->args, NULL); + if (!kvlist) + return 0; + + if (!rte_kvargs_count(kvlist, key)) { + rte_kvargs_free(kvlist); + return 0; + } - fd->simple_pci.daddr_lo = lower_32_bits((uint64_t) (dest)); - fd->simple_pci.daddr_hi = upper_32_bits((uint64_t) (dest)); + if (rte_kvargs_process(kvlist, key, + check_devargs_handler, NULL) < 0) { + rte_kvargs_free(kvlist); + return 0; + } + rte_kvargs_free(kvlist); - return 0; + return 1; } static inline int -qdma_populate_fd_ddr(phys_addr_t src, phys_addr_t dest, - uint32_t len, struct qbman_fd *fd, int ser) +qdma_cntx_idx_ring_eq(struct qdma_cntx_idx_ring *ring, + const uint16_t *elem, uint16_t nb, + uint16_t *free_space) { - fd->simple_ddr.saddr_lo = lower_32_bits((uint64_t) (src)); - fd->simple_ddr.saddr_hi = upper_32_bits((uint64_t) (src)); + uint16_t i; - fd->simple_ddr.len = len; + if (unlikely(nb > ring->free_space)) + return 0; - fd->simple_ddr.bmt = 1; - fd->simple_ddr.fmt = 3; - fd->simple_ddr.sl = 1; - fd->simple_ddr.ser = ser; - /** - * src If RBP=0 {NS,RDTTYPE[3:0]}: 0_1011 - * Coherent copy of 
cacheable memory, - * lookup in downstream cache, no allocate - * on miss - */ - fd->simple_ddr.rns = 0; - fd->simple_ddr.rdttype = dpaa2_coherent_alloc_cache; - /** - * dest If RBP=0 {NS,WRTTYPE[3:0]}: 0_0111 - * Coherent write of cacheable memory, - * lookup in downstream cache, no allocate on miss - */ - fd->simple_ddr.wns = 0; - fd->simple_ddr.wrttype = dpaa2_coherent_no_alloc_cache; + for (i = 0; i < nb; i++) { + ring->cntx_idx_ring[ring->tail] = elem[i]; + ring->tail = (ring->tail + 1) & + (DPAA2_QDMA_MAX_DESC - 1); + } + ring->free_space -= nb; + ring->nb_in_ring += nb; - fd->simple_ddr.daddr_lo = lower_32_bits((uint64_t) (dest)); - fd->simple_ddr.daddr_hi = upper_32_bits((uint64_t) (dest)); + if (free_space) + *free_space = ring->free_space; - return 0; + return nb; } -static void -dpaa2_qdma_populate_fle(struct qbman_fle *fle, - uint64_t fle_iova, - struct rte_dpaa2_qdma_rbp *rbp, - uint64_t src, uint64_t dest, - size_t len, uint32_t flags, uint32_t fmt) +static inline int +qdma_cntx_idx_ring_dq(struct qdma_cntx_idx_ring *ring, + uint16_t *elem, uint16_t max) { - struct qdma_sdd *sdd; - uint64_t sdd_iova; + int ret = ring->nb_in_ring > max ? max : ring->nb_in_ring; + + if (!ret) + return 0; + + if ((ring->start + ret) < DPAA2_QDMA_MAX_DESC) { + rte_memcpy(elem, + &ring->cntx_idx_ring[ring->start], + ret * sizeof(uint16_t)); + ring->start += ret; + } else { + rte_memcpy(elem, + &ring->cntx_idx_ring[ring->start], + (DPAA2_QDMA_MAX_DESC - ring->start) * + sizeof(uint16_t)); + rte_memcpy(&elem[DPAA2_QDMA_MAX_DESC - ring->start], + &ring->cntx_idx_ring[0], + (ret - DPAA2_QDMA_MAX_DESC + ring->start) * + sizeof(uint16_t)); + ring->start = (ring->start + ret) & (DPAA2_QDMA_MAX_DESC - 1); + } + ring->free_space += ret; + ring->nb_in_ring -= ret; + + return ret; +} + +static int +dpaa2_qdma_multi_eq(struct qdma_virt_queue *qdma_vq) +{ + struct dpaa2_dpdmai_dev *dpdmai_dev = qdma_vq->dpdmai_dev; + uint16_t txq_id = dpdmai_dev->tx_queue[qdma_vq->vq_id].fqid; + struct qbman_eq_desc eqdesc; + struct qbman_swp *swp; + uint32_t num_to_send = 0; + uint16_t num_tx = 0; + uint32_t enqueue_loop, loop; + int ret; + struct qbman_fd *fd = qdma_vq->fd; + uint16_t nb_fds = qdma_vq->fd_idx, idx, dst_idx; + + if (unlikely(!DPAA2_PER_LCORE_DPIO)) { + ret = dpaa2_affine_qbman_swp(); + if (ret) { + DPAA2_QDMA_ERR("Failed to allocate IO portal, tid: %d", + rte_gettid()); + return -EIO; + } + } + swp = DPAA2_PER_LCORE_PORTAL; + + /* Prepare enqueue descriptor */ + qbman_eq_desc_clear(&eqdesc); + qbman_eq_desc_set_fq(&eqdesc, txq_id); + qbman_eq_desc_set_no_orp(&eqdesc, 0); + qbman_eq_desc_set_response(&eqdesc, 0, 0); + + while (nb_fds > 0) { + num_to_send = (nb_fds > dpaa2_eqcr_size) ? 
+ dpaa2_eqcr_size : nb_fds; + + /* Enqueue the packet to the QBMAN */ + enqueue_loop = 0; + loop = num_to_send; + + while (enqueue_loop < loop) { + ret = qbman_swp_enqueue_multiple(swp, + &eqdesc, + &fd[num_tx + enqueue_loop], + NULL, + loop - enqueue_loop); + if (likely(ret >= 0)) + enqueue_loop += ret; + } + num_tx += num_to_send; + nb_fds -= loop; + } - sdd = (struct qdma_sdd *) - ((uintptr_t)(uint64_t)fle - QDMA_FLE_FLE_OFFSET + - QDMA_FLE_SDD_OFFSET); - sdd_iova = fle_iova - QDMA_FLE_FLE_OFFSET + QDMA_FLE_SDD_OFFSET; + qdma_vq->num_enqueues += num_tx; + if (unlikely(num_tx != qdma_vq->fd_idx)) { + dst_idx = 0; + for (idx = num_tx; idx < qdma_vq->fd_idx; idx++) { + rte_memcpy(&qdma_vq->fd[dst_idx], + &qdma_vq->fd[idx], + sizeof(struct qbman_fd)); + dst_idx++; + } + } + qdma_vq->fd_idx -= num_tx; + + return num_tx; +} + +static void +fle_sdd_pre_populate(struct qdma_cntx_fle_sdd *fle_sdd, + struct dpaa2_qdma_rbp *rbp, uint64_t src, uint64_t dest, + uint32_t fmt) +{ + struct qbman_fle *fle = fle_sdd->fle; + struct qdma_sdd *sdd = fle_sdd->sdd; + uint64_t sdd_iova = DPAA2_VADDR_TO_IOVA(sdd); /* first frame list to source descriptor */ - DPAA2_SET_FLE_ADDR(fle, sdd_iova); - DPAA2_SET_FLE_LEN(fle, (2 * (sizeof(struct qdma_sdd)))); + DPAA2_SET_FLE_ADDR(&fle[DPAA2_QDMA_SDD_FLE], sdd_iova); + DPAA2_SET_FLE_LEN(&fle[DPAA2_QDMA_SDD_FLE], + DPAA2_QDMA_MAX_SDD * (sizeof(struct qdma_sdd))); /* source and destination descriptor */ if (rbp && rbp->enable) { /* source */ - sdd->read_cmd.portid = rbp->sportid; - sdd->rbpcmd_simple.pfid = rbp->spfid; - sdd->rbpcmd_simple.vfa = rbp->vfa; - sdd->rbpcmd_simple.vfid = rbp->svfid; + sdd[DPAA2_QDMA_SRC_SDD].read_cmd.portid = + rbp->sportid; + sdd[DPAA2_QDMA_SRC_SDD].rbpcmd_simple.pfid = + rbp->spfid; + sdd[DPAA2_QDMA_SRC_SDD].rbpcmd_simple.vfid = + rbp->svfid; + sdd[DPAA2_QDMA_SRC_SDD].rbpcmd_simple.vfa = + rbp->svfa; if (rbp->srbp) { - sdd->read_cmd.rbp = rbp->srbp; - sdd->read_cmd.rdtype = DPAA2_RBP_MEM_RW; + sdd[DPAA2_QDMA_SRC_SDD].read_cmd.rbp = + rbp->srbp; + sdd[DPAA2_QDMA_SRC_SDD].read_cmd.rdtype = + DPAA2_RBP_MEM_RW; } else { - sdd->read_cmd.rdtype = dpaa2_coherent_no_alloc_cache; + sdd[DPAA2_QDMA_SRC_SDD].read_cmd.rdtype = + dpaa2_coherent_no_alloc_cache; } - sdd++; /* destination */ - sdd->write_cmd.portid = rbp->dportid; - sdd->rbpcmd_simple.pfid = rbp->dpfid; - sdd->rbpcmd_simple.vfa = rbp->vfa; - sdd->rbpcmd_simple.vfid = rbp->dvfid; + sdd[DPAA2_QDMA_DST_SDD].write_cmd.portid = + rbp->dportid; + sdd[DPAA2_QDMA_DST_SDD].rbpcmd_simple.pfid = + rbp->dpfid; + sdd[DPAA2_QDMA_DST_SDD].rbpcmd_simple.vfid = + rbp->dvfid; + sdd[DPAA2_QDMA_DST_SDD].rbpcmd_simple.vfa = + rbp->dvfa; if (rbp->drbp) { - sdd->write_cmd.rbp = rbp->drbp; - sdd->write_cmd.wrttype = DPAA2_RBP_MEM_RW; + sdd[DPAA2_QDMA_DST_SDD].write_cmd.rbp = + rbp->drbp; + sdd[DPAA2_QDMA_DST_SDD].write_cmd.wrttype = + DPAA2_RBP_MEM_RW; } else { - sdd->write_cmd.wrttype = dpaa2_coherent_alloc_cache; + sdd[DPAA2_QDMA_DST_SDD].write_cmd.wrttype = + dpaa2_coherent_alloc_cache; } - } else { - sdd->read_cmd.rdtype = dpaa2_coherent_no_alloc_cache; - sdd++; - sdd->write_cmd.wrttype = dpaa2_coherent_alloc_cache; + sdd[DPAA2_QDMA_SRC_SDD].read_cmd.rdtype = + dpaa2_coherent_no_alloc_cache; + sdd[DPAA2_QDMA_DST_SDD].write_cmd.wrttype = + dpaa2_coherent_alloc_cache; } - fle++; /* source frame list to source buffer */ - if (flags & RTE_DPAA2_QDMA_JOB_SRC_PHY) { - DPAA2_SET_FLE_ADDR(fle, src); -#ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA - DPAA2_SET_FLE_BMT(fle); -#endif - } else { - DPAA2_SET_FLE_ADDR(fle, 
DPAA2_VADDR_TO_IOVA(src)); - } - fle->word4.fmt = fmt; - DPAA2_SET_FLE_LEN(fle, len); + DPAA2_SET_FLE_ADDR(&fle[DPAA2_QDMA_SRC_FLE], src); + /** IOMMU is always on for either VA or PA mode, + * so Bypass Memory Translation should be disabled. + * + * DPAA2_SET_FLE_BMT(&fle[DPAA2_QDMA_SRC_FLE]); + * DPAA2_SET_FLE_BMT(&fle[DPAA2_QDMA_DST_FLE]); + */ + fle[DPAA2_QDMA_SRC_FLE].word4.fmt = fmt; - fle++; /* destination frame list to destination buffer */ - if (flags & RTE_DPAA2_QDMA_JOB_DEST_PHY) { -#ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA - DPAA2_SET_FLE_BMT(fle); -#endif - DPAA2_SET_FLE_ADDR(fle, dest); - } else { - DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(dest)); - } - fle->word4.fmt = fmt; - DPAA2_SET_FLE_LEN(fle, len); + DPAA2_SET_FLE_ADDR(&fle[DPAA2_QDMA_DST_FLE], dest); + fle[DPAA2_QDMA_DST_FLE].word4.fmt = fmt; /* Final bit: 1, for last frame list */ - DPAA2_SET_FLE_FIN(fle); + DPAA2_SET_FLE_FIN(&fle[DPAA2_QDMA_DST_FLE]); } -static inline int -dpdmai_dev_set_fd_us(struct qdma_virt_queue *qdma_vq, - struct qbman_fd *fd, - struct rte_dpaa2_qdma_job **job, - uint16_t nb_jobs) +static void +sg_entry_pre_populate(struct qdma_cntx_sg *sg_cntx) { - struct rte_dpaa2_qdma_rbp *rbp = &qdma_vq->rbp; - struct rte_dpaa2_qdma_job **ppjob; - size_t iova; - int ret = 0, loop; - int ser = (qdma_vq->flags & DPAA2_QDMA_VQ_NO_RESPONSE) ? - 0 : 1; - - for (loop = 0; loop < nb_jobs; loop++) { - if (job[loop]->src & QDMA_RBP_UPPER_ADDRESS_MASK) - iova = (size_t)job[loop]->dest; - else - iova = (size_t)job[loop]->src; - - /* Set the metadata */ - job[loop]->vq_id = qdma_vq->vq_id; - ppjob = (struct rte_dpaa2_qdma_job **)DPAA2_IOVA_TO_VADDR(iova) - 1; - *ppjob = job[loop]; - - if ((rbp->drbp == 1) || (rbp->srbp == 1)) - ret = qdma_populate_fd_pci((phys_addr_t)job[loop]->src, - (phys_addr_t)job[loop]->dest, - job[loop]->len, &fd[loop], rbp, ser); - else - ret = qdma_populate_fd_ddr((phys_addr_t)job[loop]->src, - (phys_addr_t)job[loop]->dest, - job[loop]->len, &fd[loop], ser); + uint16_t i; + struct qdma_sg_entry *src_sge = sg_cntx->sg_src_entry; + struct qdma_sg_entry *dst_sge = sg_cntx->sg_dst_entry; + + for (i = 0; i < RTE_DPAAX_QDMA_JOB_SUBMIT_MAX; i++) { + /* source SG */ + src_sge[i].ctrl.sl = QDMA_SG_SL_LONG; + src_sge[i].ctrl.fmt = QDMA_SG_FMT_SDB; + /** IOMMU is always on for either VA or PA mode, + * so Bypass Memory Translation should be disabled. + */ + src_sge[i].ctrl.bmt = QDMA_SG_BMT_DISABLE; + /* destination SG */ + dst_sge[i].ctrl.sl = QDMA_SG_SL_LONG; + dst_sge[i].ctrl.fmt = QDMA_SG_FMT_SDB; + /** IOMMU is always on for either VA or PA mode, + * so Bypass Memory Translation should be disabled. 
+ */ + dst_sge[i].ctrl.bmt = QDMA_SG_BMT_DISABLE; } +} - return ret; +static void +fle_sdd_sg_pre_populate(struct qdma_cntx_sg *sg_cntx, + struct qdma_virt_queue *qdma_vq) +{ + struct qdma_sg_entry *src_sge = sg_cntx->sg_src_entry; + struct qdma_sg_entry *dst_sge = sg_cntx->sg_dst_entry; + rte_iova_t src_sge_iova, dst_sge_iova; + struct dpaa2_qdma_rbp *rbp = &qdma_vq->rbp; + + memset(sg_cntx, 0, sizeof(struct qdma_cntx_sg)); + + src_sge_iova = DPAA2_VADDR_TO_IOVA(src_sge); + dst_sge_iova = DPAA2_VADDR_TO_IOVA(dst_sge); + + sg_entry_pre_populate(sg_cntx); + fle_sdd_pre_populate(&sg_cntx->fle_sdd, + rbp, src_sge_iova, dst_sge_iova, + QBMAN_FLE_WORD4_FMT_SGE); } -static uint32_t -qdma_populate_sg_entry(struct rte_dpaa2_qdma_job **jobs, - struct qdma_sg_entry *src_sge, - struct qdma_sg_entry *dst_sge, - uint16_t nb_jobs) +static inline uint32_t +sg_entry_post_populate(const struct rte_dma_sge *src, + const struct rte_dma_sge *dst, struct qdma_cntx_sg *sg_cntx, + uint16_t nb_sge) { uint16_t i; uint32_t total_len = 0; - uint64_t iova; + struct qdma_sg_entry *src_sge = sg_cntx->sg_src_entry; + struct qdma_sg_entry *dst_sge = sg_cntx->sg_dst_entry; + + for (i = 0; i < (nb_sge - 1); i++) { + if (unlikely(src[i].length != dst[i].length)) + return -ENOTSUP; + src_sge->addr_lo = (uint32_t)src[i].addr; + src_sge->addr_hi = (src[i].addr >> 32); + src_sge->data_len.data_len_sl0 = src[i].length; + + dst_sge->addr_lo = (uint32_t)dst[i].addr; + dst_sge->addr_hi = (dst[i].addr >> 32); + dst_sge->data_len.data_len_sl0 = dst[i].length; + total_len += dst[i].length; + + src_sge->ctrl.f = 0; + dst_sge->ctrl.f = 0; + src_sge++; + dst_sge++; + } - for (i = 0; i < nb_jobs; i++) { - /* source SG */ - if (likely(jobs[i]->flags & RTE_DPAA2_QDMA_JOB_SRC_PHY)) { - src_sge->addr_lo = (uint32_t)jobs[i]->src; - src_sge->addr_hi = (jobs[i]->src >> 32); - } else { - iova = DPAA2_VADDR_TO_IOVA(jobs[i]->src); - src_sge->addr_lo = (uint32_t)iova; - src_sge->addr_hi = iova >> 32; - } - src_sge->data_len.data_len_sl0 = jobs[i]->len; + if (unlikely(src[i].length != dst[i].length)) + return -ENOTSUP; + + src_sge->addr_lo = (uint32_t)src[i].addr; + src_sge->addr_hi = (src[i].addr >> 32); + src_sge->data_len.data_len_sl0 = src[i].length; + + dst_sge->addr_lo = (uint32_t)dst[i].addr; + dst_sge->addr_hi = (dst[i].addr >> 32); + dst_sge->data_len.data_len_sl0 = dst[i].length; + + total_len += dst[i].length; + sg_cntx->job_nb = nb_sge; + + src_sge->ctrl.f = QDMA_SG_F; + dst_sge->ctrl.f = QDMA_SG_F; + + return total_len; +} + +static inline void +sg_fle_post_populate(struct qbman_fle fle[], + size_t len) +{ + DPAA2_SET_FLE_LEN(&fle[DPAA2_QDMA_SRC_FLE], len); + DPAA2_SET_FLE_LEN(&fle[DPAA2_QDMA_DST_FLE], len); +} + +static inline uint32_t +sg_entry_populate(const struct rte_dma_sge *src, + const struct rte_dma_sge *dst, struct qdma_cntx_sg *sg_cntx, + uint16_t nb_sge) +{ + uint16_t i; + uint32_t total_len = 0; + struct qdma_sg_entry *src_sge = sg_cntx->sg_src_entry; + struct qdma_sg_entry *dst_sge = sg_cntx->sg_dst_entry; + + for (i = 0; i < nb_sge; i++) { + if (unlikely(src[i].length != dst[i].length)) + return -ENOTSUP; + + src_sge->addr_lo = (uint32_t)src[i].addr; + src_sge->addr_hi = (src[i].addr >> 32); + src_sge->data_len.data_len_sl0 = src[i].length; src_sge->ctrl.sl = QDMA_SG_SL_LONG; src_sge->ctrl.fmt = QDMA_SG_FMT_SDB; -#ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA - src_sge->ctrl.bmt = QDMA_SG_BMT_ENABLE; -#else + /** IOMMU is always on for either VA or PA mode, + * so Bypass Memory Translation should be disabled. 
+ */ src_sge->ctrl.bmt = QDMA_SG_BMT_DISABLE; -#endif - /* destination SG */ - if (likely(jobs[i]->flags & RTE_DPAA2_QDMA_JOB_DEST_PHY)) { - dst_sge->addr_lo = (uint32_t)jobs[i]->dest; - dst_sge->addr_hi = (jobs[i]->dest >> 32); - } else { - iova = DPAA2_VADDR_TO_IOVA(jobs[i]->dest); - dst_sge->addr_lo = (uint32_t)iova; - dst_sge->addr_hi = iova >> 32; - } - dst_sge->data_len.data_len_sl0 = jobs[i]->len; + dst_sge->addr_lo = (uint32_t)dst[i].addr; + dst_sge->addr_hi = (dst[i].addr >> 32); + dst_sge->data_len.data_len_sl0 = dst[i].length; dst_sge->ctrl.sl = QDMA_SG_SL_LONG; dst_sge->ctrl.fmt = QDMA_SG_FMT_SDB; -#ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA - dst_sge->ctrl.bmt = QDMA_SG_BMT_ENABLE; -#else + /** IOMMU is always on for either VA or PA mode, + * so Bypass Memory Translation should be disabled. + */ dst_sge->ctrl.bmt = QDMA_SG_BMT_DISABLE; -#endif - total_len += jobs[i]->len; + total_len += src[i].length; - if (i == (nb_jobs - 1)) { + if (i == (nb_sge - 1)) { src_sge->ctrl.f = QDMA_SG_F; dst_sge->ctrl.f = QDMA_SG_F; } else { @@ -267,326 +406,623 @@ qdma_populate_sg_entry(struct rte_dpaa2_qdma_job **jobs, dst_sge++; } + sg_cntx->job_nb = nb_sge; + return total_len; } -static inline int -dpdmai_dev_set_multi_fd_lf_no_rsp(struct qdma_virt_queue *qdma_vq, - struct qbman_fd *fd, - struct rte_dpaa2_qdma_job **job, - uint16_t nb_jobs) +static inline void +fle_populate(struct qbman_fle fle[], + struct qdma_sdd sdd[], uint64_t sdd_iova, + struct dpaa2_qdma_rbp *rbp, + uint64_t src_iova, uint64_t dst_iova, size_t len, + uint32_t fmt) { - struct rte_dpaa2_qdma_rbp *rbp = &qdma_vq->rbp; - struct rte_dpaa2_qdma_job **ppjob; - uint16_t i; - void *elem; - struct qbman_fle *fle; - uint64_t elem_iova, fle_iova; + /* first frame list to source descriptor */ + DPAA2_SET_FLE_ADDR(&fle[DPAA2_QDMA_SDD_FLE], sdd_iova); + DPAA2_SET_FLE_LEN(&fle[DPAA2_QDMA_SDD_FLE], + (DPAA2_QDMA_MAX_SDD * (sizeof(struct qdma_sdd)))); - for (i = 0; i < nb_jobs; i++) { - elem = job[i]->usr_elem; -#ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA - elem_iova = rte_mempool_virt2iova(elem); -#else - elem_iova = DPAA2_VADDR_TO_IOVA(elem); -#endif + /* source and destination descriptor */ + if (rbp && rbp->enable) { + /* source */ + sdd[DPAA2_QDMA_SRC_SDD].read_cmd.portid = + rbp->sportid; + sdd[DPAA2_QDMA_SRC_SDD].rbpcmd_simple.pfid = + rbp->spfid; + sdd[DPAA2_QDMA_SRC_SDD].rbpcmd_simple.vfid = + rbp->svfid; + sdd[DPAA2_QDMA_SRC_SDD].rbpcmd_simple.vfa = + rbp->svfa; - ppjob = (struct rte_dpaa2_qdma_job **) - ((uintptr_t)(uint64_t)elem + - QDMA_FLE_SINGLE_JOB_OFFSET); - *ppjob = job[i]; + if (rbp->srbp) { + sdd[DPAA2_QDMA_SRC_SDD].read_cmd.rbp = + rbp->srbp; + sdd[DPAA2_QDMA_SRC_SDD].read_cmd.rdtype = + DPAA2_RBP_MEM_RW; + } else { + sdd[DPAA2_QDMA_SRC_SDD].read_cmd.rdtype = + dpaa2_coherent_no_alloc_cache; + } + /* destination */ + sdd[DPAA2_QDMA_DST_SDD].write_cmd.portid = + rbp->dportid; + sdd[DPAA2_QDMA_DST_SDD].rbpcmd_simple.pfid = + rbp->dpfid; + sdd[DPAA2_QDMA_DST_SDD].rbpcmd_simple.vfid = + rbp->dvfid; + sdd[DPAA2_QDMA_DST_SDD].rbpcmd_simple.vfa = + rbp->dvfa; - job[i]->vq_id = qdma_vq->vq_id; + if (rbp->drbp) { + sdd[DPAA2_QDMA_DST_SDD].write_cmd.rbp = + rbp->drbp; + sdd[DPAA2_QDMA_DST_SDD].write_cmd.wrttype = + DPAA2_RBP_MEM_RW; + } else { + sdd[DPAA2_QDMA_DST_SDD].write_cmd.wrttype = + dpaa2_coherent_alloc_cache; + } - fle = (struct qbman_fle *) - ((uintptr_t)(uint64_t)elem + QDMA_FLE_FLE_OFFSET); - fle_iova = elem_iova + QDMA_FLE_FLE_OFFSET; + } else { + sdd[DPAA2_QDMA_SRC_SDD].read_cmd.rdtype = + dpaa2_coherent_no_alloc_cache; + 
sdd[DPAA2_QDMA_DST_SDD].write_cmd.wrttype = + dpaa2_coherent_alloc_cache; + } + /* source frame list to source buffer */ + DPAA2_SET_FLE_ADDR(&fle[DPAA2_QDMA_SRC_FLE], src_iova); + /** IOMMU is always on for either VA or PA mode, + * so Bypass Memory Translation should be disabled. + * DPAA2_SET_FLE_BMT(&fle[DPAA2_QDMA_SRC_FLE]); + * DPAA2_SET_FLE_BMT(&fle[DPAA2_QDMA_DST_FLE]); + */ + fle[DPAA2_QDMA_SRC_FLE].word4.fmt = fmt; + DPAA2_SET_FLE_LEN(&fle[DPAA2_QDMA_SRC_FLE], len); - DPAA2_SET_FD_ADDR(&fd[i], fle_iova); - DPAA2_SET_FD_COMPOUND_FMT(&fd[i]); + /* destination frame list to destination buffer */ + DPAA2_SET_FLE_ADDR(&fle[DPAA2_QDMA_DST_FLE], dst_iova); + fle[DPAA2_QDMA_DST_FLE].word4.fmt = fmt; + DPAA2_SET_FLE_LEN(&fle[DPAA2_QDMA_DST_FLE], len); - memset(fle, 0, DPAA2_QDMA_MAX_FLE * sizeof(struct qbman_fle) + - DPAA2_QDMA_MAX_SDD * sizeof(struct qdma_sdd)); + /* Final bit: 1, for last frame list */ + DPAA2_SET_FLE_FIN(&fle[DPAA2_QDMA_DST_FLE]); +} - dpaa2_qdma_populate_fle(fle, fle_iova, rbp, - job[i]->src, job[i]->dest, job[i]->len, - job[i]->flags, QBMAN_FLE_WORD4_FMT_SBF); - } +static inline void +fle_post_populate(struct qbman_fle fle[], + uint64_t src, uint64_t dest, size_t len) +{ + DPAA2_SET_FLE_ADDR(&fle[DPAA2_QDMA_SRC_FLE], src); + DPAA2_SET_FLE_LEN(&fle[DPAA2_QDMA_SRC_FLE], len); - return 0; + DPAA2_SET_FLE_ADDR(&fle[DPAA2_QDMA_DST_FLE], dest); + DPAA2_SET_FLE_LEN(&fle[DPAA2_QDMA_DST_FLE], len); } static inline int -dpdmai_dev_set_multi_fd_lf(struct qdma_virt_queue *qdma_vq, - struct qbman_fd *fd, - struct rte_dpaa2_qdma_job **job, - uint16_t nb_jobs) +dpaa2_qdma_submit(void *dev_private, uint16_t vchan) { - struct rte_dpaa2_qdma_rbp *rbp = &qdma_vq->rbp; - struct rte_dpaa2_qdma_job **ppjob; - uint16_t i; + struct dpaa2_dpdmai_dev *dpdmai_dev = dev_private; + struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev; + struct qdma_virt_queue *qdma_vq = &qdma_dev->vqs[vchan]; + uint16_t expected = qdma_vq->fd_idx; int ret; - void *elem[DPAA2_QDMA_MAX_DESC]; - struct qbman_fle *fle; - uint64_t elem_iova, fle_iova; - ret = rte_mempool_get_bulk(qdma_vq->fle_pool, elem, nb_jobs); - if (ret) { - DPAA2_QDMA_DP_DEBUG("Memory alloc failed for FLE"); - return ret; - } + ret = dpaa2_qdma_multi_eq(qdma_vq); + if (likely(ret == expected)) + return 0; + + return -EBUSY; +} + +static inline void +dpaa2_qdma_fle_dump(const struct qbman_fle *fle) +{ + DPAA2_QDMA_INFO("addr:0x%08x-0x%08x, len:%d, frc:0x%08x, bpid:%d", + fle->addr_hi, fle->addr_lo, fle->length, fle->frc, + fle->word4.bpid); + DPAA2_QDMA_INFO("ivp:%d, bmt:%d, off:%d, fmt:%d, sl:%d, f:%d", + fle->word4.ivp, fle->word4.bmt, fle->word4.offset, + fle->word4.fmt, fle->word4.sl, fle->word4.f); +} - for (i = 0; i < nb_jobs; i++) { -#ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA - elem_iova = rte_mempool_virt2iova(elem[i]); -#else - elem_iova = DPAA2_VADDR_TO_IOVA(elem[i]); -#endif +static inline void +dpaa2_qdma_sdd_dump(const struct qdma_sdd *sdd) +{ + DPAA2_QDMA_INFO("stride:%d, rbpcmd:0x%08x, cmd:0x%08x", + sdd->stride, sdd->rbpcmd, sdd->cmd); +} - ppjob = (struct rte_dpaa2_qdma_job **) - ((uintptr_t)(uint64_t)elem[i] + - QDMA_FLE_SINGLE_JOB_OFFSET); - *ppjob = job[i]; +static inline void +dpaa2_qdma_sge_dump(const struct qdma_sg_entry *sge) +{ + DPAA2_QDMA_INFO("addr 0x%08x-0x%08x, len:0x%08x, ctl:0x%08x", + sge->addr_hi, sge->addr_lo, sge->data_len.data_len_sl0, + sge->ctrl_fields); +} - job[i]->vq_id = qdma_vq->vq_id; +static void +dpaa2_qdma_long_fmt_dump(const struct qbman_fle *fle) +{ + int i; + const struct qdma_cntx_fle_sdd *fle_sdd; + 
const struct qdma_sdd *sdd; + const struct qdma_cntx_sg *cntx_sg = NULL; - fle = (struct qbman_fle *) - ((uintptr_t)(uint64_t)elem[i] + QDMA_FLE_FLE_OFFSET); - fle_iova = elem_iova + QDMA_FLE_FLE_OFFSET; + fle_sdd = container_of(fle, const struct qdma_cntx_fle_sdd, fle[0]); + sdd = fle_sdd->sdd; - DPAA2_SET_FD_ADDR(&fd[i], fle_iova); - DPAA2_SET_FD_COMPOUND_FMT(&fd[i]); - DPAA2_SET_FD_FRC(&fd[i], QDMA_SER_CTX); + for (i = 0; i < DPAA2_QDMA_MAX_FLE; i++) { + DPAA2_QDMA_INFO("fle[%d] info:", i); + dpaa2_qdma_fle_dump(&fle[i]); + } - memset(fle, 0, DPAA2_QDMA_MAX_FLE * sizeof(struct qbman_fle) + - DPAA2_QDMA_MAX_SDD * sizeof(struct qdma_sdd)); + if (fle[DPAA2_QDMA_SRC_FLE].word4.fmt != + fle[DPAA2_QDMA_DST_FLE].word4.fmt) { + DPAA2_QDMA_ERR("fle[%d].fmt(%d) != fle[%d].fmt(%d)", + DPAA2_QDMA_SRC_FLE, + fle[DPAA2_QDMA_SRC_FLE].word4.fmt, + DPAA2_QDMA_DST_FLE, + fle[DPAA2_QDMA_DST_FLE].word4.fmt); + + return; + } else if (fle[DPAA2_QDMA_SRC_FLE].word4.fmt == + QBMAN_FLE_WORD4_FMT_SGE) { + cntx_sg = container_of(fle_sdd, const struct qdma_cntx_sg, + fle_sdd); + } else if (fle[DPAA2_QDMA_SRC_FLE].word4.fmt != + QBMAN_FLE_WORD4_FMT_SBF) { + DPAA2_QDMA_ERR("Unsupported fle format:%d", + fle[DPAA2_QDMA_SRC_FLE].word4.fmt); + return; + } - dpaa2_qdma_populate_fle(fle, fle_iova, rbp, - job[i]->src, job[i]->dest, job[i]->len, - job[i]->flags, QBMAN_FLE_WORD4_FMT_SBF); + for (i = 0; i < DPAA2_QDMA_MAX_SDD; i++) { + DPAA2_QDMA_INFO("sdd[%d] info:", i); + dpaa2_qdma_sdd_dump(&sdd[i]); } - return 0; + if (cntx_sg) { + DPAA2_QDMA_INFO("long format/SG format, job number:%d", + cntx_sg->job_nb); + if (!cntx_sg->job_nb || + cntx_sg->job_nb > RTE_DPAAX_QDMA_JOB_SUBMIT_MAX) { + DPAA2_QDMA_ERR("Invalid SG job number:%d", + cntx_sg->job_nb); + return; + } + for (i = 0; i < cntx_sg->job_nb; i++) { + DPAA2_QDMA_INFO("sg[%d] src info:", i); + dpaa2_qdma_sge_dump(&cntx_sg->sg_src_entry[i]); + DPAA2_QDMA_INFO("sg[%d] dst info:", i); + dpaa2_qdma_sge_dump(&cntx_sg->sg_dst_entry[i]); + DPAA2_QDMA_INFO("cntx_idx[%d]:%d", i, + cntx_sg->cntx_idx[i]); + } + } else { + DPAA2_QDMA_INFO("long format/Single buffer cntx"); + } } -static inline int -dpdmai_dev_set_sg_fd_lf(struct qdma_virt_queue *qdma_vq, - struct qbman_fd *fd, - struct rte_dpaa2_qdma_job **job, - uint16_t nb_jobs) +static int +dpaa2_qdma_copy_sg(void *dev_private, + uint16_t vchan, + const struct rte_dma_sge *src, + const struct rte_dma_sge *dst, + uint16_t nb_src, uint16_t nb_dst, + uint64_t flags) { - struct rte_dpaa2_qdma_rbp *rbp = &qdma_vq->rbp; - struct rte_dpaa2_qdma_job **ppjob; - void *elem; + struct dpaa2_dpdmai_dev *dpdmai_dev = dev_private; + struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev; + struct qdma_virt_queue *qdma_vq = &qdma_dev->vqs[vchan]; + int ret = 0, expected, i; + uint32_t len; + struct qbman_fd *fd = &qdma_vq->fd[qdma_vq->fd_idx]; + struct qdma_cntx_sg *cntx_sg = NULL; + rte_iova_t cntx_iova, fle_iova, sdd_iova; + rte_iova_t src_sge_iova, dst_sge_iova; struct qbman_fle *fle; - uint64_t elem_iova, fle_iova, src, dst; - int ret = 0, i; - struct qdma_sg_entry *src_sge, *dst_sge; - uint32_t len, fmt, flags; - - /* - * Get an FLE/SDD from FLE pool. - * Note: IO metadata is before the FLE and SDD memory. 
- */ - if (qdma_vq->flags & DPAA2_QDMA_VQ_NO_RESPONSE) { - elem = job[0]->usr_elem; - } else { - ret = rte_mempool_get(qdma_vq->fle_pool, &elem); - if (ret) { - DPAA2_QDMA_DP_DEBUG("Memory alloc failed for FLE"); - return ret; - } + struct qdma_sdd *sdd; + const uint16_t *idx_addr = NULL; + + if (unlikely(nb_src != nb_dst)) { + DPAA2_QDMA_ERR("SG entry src num(%d) != dst num(%d)", + nb_src, nb_dst); + return -ENOTSUP; + } + + if (unlikely(!nb_src)) { + DPAA2_QDMA_ERR("No SG entry specified"); + return -EINVAL; + } + + if (unlikely(nb_src > RTE_DPAAX_QDMA_JOB_SUBMIT_MAX)) { + DPAA2_QDMA_ERR("SG entry number(%d) > MAX(%d)", + nb_src, RTE_DPAAX_QDMA_JOB_SUBMIT_MAX); + return -EINVAL; } -#ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA - elem_iova = rte_mempool_virt2iova(elem); -#else - elem_iova = DPAA2_VADDR_TO_IOVA(elem); -#endif + memset(fd, 0, sizeof(struct qbman_fd)); - /* Set the metadata */ - /* Save job context. */ - *((uint16_t *) - ((uintptr_t)(uint64_t)elem + QDMA_FLE_JOB_NB_OFFSET)) = nb_jobs; - ppjob = (struct rte_dpaa2_qdma_job **) - ((uintptr_t)(uint64_t)elem + QDMA_FLE_SG_JOBS_OFFSET); - for (i = 0; i < nb_jobs; i++) - ppjob[i] = job[i]; + if (qdma_dev->is_silent) { + cntx_sg = qdma_vq->cntx_sg[qdma_vq->silent_idx]; + } else { + ret = rte_mempool_get(qdma_vq->fle_pool, + (void **)&cntx_sg); + if (ret) + return ret; + DPAA2_SET_FD_FRC(fd, QDMA_SER_CTX); + idx_addr = DPAA2_QDMA_IDXADDR_FROM_SG_FLAG(flags); + for (i = 0; i < nb_src; i++) + cntx_sg->cntx_idx[i] = idx_addr[i]; + } - ppjob[0]->vq_id = qdma_vq->vq_id; + cntx_iova = (uint64_t)cntx_sg - qdma_vq->fle_iova2va_offset; - fle = (struct qbman_fle *) - ((uintptr_t)(uint64_t)elem + QDMA_FLE_FLE_OFFSET); - fle_iova = elem_iova + QDMA_FLE_FLE_OFFSET; + fle = cntx_sg->fle_sdd.fle; + fle_iova = cntx_iova + + offsetof(struct qdma_cntx_sg, fle_sdd) + + offsetof(struct qdma_cntx_fle_sdd, fle); - DPAA2_SET_FD_ADDR(fd, fle_iova); + dpaa2_qdma_fd_set_addr(fd, fle_iova); DPAA2_SET_FD_COMPOUND_FMT(fd); - if (!(qdma_vq->flags & DPAA2_QDMA_VQ_NO_RESPONSE)) - DPAA2_SET_FD_FRC(fd, QDMA_SER_CTX); + DPAA2_SET_FD_FLC(fd, (uint64_t)cntx_sg); + + if (qdma_vq->fle_pre_populate) { + if (unlikely(!fle[DPAA2_QDMA_SRC_FLE].length)) { + fle_sdd_sg_pre_populate(cntx_sg, qdma_vq); + if (!qdma_dev->is_silent && cntx_sg && idx_addr) { + for (i = 0; i < nb_src; i++) + cntx_sg->cntx_idx[i] = idx_addr[i]; + } + } - /* Populate FLE */ - if (likely(nb_jobs > 1)) { - src_sge = (struct qdma_sg_entry *) - ((uintptr_t)(uint64_t)elem + QDMA_FLE_SG_ENTRY_OFFSET); - dst_sge = src_sge + DPAA2_QDMA_MAX_SG_NB; - src = elem_iova + QDMA_FLE_SG_ENTRY_OFFSET; - dst = src + - DPAA2_QDMA_MAX_SG_NB * sizeof(struct qdma_sg_entry); - len = qdma_populate_sg_entry(job, src_sge, dst_sge, nb_jobs); - fmt = QBMAN_FLE_WORD4_FMT_SGE; - flags = RTE_DPAA2_QDMA_JOB_SRC_PHY | RTE_DPAA2_QDMA_JOB_DEST_PHY; + len = sg_entry_post_populate(src, dst, + cntx_sg, nb_src); + sg_fle_post_populate(fle, len); } else { - src = job[0]->src; - dst = job[0]->dest; - len = job[0]->len; - fmt = QBMAN_FLE_WORD4_FMT_SBF; - flags = job[0]->flags; + sdd = cntx_sg->fle_sdd.sdd; + sdd_iova = cntx_iova + + offsetof(struct qdma_cntx_sg, fle_sdd) + + offsetof(struct qdma_cntx_fle_sdd, sdd); + src_sge_iova = cntx_iova + + offsetof(struct qdma_cntx_sg, sg_src_entry); + dst_sge_iova = cntx_iova + + offsetof(struct qdma_cntx_sg, sg_dst_entry); + len = sg_entry_populate(src, dst, cntx_sg, nb_src); + + fle_populate(fle, sdd, sdd_iova, + &qdma_vq->rbp, src_sge_iova, dst_sge_iova, len, + QBMAN_FLE_WORD4_FMT_SGE); } - memset(fle, 0, 
DPAA2_QDMA_MAX_FLE * sizeof(struct qbman_fle) + - DPAA2_QDMA_MAX_SDD * sizeof(struct qdma_sdd)); + if (unlikely(qdma_vq->flags & DPAA2_QDMA_DESC_DEBUG_FLAG)) + dpaa2_qdma_long_fmt_dump(cntx_sg->fle_sdd.fle); - dpaa2_qdma_populate_fle(fle, fle_iova, rbp, - src, dst, len, flags, fmt); + dpaa2_qdma_fd_save_att(fd, 0, DPAA2_QDMA_FD_SG); + qdma_vq->fd_idx++; + qdma_vq->silent_idx = + (qdma_vq->silent_idx + 1) & (DPAA2_QDMA_MAX_DESC - 1); - return 0; + if (flags & RTE_DMA_OP_FLAG_SUBMIT) { + expected = qdma_vq->fd_idx; + ret = dpaa2_qdma_multi_eq(qdma_vq); + if (likely(ret == expected)) { + qdma_vq->copy_num += nb_src; + return (qdma_vq->copy_num - 1) & UINT16_MAX; + } + } else { + qdma_vq->copy_num += nb_src; + return (qdma_vq->copy_num - 1) & UINT16_MAX; + } + + return ret; } -static inline uint16_t -dpdmai_dev_get_job_us(struct qdma_virt_queue *qdma_vq __rte_unused, - const struct qbman_fd *fd, - struct rte_dpaa2_qdma_job **job, uint16_t *nb_jobs) +static inline void +qdma_populate_fd_pci(uint64_t src, uint64_t dest, + uint32_t len, struct qbman_fd *fd, + struct dpaa2_qdma_rbp *rbp, int ser) { - uint16_t vqid; - size_t iova; - struct rte_dpaa2_qdma_job **ppjob; + fd->simple_pci.saddr_lo = lower_32_bits(src); + fd->simple_pci.saddr_hi = upper_32_bits(src); - if (fd->simple_pci.saddr_hi & (QDMA_RBP_UPPER_ADDRESS_MASK >> 32)) - iova = (size_t)(((uint64_t)fd->simple_pci.daddr_hi) << 32 - | (uint64_t)fd->simple_pci.daddr_lo); - else - iova = (size_t)(((uint64_t)fd->simple_pci.saddr_hi) << 32 - | (uint64_t)fd->simple_pci.saddr_lo); + fd->simple_pci.len_sl = len; - ppjob = (struct rte_dpaa2_qdma_job **)DPAA2_IOVA_TO_VADDR(iova) - 1; - *job = (struct rte_dpaa2_qdma_job *)*ppjob; - (*job)->status = (fd->simple_pci.acc_err << 8) | - (fd->simple_pci.error); - vqid = (*job)->vq_id; - *nb_jobs = 1; + fd->simple_pci.bmt = DPAA2_QDMA_BMT_DISABLE; + fd->simple_pci.fmt = DPAA2_QDMA_FD_SHORT_FORMAT; + fd->simple_pci.sl = 1; + fd->simple_pci.ser = ser; + if (ser) + fd->simple.frc |= QDMA_SER_CTX; + + fd->simple_pci.sportid = rbp->sportid; - return vqid; + fd->simple_pci.svfid = rbp->svfid; + fd->simple_pci.spfid = rbp->spfid; + fd->simple_pci.svfa = rbp->svfa; + fd->simple_pci.dvfid = rbp->dvfid; + fd->simple_pci.dpfid = rbp->dpfid; + fd->simple_pci.dvfa = rbp->dvfa; + + fd->simple_pci.srbp = rbp->srbp; + if (rbp->srbp) + fd->simple_pci.rdttype = 0; + else + fd->simple_pci.rdttype = dpaa2_coherent_alloc_cache; + + /*dest is pcie memory */ + fd->simple_pci.dportid = rbp->dportid; + fd->simple_pci.drbp = rbp->drbp; + if (rbp->drbp) + fd->simple_pci.wrttype = 0; + else + fd->simple_pci.wrttype = dpaa2_coherent_no_alloc_cache; + + fd->simple_pci.daddr_lo = lower_32_bits(dest); + fd->simple_pci.daddr_hi = upper_32_bits(dest); } -static inline uint16_t -dpdmai_dev_get_single_job_lf(struct qdma_virt_queue *qdma_vq, - const struct qbman_fd *fd, - struct rte_dpaa2_qdma_job **job, - uint16_t *nb_jobs) +static inline void +qdma_populate_fd_ddr(uint64_t src, uint64_t dest, + uint32_t len, struct qbman_fd *fd, int ser) { - struct qbman_fle *fle; - struct rte_dpaa2_qdma_job **ppjob = NULL; - uint16_t status; + fd->simple_ddr.saddr_lo = lower_32_bits(src); + fd->simple_ddr.saddr_hi = upper_32_bits(src); - /* - * Fetch metadata from FLE. job and vq_id were set - * in metadata in the enqueue operation. 
+ fd->simple_ddr.len = len; + + fd->simple_ddr.bmt = DPAA2_QDMA_BMT_DISABLE; + fd->simple_ddr.fmt = DPAA2_QDMA_FD_SHORT_FORMAT; + fd->simple_ddr.sl = 1; + fd->simple_ddr.ser = ser; + if (ser) + fd->simple.frc |= QDMA_SER_CTX; + /** + * src If RBP=0 {NS,RDTTYPE[3:0]}: 0_1011 + * Coherent copy of cacheable memory, + * lookup in downstream cache, no allocate + * on miss. + */ + fd->simple_ddr.rns = 0; + fd->simple_ddr.rdttype = dpaa2_coherent_alloc_cache; + /** + * dest If RBP=0 {NS,WRTTYPE[3:0]}: 0_0111 + * Coherent write of cacheable memory, + * lookup in downstream cache, no allocate on miss */ - fle = (struct qbman_fle *) - DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd)); + fd->simple_ddr.wns = 0; + fd->simple_ddr.wrttype = dpaa2_coherent_no_alloc_cache; - *nb_jobs = 1; - ppjob = (struct rte_dpaa2_qdma_job **)((uintptr_t)(uint64_t)fle - - QDMA_FLE_FLE_OFFSET + QDMA_FLE_SINGLE_JOB_OFFSET); + fd->simple_ddr.daddr_lo = lower_32_bits(dest); + fd->simple_ddr.daddr_hi = upper_32_bits(dest); +} - status = (DPAA2_GET_FD_ERR(fd) << 8) | (DPAA2_GET_FD_FRC(fd) & 0xFF); +static int +dpaa2_qdma_short_copy(struct qdma_virt_queue *qdma_vq, + rte_iova_t src, rte_iova_t dst, uint32_t length, + int is_silent, uint64_t flags) +{ + int ret = 0, expected; + struct qbman_fd *fd = &qdma_vq->fd[qdma_vq->fd_idx]; - *job = *ppjob; - (*job)->status = status; + memset(fd, 0, sizeof(struct qbman_fd)); - /* Free FLE to the pool */ - rte_mempool_put(qdma_vq->fle_pool, - (void *) - ((uintptr_t)(uint64_t)fle - QDMA_FLE_FLE_OFFSET)); + if (qdma_vq->rbp.drbp || qdma_vq->rbp.srbp) { + /** PCIe EP*/ + qdma_populate_fd_pci(src, + dst, length, + fd, &qdma_vq->rbp, + is_silent ? 0 : 1); + } else { + /** DDR or PCIe RC*/ + qdma_populate_fd_ddr(src, + dst, length, + fd, is_silent ? 0 : 1); + } + dpaa2_qdma_fd_save_att(fd, DPAA2_QDMA_IDX_FROM_FLAG(flags), + DPAA2_QDMA_FD_SHORT); + qdma_vq->fd_idx++; + + if (flags & RTE_DMA_OP_FLAG_SUBMIT) { + expected = qdma_vq->fd_idx; + ret = dpaa2_qdma_multi_eq(qdma_vq); + if (likely(ret == expected)) { + qdma_vq->copy_num++; + return (qdma_vq->copy_num - 1) & UINT16_MAX; + } + } else { + qdma_vq->copy_num++; + return (qdma_vq->copy_num - 1) & UINT16_MAX; + } - return (*job)->vq_id; + return ret; } -static inline uint16_t -dpdmai_dev_get_sg_job_lf(struct qdma_virt_queue *qdma_vq, - const struct qbman_fd *fd, - struct rte_dpaa2_qdma_job **job, - uint16_t *nb_jobs) +static int +dpaa2_qdma_long_copy(struct qdma_virt_queue *qdma_vq, + rte_iova_t src, rte_iova_t dst, uint32_t length, + int is_silent, uint64_t flags) { + int ret = 0, expected; + struct qbman_fd *fd = &qdma_vq->fd[qdma_vq->fd_idx]; + struct qdma_cntx_fle_sdd *fle_sdd = NULL; + rte_iova_t fle_iova, sdd_iova; struct qbman_fle *fle; - struct rte_dpaa2_qdma_job **ppjob = NULL; - uint16_t i, status; + struct qdma_sdd *sdd; - /* - * Fetch metadata from FLE. job and vq_id were set - * in metadata in the enqueue operation. 
- */ - fle = (struct qbman_fle *) - DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd)); - *nb_jobs = *((uint16_t *)((uintptr_t)(uint64_t)fle - - QDMA_FLE_FLE_OFFSET + QDMA_FLE_JOB_NB_OFFSET)); - ppjob = (struct rte_dpaa2_qdma_job **)((uintptr_t)(uint64_t)fle - - QDMA_FLE_FLE_OFFSET + QDMA_FLE_SG_JOBS_OFFSET); - status = (DPAA2_GET_FD_ERR(fd) << 8) | (DPAA2_GET_FD_FRC(fd) & 0xFF); - - for (i = 0; i < (*nb_jobs); i++) { - job[i] = ppjob[i]; - job[i]->status = status; + memset(fd, 0, sizeof(struct qbman_fd)); + + if (is_silent) { + fle_sdd = qdma_vq->cntx_fle_sdd[qdma_vq->silent_idx]; + } else { + ret = rte_mempool_get(qdma_vq->fle_pool, + (void **)&fle_sdd); + if (ret) + return ret; + DPAA2_SET_FD_FRC(fd, QDMA_SER_CTX); + } + + fle = fle_sdd->fle; + fle_iova = (uint64_t)fle - qdma_vq->fle_iova2va_offset; + + dpaa2_qdma_fd_set_addr(fd, fle_iova); + DPAA2_SET_FD_COMPOUND_FMT(fd); + DPAA2_SET_FD_FLC(fd, (uint64_t)fle); + + if (qdma_vq->fle_pre_populate) { + if (unlikely(!fle[DPAA2_QDMA_SRC_FLE].length)) { + fle_sdd_pre_populate(fle_sdd, + &qdma_vq->rbp, + 0, 0, QBMAN_FLE_WORD4_FMT_SBF); + } + + fle_post_populate(fle, src, dst, length); + } else { + sdd = fle_sdd->sdd; + sdd_iova = (uint64_t)sdd - qdma_vq->fle_iova2va_offset; + fle_populate(fle, sdd, sdd_iova, &qdma_vq->rbp, + src, dst, length, + QBMAN_FLE_WORD4_FMT_SBF); } - /* Free FLE to the pool */ - rte_mempool_put(qdma_vq->fle_pool, - (void *) - ((uintptr_t)(uint64_t)fle - QDMA_FLE_FLE_OFFSET)); + if (unlikely(qdma_vq->flags & DPAA2_QDMA_DESC_DEBUG_FLAG)) + dpaa2_qdma_long_fmt_dump(fle); + + dpaa2_qdma_fd_save_att(fd, DPAA2_QDMA_IDX_FROM_FLAG(flags), + DPAA2_QDMA_FD_LONG); + qdma_vq->fd_idx++; + qdma_vq->silent_idx = + (qdma_vq->silent_idx + 1) & (DPAA2_QDMA_MAX_DESC - 1); + + if (flags & RTE_DMA_OP_FLAG_SUBMIT) { + expected = qdma_vq->fd_idx; + ret = dpaa2_qdma_multi_eq(qdma_vq); + if (likely(ret == expected)) { + qdma_vq->copy_num++; + return (qdma_vq->copy_num - 1) & UINT16_MAX; + } + } else { + qdma_vq->copy_num++; + return (qdma_vq->copy_num - 1) & UINT16_MAX; + } - return job[0]->vq_id; + return ret; } -/* Function to receive a QDMA job for a given device and queue*/ static int -dpdmai_dev_dequeue_multijob_prefetch(struct qdma_virt_queue *qdma_vq, - uint16_t *vq_id, - struct rte_dpaa2_qdma_job **job, - uint16_t nb_jobs) +dpaa2_qdma_copy(void *dev_private, uint16_t vchan, + rte_iova_t src, rte_iova_t dst, + uint32_t length, uint64_t flags) { - struct dpaa2_dpdmai_dev *dpdmai_dev = qdma_vq->dpdmai_dev; - struct dpaa2_queue *rxq = &(dpdmai_dev->rx_queue[0]); + struct dpaa2_dpdmai_dev *dpdmai_dev = dev_private; + struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev; + struct qdma_virt_queue *qdma_vq = &qdma_dev->vqs[vchan]; + + if (qdma_vq->using_short_fd) + return dpaa2_qdma_short_copy(qdma_vq, src, dst, + length, qdma_dev->is_silent, flags); + else + return dpaa2_qdma_long_copy(qdma_vq, src, dst, + length, qdma_dev->is_silent, flags); +} + +static inline int +dpaa2_qdma_dq_fd(const struct qbman_fd *fd, + struct qdma_virt_queue *qdma_vq, + uint16_t *free_space, uint16_t *fle_elem_nb) +{ + uint16_t idx, att; + enum dpaa2_qdma_fd_type type; + int ret; + struct qdma_cntx_sg *cntx_sg; + struct qdma_cntx_fle_sdd *fle_sdd; + + att = dpaa2_qdma_fd_get_att(fd); + type = DPAA2_QDMA_FD_ATT_TYPE(att); + if (type == DPAA2_QDMA_FD_SHORT) { + idx = DPAA2_QDMA_FD_ATT_CNTX(att); + ret = qdma_cntx_idx_ring_eq(qdma_vq->ring_cntx_idx, + &idx, 1, free_space); + if (unlikely(ret != 1)) + return -ENOSPC; + + return 0; + } + if (type == DPAA2_QDMA_FD_LONG) { + idx = 
DPAA2_QDMA_FD_ATT_CNTX(att); + fle_sdd = (void *)(uintptr_t)DPAA2_GET_FD_FLC(fd); + qdma_vq->fle_elem[*fle_elem_nb] = fle_sdd; + (*fle_elem_nb)++; + ret = qdma_cntx_idx_ring_eq(qdma_vq->ring_cntx_idx, + &idx, 1, free_space); + if (unlikely(ret != 1)) + return -ENOSPC; + + return 0; + } + if (type == DPAA2_QDMA_FD_SG) { + fle_sdd = (void *)(uintptr_t)DPAA2_GET_FD_FLC(fd); + qdma_vq->fle_elem[*fle_elem_nb] = fle_sdd; + (*fle_elem_nb)++; + cntx_sg = container_of(fle_sdd, + struct qdma_cntx_sg, fle_sdd); + ret = qdma_cntx_idx_ring_eq(qdma_vq->ring_cntx_idx, + cntx_sg->cntx_idx, + cntx_sg->job_nb, free_space); + if (unlikely(ret < cntx_sg->job_nb)) + return -ENOSPC; + + return 0; + } + + DPAA2_QDMA_ERR("Invalid FD type, ATT=0x%04x", + fd->simple_ddr.rsv1_att); + return -EIO; +} + +static uint16_t +dpaa2_qdma_dequeue(void *dev_private, + uint16_t vchan, const uint16_t nb_cpls, + uint16_t *cntx_idx, bool *has_error) +{ + struct dpaa2_dpdmai_dev *dpdmai_dev = dev_private; + struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev; + struct qdma_virt_queue *qdma_vq = &qdma_dev->vqs[vchan]; + + struct dpaa2_queue *rxq; struct qbman_result *dq_storage, *dq_storage1 = NULL; struct qbman_pull_desc pulldesc; struct qbman_swp *swp; struct queue_storage_info_t *q_storage; + uint32_t fqid; uint8_t status, pending; uint8_t num_rx = 0; const struct qbman_fd *fd; - uint16_t vqid, num_rx_ret; - uint16_t rx_fqid = rxq->fqid; int ret, pull_size; + uint16_t free_space = 0, fle_elem_nb = 0; - if (qdma_vq->flags & DPAA2_QDMA_VQ_FD_SG_FORMAT) { - /** Make sure there are enough space to get jobs.*/ - if (unlikely(nb_jobs < DPAA2_QDMA_MAX_SG_NB)) - return -EINVAL; - nb_jobs = 1; - } + if (unlikely(qdma_dev->is_silent)) + return 0; if (unlikely(!DPAA2_PER_LCORE_DPIO)) { ret = dpaa2_affine_qbman_swp(); if (ret) { - DPAA2_QDMA_ERR( - "Failed to allocate IO portal, tid: %d", + DPAA2_QDMA_ERR("Allocate portal err, tid(%d)", rte_gettid()); + if (has_error) + *has_error = true; return 0; } } swp = DPAA2_PER_LCORE_PORTAL; - pull_size = (nb_jobs > dpaa2_dqrr_size) ? dpaa2_dqrr_size : nb_jobs; - q_storage = rxq->q_storage; + pull_size = (nb_cpls > dpaa2_dqrr_size) ? 
+ dpaa2_dqrr_size : nb_cpls; + rxq = &(dpdmai_dev->rx_queue[qdma_vq->vq_id]); + fqid = rxq->fqid; + q_storage = rxq->q_storage[0]; if (unlikely(!q_storage->active_dqs)) { q_storage->toggle = 0; @@ -594,21 +1030,20 @@ dpdmai_dev_dequeue_multijob_prefetch(struct qdma_virt_queue *qdma_vq, q_storage->last_num_pkts = pull_size; qbman_pull_desc_clear(&pulldesc); qbman_pull_desc_set_numframes(&pulldesc, - q_storage->last_num_pkts); - qbman_pull_desc_set_fq(&pulldesc, rx_fqid); + q_storage->last_num_pkts); + qbman_pull_desc_set_fq(&pulldesc, fqid); qbman_pull_desc_set_storage(&pulldesc, dq_storage, - (size_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1); + DPAA2_VADDR_TO_IOVA(dq_storage), 1); if (check_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index)) { while (!qbman_check_command_complete( - get_swp_active_dqs( - DPAA2_PER_LCORE_DPIO->index))) + get_swp_active_dqs( + DPAA2_PER_LCORE_DPIO->index))) ; clear_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index); } while (1) { if (qbman_swp_pull(swp, &pulldesc)) { - DPAA2_QDMA_DP_WARN( - "VDQ command not issued.QBMAN busy"); + DPAA2_QDMA_DP_WARN("QBMAN busy"); /* Portal was busy, try again */ continue; } @@ -617,7 +1052,7 @@ dpdmai_dev_dequeue_multijob_prefetch(struct qdma_virt_queue *qdma_vq, q_storage->active_dqs = dq_storage; q_storage->active_dpio_id = DPAA2_PER_LCORE_DPIO->index; set_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index, - dq_storage); + dq_storage); } dq_storage = q_storage->active_dqs; @@ -631,9 +1066,9 @@ dpdmai_dev_dequeue_multijob_prefetch(struct qdma_virt_queue *qdma_vq, dq_storage1 = q_storage->dq_storage[q_storage->toggle]; qbman_pull_desc_clear(&pulldesc); qbman_pull_desc_set_numframes(&pulldesc, pull_size); - qbman_pull_desc_set_fq(&pulldesc, rx_fqid); + qbman_pull_desc_set_fq(&pulldesc, fqid); qbman_pull_desc_set_storage(&pulldesc, dq_storage1, - (size_t)(DPAA2_VADDR_TO_IOVA(dq_storage1)), 1); + DPAA2_VADDR_TO_IOVA(dq_storage1), 1); /* Check if the previous issued command is completed. * Also seems like the SWP is shared between the Ethernet Driver @@ -646,706 +1081,340 @@ dpdmai_dev_dequeue_multijob_prefetch(struct qdma_virt_queue *qdma_vq, pending = 1; - do { - /* Loop until the dq_storage is updated with - * new token by QBMAN - */ - while (!qbman_check_new_result(dq_storage)) - ; - rte_prefetch0((void *)((size_t)(dq_storage + 2))); - /* Check whether Last Pull command is Expired and - * setting Condition for Loop termination - */ - if (qbman_result_DQ_is_pull_complete(dq_storage)) { - pending = 0; - /* Check for valid frame. */ - status = qbman_result_DQ_flags(dq_storage); - if (unlikely((status & QBMAN_DQ_STAT_VALIDFRAME) == 0)) - continue; - } - fd = qbman_result_DQ_fd(dq_storage); - - vqid = qdma_vq->get_job(qdma_vq, fd, &job[num_rx], - &num_rx_ret); - if (vq_id) - vq_id[num_rx] = vqid; - - dq_storage++; - num_rx += num_rx_ret; - } while (pending); - - if (check_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index)) { - while (!qbman_check_command_complete( - get_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index))) - ; - clear_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index); - } - /* issue a volatile dequeue command for next pull */ - while (1) { - if (qbman_swp_pull(swp, &pulldesc)) { - DPAA2_QDMA_DP_WARN( - "VDQ command is not issued. 
QBMAN is busy (2)"); - continue; - } - break; - } - - q_storage->active_dqs = dq_storage1; - q_storage->active_dpio_id = DPAA2_PER_LCORE_DPIO->index; - set_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index, dq_storage1); - - return num_rx; -} - -static int -dpdmai_dev_dequeue_multijob_no_prefetch(struct qdma_virt_queue *qdma_vq, - uint16_t *vq_id, - struct rte_dpaa2_qdma_job **job, - uint16_t nb_jobs) -{ - struct dpaa2_dpdmai_dev *dpdmai_dev = qdma_vq->dpdmai_dev; - struct dpaa2_queue *rxq = &(dpdmai_dev->rx_queue[0]); - struct qbman_result *dq_storage; - struct qbman_pull_desc pulldesc; - struct qbman_swp *swp; - uint8_t status, pending; - uint8_t num_rx = 0; - const struct qbman_fd *fd; - uint16_t vqid, num_rx_ret; - uint16_t rx_fqid = rxq->fqid; - int ret, next_pull, num_pulled = 0; - - if (qdma_vq->flags & DPAA2_QDMA_VQ_FD_SG_FORMAT) { - /** Make sure there are enough space to get jobs.*/ - if (unlikely(nb_jobs < DPAA2_QDMA_MAX_SG_NB)) - return -EINVAL; - nb_jobs = 1; - } - - next_pull = nb_jobs; - - if (unlikely(!DPAA2_PER_LCORE_DPIO)) { - ret = dpaa2_affine_qbman_swp(); - if (ret) { - DPAA2_QDMA_ERR( - "Failed to allocate IO portal, tid: %d", - rte_gettid()); - return 0; - } - } - swp = DPAA2_PER_LCORE_PORTAL; - - rxq = &(dpdmai_dev->rx_queue[0]); - - do { - dq_storage = rxq->q_storage->dq_storage[0]; - /* Prepare dequeue descriptor */ - qbman_pull_desc_clear(&pulldesc); - qbman_pull_desc_set_fq(&pulldesc, rx_fqid); - qbman_pull_desc_set_storage(&pulldesc, dq_storage, - (uint64_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1); - - if (next_pull > dpaa2_dqrr_size) { - qbman_pull_desc_set_numframes(&pulldesc, - dpaa2_dqrr_size); - next_pull -= dpaa2_dqrr_size; - } else { - qbman_pull_desc_set_numframes(&pulldesc, next_pull); - next_pull = 0; - } - - while (1) { - if (qbman_swp_pull(swp, &pulldesc)) { - DPAA2_QDMA_DP_WARN( - "VDQ command not issued. QBMAN busy"); - /* Portal was busy, try again */ - continue; - } - break; - } - - rte_prefetch0((void *)((size_t)(dq_storage + 1))); - /* Check if the previous issued command is completed. */ - while (!qbman_check_command_complete(dq_storage)) - ; - - num_pulled = 0; - pending = 1; - - do { - /* Loop until dq_storage is updated - * with new token by QBMAN - */ - while (!qbman_check_new_result(dq_storage)) - ; - rte_prefetch0((void *)((size_t)(dq_storage + 2))); - - if (qbman_result_DQ_is_pull_complete(dq_storage)) { - pending = 0; - /* Check for valid frame. 
*/ - status = qbman_result_DQ_flags(dq_storage); - if (unlikely((status & - QBMAN_DQ_STAT_VALIDFRAME) == 0)) - continue; - } - fd = qbman_result_DQ_fd(dq_storage); - - vqid = qdma_vq->get_job(qdma_vq, fd, - &job[num_rx], &num_rx_ret); - if (vq_id) - vq_id[num_rx] = vqid; - - dq_storage++; - num_rx += num_rx_ret; - num_pulled++; - - } while (pending); - /* Last VDQ provided all packets and more packets are requested */ - } while (next_pull && num_pulled == dpaa2_dqrr_size); - - return num_rx; -} - -static int -dpdmai_dev_submit_multi(struct qdma_virt_queue *qdma_vq, - struct rte_dpaa2_qdma_job **job, - uint16_t nb_jobs) -{ - struct dpaa2_dpdmai_dev *dpdmai_dev = qdma_vq->dpdmai_dev; - uint16_t txq_id = dpdmai_dev->tx_queue[0].fqid; - struct qbman_fd fd[DPAA2_QDMA_MAX_DESC]; - struct qbman_eq_desc eqdesc; - struct qbman_swp *swp; - uint32_t num_to_send = 0; - uint16_t num_tx = 0; - uint32_t enqueue_loop, loop; - int ret; - - if (unlikely(!DPAA2_PER_LCORE_DPIO)) { - ret = dpaa2_affine_qbman_swp(); - if (ret) { - DPAA2_QDMA_ERR( - "Failed to allocate IO portal, tid: %d", - rte_gettid()); - return 0; - } - } - swp = DPAA2_PER_LCORE_PORTAL; - - /* Prepare enqueue descriptor */ - qbman_eq_desc_clear(&eqdesc); - qbman_eq_desc_set_fq(&eqdesc, txq_id); - qbman_eq_desc_set_no_orp(&eqdesc, 0); - qbman_eq_desc_set_response(&eqdesc, 0, 0); - - if (qdma_vq->flags & DPAA2_QDMA_VQ_FD_SG_FORMAT) { - uint16_t fd_nb; - uint16_t sg_entry_nb = nb_jobs > DPAA2_QDMA_MAX_SG_NB ? - DPAA2_QDMA_MAX_SG_NB : nb_jobs; - uint16_t job_idx = 0; - uint16_t fd_sg_nb[8]; - uint16_t nb_jobs_ret = 0; - - if (nb_jobs % DPAA2_QDMA_MAX_SG_NB) - fd_nb = nb_jobs / DPAA2_QDMA_MAX_SG_NB + 1; - else - fd_nb = nb_jobs / DPAA2_QDMA_MAX_SG_NB; - - memset(&fd[0], 0, sizeof(struct qbman_fd) * fd_nb); - - for (loop = 0; loop < fd_nb; loop++) { - ret = qdma_vq->set_fd(qdma_vq, &fd[loop], &job[job_idx], - sg_entry_nb); - if (unlikely(ret < 0)) - return 0; - fd_sg_nb[loop] = sg_entry_nb; - nb_jobs -= sg_entry_nb; - job_idx += sg_entry_nb; - sg_entry_nb = nb_jobs > DPAA2_QDMA_MAX_SG_NB ? - DPAA2_QDMA_MAX_SG_NB : nb_jobs; - } - - /* Enqueue the packet to the QBMAN */ - enqueue_loop = 0; - - while (enqueue_loop < fd_nb) { - ret = qbman_swp_enqueue_multiple(swp, - &eqdesc, &fd[enqueue_loop], - NULL, fd_nb - enqueue_loop); - if (likely(ret >= 0)) { - for (loop = 0; loop < (uint32_t)ret; loop++) - nb_jobs_ret += - fd_sg_nb[enqueue_loop + loop]; - enqueue_loop += ret; - } - } - - return nb_jobs_ret; - } - - memset(fd, 0, nb_jobs * sizeof(struct qbman_fd)); - - while (nb_jobs > 0) { - num_to_send = (nb_jobs > dpaa2_eqcr_size) ? 
- dpaa2_eqcr_size : nb_jobs; - - ret = qdma_vq->set_fd(qdma_vq, &fd[num_tx], - &job[num_tx], num_to_send); - if (unlikely(ret < 0)) - break; - - /* Enqueue the packet to the QBMAN */ - enqueue_loop = 0; - loop = num_to_send; - - while (enqueue_loop < loop) { - ret = qbman_swp_enqueue_multiple(swp, - &eqdesc, - &fd[num_tx + enqueue_loop], - NULL, - loop - enqueue_loop); - if (likely(ret >= 0)) - enqueue_loop += ret; - } - num_tx += num_to_send; - nb_jobs -= loop; - } - - qdma_vq->num_enqueues += num_tx; - - return num_tx; -} - -static inline int -dpaa2_qdma_submit(void *dev_private, uint16_t vchan) -{ - struct dpaa2_dpdmai_dev *dpdmai_dev = dev_private; - struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev; - struct qdma_virt_queue *qdma_vq = &qdma_dev->vqs[vchan]; - - dpdmai_dev_submit_multi(qdma_vq, qdma_vq->job_list, - qdma_vq->num_valid_jobs); - - qdma_vq->num_valid_jobs = 0; - - return 0; -} - -static int -dpaa2_qdma_enqueue(void *dev_private, uint16_t vchan, - rte_iova_t src, rte_iova_t dst, - uint32_t length, uint64_t flags) -{ - struct dpaa2_dpdmai_dev *dpdmai_dev = dev_private; - struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev; - struct qdma_virt_queue *qdma_vq = &qdma_dev->vqs[vchan]; - struct rte_dpaa2_qdma_job *job; - int idx, ret; - - idx = (uint16_t)(qdma_vq->num_enqueues + qdma_vq->num_valid_jobs); - - ret = rte_mempool_get(qdma_vq->job_pool, (void **)&job); - if (ret) { - DPAA2_QDMA_DP_DEBUG("Memory alloc failed for FLE"); - return -ENOSPC; - } - - job->src = src; - job->dest = dst; - job->len = length; - job->flags = flags; - job->status = 0; - job->vq_id = vchan; - - qdma_vq->job_list[qdma_vq->num_valid_jobs] = job; - qdma_vq->num_valid_jobs++; - - if (flags & RTE_DMA_OP_FLAG_SUBMIT) - dpaa2_qdma_submit(dev_private, vchan); - - return idx; -} - -int -rte_dpaa2_qdma_copy_multi(int16_t dev_id, uint16_t vchan, - struct rte_dpaa2_qdma_job **jobs, - uint16_t nb_cpls) -{ - struct rte_dma_fp_object *obj = &rte_dma_fp_objs[dev_id]; - struct dpaa2_dpdmai_dev *dpdmai_dev = obj->dev_private; - struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev; - struct qdma_virt_queue *qdma_vq = &qdma_dev->vqs[vchan]; - - return dpdmai_dev_submit_multi(qdma_vq, jobs, nb_cpls); -} - -static uint16_t -dpaa2_qdma_dequeue_multi(struct qdma_device *qdma_dev, - struct qdma_virt_queue *qdma_vq, - struct rte_dpaa2_qdma_job **jobs, - uint16_t nb_jobs) -{ - struct qdma_virt_queue *temp_qdma_vq; - int ring_count; - int ret = 0, i; - - if (qdma_vq->flags & DPAA2_QDMA_VQ_FD_SG_FORMAT) { - /** Make sure there are enough space to get jobs.*/ - if (unlikely(nb_jobs < DPAA2_QDMA_MAX_SG_NB)) - return -EINVAL; - } - - /* Only dequeue when there are pending jobs on VQ */ - if (qdma_vq->num_enqueues == qdma_vq->num_dequeues) - return 0; - - if (!(qdma_vq->flags & DPAA2_QDMA_VQ_FD_SG_FORMAT) && - qdma_vq->num_enqueues < (qdma_vq->num_dequeues + nb_jobs)) - nb_jobs = RTE_MIN((qdma_vq->num_enqueues - - qdma_vq->num_dequeues), nb_jobs); - - if (qdma_vq->exclusive_hw_queue) { - /* In case of exclusive queue directly fetch from HW queue */ - ret = qdma_vq->dequeue_job(qdma_vq, NULL, jobs, nb_jobs); - if (ret < 0) { - DPAA2_QDMA_ERR( - "Dequeue from DPDMAI device failed: %d", ret); - return ret; - } - } else { - uint16_t temp_vq_id[DPAA2_QDMA_MAX_DESC]; - - /* Get the QDMA completed jobs from the software ring. 
- * In case they are not available on the ring poke the HW - * to fetch completed jobs from corresponding HW queues - */ - ring_count = rte_ring_count(qdma_vq->status_ring); - if (ring_count < nb_jobs) { - ret = qdma_vq->dequeue_job(qdma_vq, - temp_vq_id, jobs, nb_jobs); - for (i = 0; i < ret; i++) { - temp_qdma_vq = &qdma_dev->vqs[temp_vq_id[i]]; - rte_ring_enqueue(temp_qdma_vq->status_ring, - (void *)(jobs[i])); - } - ring_count = rte_ring_count( - qdma_vq->status_ring); - } - - if (ring_count) { - /* Dequeue job from the software ring - * to provide to the user - */ - ret = rte_ring_dequeue_bulk(qdma_vq->status_ring, - (void **)jobs, - ring_count, NULL); - } - } - - qdma_vq->num_dequeues += ret; - return ret; -} - -static uint16_t -dpaa2_qdma_dequeue_status(void *dev_private, uint16_t vchan, - const uint16_t nb_cpls, - uint16_t *last_idx, - enum rte_dma_status_code *st) -{ - struct dpaa2_dpdmai_dev *dpdmai_dev = dev_private; - struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev; - struct qdma_virt_queue *qdma_vq = &qdma_dev->vqs[vchan]; - struct rte_dpaa2_qdma_job *jobs[DPAA2_QDMA_MAX_DESC]; - int ret, i; - - ret = dpaa2_qdma_dequeue_multi(qdma_dev, qdma_vq, jobs, nb_cpls); - - for (i = 0; i < ret; i++) - st[i] = jobs[i]->status; - - rte_mempool_put_bulk(qdma_vq->job_pool, (void **)jobs, ret); - - if (last_idx != NULL) - *last_idx = (uint16_t)(qdma_vq->num_dequeues - 1); - - return ret; -} - -static uint16_t -dpaa2_qdma_dequeue(void *dev_private, - uint16_t vchan, const uint16_t nb_cpls, - uint16_t *last_idx, bool *has_error) -{ - struct dpaa2_dpdmai_dev *dpdmai_dev = dev_private; - struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev; - struct qdma_virt_queue *qdma_vq = &qdma_dev->vqs[vchan]; - struct rte_dpaa2_qdma_job *jobs[DPAA2_QDMA_MAX_DESC]; - int ret; + do { + /* Loop until the dq_storage is updated with + * new token by QBMAN + */ + while (!qbman_check_new_result(dq_storage)) + ; + rte_prefetch0((void *)((size_t)(dq_storage + 2))); + /* Check whether Last Pull command is Expired and + * setting Condition for Loop termination + */ + if (qbman_result_DQ_is_pull_complete(dq_storage)) { + pending = 0; + /* Check for valid frame. 
*/ + status = qbman_result_DQ_flags(dq_storage); + if (unlikely((status & QBMAN_DQ_STAT_VALIDFRAME) == 0)) + continue; + } + fd = qbman_result_DQ_fd(dq_storage); + ret = dpaa2_qdma_dq_fd(fd, qdma_vq, &free_space, &fle_elem_nb); + if (ret || free_space < RTE_DPAAX_QDMA_JOB_SUBMIT_MAX) + pending = 0; - RTE_SET_USED(has_error); + dq_storage++; + } while (pending); - ret = dpaa2_qdma_dequeue_multi(qdma_dev, qdma_vq, - jobs, nb_cpls); + if (check_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index)) { + while (!qbman_check_command_complete( + get_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index))) + ; + clear_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index); + } + /* issue a volatile dequeue command for next pull */ + while (1) { + if (qbman_swp_pull(swp, &pulldesc)) { + DPAA2_QDMA_DP_WARN("QBMAN is busy (2)"); + continue; + } + break; + } - rte_mempool_put_bulk(qdma_vq->job_pool, (void **)jobs, ret); + q_storage->active_dqs = dq_storage1; + q_storage->active_dpio_id = DPAA2_PER_LCORE_DPIO->index; + set_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index, dq_storage1); - if (last_idx != NULL) - *last_idx = (uint16_t)(qdma_vq->num_dequeues - 1); + if (fle_elem_nb > 0) { + rte_mempool_put_bulk(qdma_vq->fle_pool, + qdma_vq->fle_elem, fle_elem_nb); + } - return ret; -} + num_rx = qdma_cntx_idx_ring_dq(qdma_vq->ring_cntx_idx, + cntx_idx, nb_cpls); -uint16_t -rte_dpaa2_qdma_completed_multi(int16_t dev_id, uint16_t vchan, - struct rte_dpaa2_qdma_job **jobs, - uint16_t nb_cpls) -{ - struct rte_dma_fp_object *obj = &rte_dma_fp_objs[dev_id]; - struct dpaa2_dpdmai_dev *dpdmai_dev = obj->dev_private; - struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev; - struct qdma_virt_queue *qdma_vq = &qdma_dev->vqs[vchan]; + if (has_error) + *has_error = false; - return dpaa2_qdma_dequeue_multi(qdma_dev, qdma_vq, jobs, nb_cpls); + return num_rx; } static int dpaa2_qdma_info_get(const struct rte_dma_dev *dev, - struct rte_dma_info *dev_info, - uint32_t info_sz) + struct rte_dma_info *dev_info, + uint32_t info_sz __rte_unused) { - RTE_SET_USED(dev); - RTE_SET_USED(info_sz); + struct dpaa2_dpdmai_dev *dpdmai_dev = dev->data->dev_private; dev_info->dev_capa = RTE_DMA_CAPA_MEM_TO_MEM | - RTE_DMA_CAPA_MEM_TO_DEV | - RTE_DMA_CAPA_DEV_TO_DEV | - RTE_DMA_CAPA_DEV_TO_MEM | - RTE_DMA_CAPA_SILENT | - RTE_DMA_CAPA_OPS_COPY; - dev_info->max_vchans = DPAA2_QDMA_MAX_VHANS; + RTE_DMA_CAPA_MEM_TO_DEV | + RTE_DMA_CAPA_DEV_TO_DEV | + RTE_DMA_CAPA_DEV_TO_MEM | + RTE_DMA_CAPA_SILENT | + RTE_DMA_CAPA_OPS_COPY | + RTE_DMA_CAPA_OPS_COPY_SG; + dev_info->dev_capa |= RTE_DMA_CAPA_DPAAX_QDMA_FLAGS_INDEX; + dev_info->max_vchans = dpdmai_dev->num_queues; dev_info->max_desc = DPAA2_QDMA_MAX_DESC; dev_info->min_desc = DPAA2_QDMA_MIN_DESC; + dev_info->max_sges = RTE_DPAAX_QDMA_JOB_SUBMIT_MAX; + dev_info->dev_name = dev->device->name; + if (dpdmai_dev->qdma_dev) + dev_info->nb_vchans = dpdmai_dev->qdma_dev->num_vqs; return 0; } static int dpaa2_qdma_configure(struct rte_dma_dev *dev, - const struct rte_dma_conf *dev_conf, - uint32_t conf_sz) + const struct rte_dma_conf *dev_conf, + uint32_t conf_sz) { - char name[32]; /* RTE_MEMZONE_NAMESIZE = 32 */ struct dpaa2_dpdmai_dev *dpdmai_dev = dev->data->dev_private; struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev; + uint16_t i; + struct dpdmai_rx_queue_cfg rx_queue_cfg; + struct dpdmai_rx_queue_attr rx_attr; + struct dpdmai_tx_queue_attr tx_attr; + struct dpaa2_queue *rxq; + int ret = 0; DPAA2_QDMA_FUNC_TRACE(); RTE_SET_USED(conf_sz); - /* In case QDMA device is not in stopped state, return -EBUSY */ - if (qdma_dev->state == 1) { - 
DPAA2_QDMA_ERR( - "Device is in running state. Stop before config."); - return -1; - } + if (dev_conf->nb_vchans > dpdmai_dev->num_queues) { + DPAA2_QDMA_ERR("%s config queues(%d) > hw queues(%d)", + dev->data->dev_name, dev_conf->nb_vchans, + dpdmai_dev->num_queues); - /* Allocate Virtual Queues */ - sprintf(name, "qdma_%d_vq", dev->data->dev_id); - qdma_dev->vqs = rte_malloc(name, - (sizeof(struct qdma_virt_queue) * dev_conf->nb_vchans), - RTE_CACHE_LINE_SIZE); - if (!qdma_dev->vqs) { - DPAA2_QDMA_ERR("qdma_virtual_queues allocation failed"); - return -ENOMEM; + return -ENOTSUP; } - qdma_dev->num_vqs = dev_conf->nb_vchans; - - return 0; -} - -static int -check_devargs_handler(__rte_unused const char *key, - const char *value, - __rte_unused void *opaque) -{ - if (strcmp(value, "1")) - return -1; - return 0; -} - -static int -dpaa2_qdma_get_devargs(struct rte_devargs *devargs, const char *key) -{ - struct rte_kvargs *kvlist; + if (qdma_dev->vqs) { + DPAA2_QDMA_DEBUG("%s: queues de-config(%d)/re-config(%d)", + dev->data->dev_name, + qdma_dev->num_vqs, dev_conf->nb_vchans); + for (i = 0; i < qdma_dev->num_vqs; i++) { + if ((qdma_dev->vqs[i].num_enqueues != + qdma_dev->vqs[i].num_dequeues) && + !qdma_dev->is_silent) { + DPAA2_QDMA_ERR("VQ(%d) %"PRIu64" jobs in dma.", + i, qdma_dev->vqs[i].num_enqueues - + qdma_dev->vqs[i].num_dequeues); + return -EBUSY; + } + } + for (i = 0; i < qdma_dev->num_vqs; i++) { + if (qdma_dev->vqs[i].fle_pool) { + rte_mempool_free(qdma_dev->vqs[i].fle_pool); + qdma_dev->vqs[i].fle_pool = NULL; + } + if (qdma_dev->vqs[i].ring_cntx_idx) { + rte_free(qdma_dev->vqs[i].ring_cntx_idx); + qdma_dev->vqs[i].ring_cntx_idx = NULL; + } + rxq = &dpdmai_dev->rx_queue[i]; + dpaa2_queue_storage_free(rxq, 1); + } + rte_free(qdma_dev->vqs); + qdma_dev->vqs = NULL; + qdma_dev->num_vqs = 0; + } - if (!devargs) - return 0; + /* Set up Rx Queues */ + for (i = 0; i < dev_conf->nb_vchans; i++) { + memset(&rx_queue_cfg, 0, sizeof(struct dpdmai_rx_queue_cfg)); + rxq = &dpdmai_dev->rx_queue[i]; + ret = dpdmai_set_rx_queue(&s_proc_mc_reg, + CMD_PRI_LOW, + dpdmai_dev->token, + i, 0, &rx_queue_cfg); + if (ret) { + DPAA2_QDMA_ERR("%s RXQ%d set failed(%d)", + dev->data->dev_name, i, ret); + return ret; + } + } - kvlist = rte_kvargs_parse(devargs->args, NULL); - if (!kvlist) - return 0; + /* Get Rx and Tx queues FQID's */ + for (i = 0; i < dev_conf->nb_vchans; i++) { + ret = dpdmai_get_rx_queue(&s_proc_mc_reg, CMD_PRI_LOW, + dpdmai_dev->token, i, 0, &rx_attr); + if (ret) { + DPAA2_QDMA_ERR("Get DPDMAI%d-RXQ%d failed(%d)", + dpdmai_dev->dpdmai_id, i, ret); + return ret; + } + dpdmai_dev->rx_queue[i].fqid = rx_attr.fqid; - if (!rte_kvargs_count(kvlist, key)) { - rte_kvargs_free(kvlist); - return 0; + ret = dpdmai_get_tx_queue(&s_proc_mc_reg, CMD_PRI_LOW, + dpdmai_dev->token, i, 0, &tx_attr); + if (ret) { + DPAA2_QDMA_ERR("Get DPDMAI%d-TXQ%d failed(%d)", + dpdmai_dev->dpdmai_id, i, ret); + return ret; + } + dpdmai_dev->tx_queue[i].fqid = tx_attr.fqid; } - if (rte_kvargs_process(kvlist, key, - check_devargs_handler, NULL) < 0) { - rte_kvargs_free(kvlist); - return 0; + /* Allocate Virtual Queues */ + qdma_dev->vqs = rte_zmalloc(NULL, + (sizeof(struct qdma_virt_queue) * dev_conf->nb_vchans), + RTE_CACHE_LINE_SIZE); + if (!qdma_dev->vqs) { + DPAA2_QDMA_ERR("%s: VQs(%d) alloc failed.", + dev->data->dev_name, dev_conf->nb_vchans); + return -ENOMEM; + } + for (i = 0; i < dev_conf->nb_vchans; i++) { + qdma_dev->vqs[i].vq_id = i; + rxq = &dpdmai_dev->rx_queue[i]; + /* Allocate DQ storage for the DPDMAI Rx 
queues */ + ret = dpaa2_queue_storage_alloc(rxq, 1); + if (ret) + goto alloc_failed; } - rte_kvargs_free(kvlist); - return 1; -} + qdma_dev->num_vqs = dev_conf->nb_vchans; + qdma_dev->is_silent = dev_conf->enable_silent; -/* Enable FD in Ultra Short format */ -void -rte_dpaa2_qdma_vchan_fd_us_enable(int16_t dev_id, uint16_t vchan) -{ - struct rte_dma_fp_object *obj = &rte_dma_fp_objs[dev_id]; - struct dpaa2_dpdmai_dev *dpdmai_dev = obj->dev_private; - struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev; + return 0; - qdma_dev->vqs[vchan].flags |= DPAA2_QDMA_VQ_FD_SHORT_FORMAT; -} +alloc_failed: + for (i = 0; i < dev_conf->nb_vchans; i++) { + rxq = &dpdmai_dev->rx_queue[i]; + dpaa2_queue_storage_free(rxq, 1); + } -/* Enable internal SG processing */ -void -rte_dpaa2_qdma_vchan_internal_sg_enable(int16_t dev_id, uint16_t vchan) -{ - struct rte_dma_fp_object *obj = &rte_dma_fp_objs[dev_id]; - struct dpaa2_dpdmai_dev *dpdmai_dev = obj->dev_private; - struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev; + rte_free(qdma_dev->vqs); + qdma_dev->vqs = NULL; + qdma_dev->num_vqs = 0; - qdma_dev->vqs[vchan].flags |= DPAA2_QDMA_VQ_FD_SG_FORMAT; + return ret; } -/* Enable RBP */ -void -rte_dpaa2_qdma_vchan_rbp_enable(int16_t dev_id, uint16_t vchan, - struct rte_dpaa2_qdma_rbp *rbp_config) +static int +dpaa2_qdma_vchan_rbp_set(struct qdma_virt_queue *vq, + const struct rte_dma_vchan_conf *conf) { - struct rte_dma_fp_object *obj = &rte_dma_fp_objs[dev_id]; - struct dpaa2_dpdmai_dev *dpdmai_dev = obj->dev_private; - struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev; + if (conf->direction == RTE_DMA_DIR_MEM_TO_DEV || + conf->direction == RTE_DMA_DIR_DEV_TO_DEV) { + if (conf->dst_port.port_type != RTE_DMA_PORT_PCIE) + return -EINVAL; + vq->rbp.enable = 1; + vq->rbp.dportid = conf->dst_port.pcie.coreid; + vq->rbp.dpfid = conf->dst_port.pcie.pfid; + if (conf->dst_port.pcie.vfen) { + vq->rbp.dvfa = 1; + vq->rbp.dvfid = conf->dst_port.pcie.vfid; + } + vq->rbp.drbp = 1; + } + if (conf->direction == RTE_DMA_DIR_DEV_TO_MEM || + conf->direction == RTE_DMA_DIR_DEV_TO_DEV) { + if (conf->src_port.port_type != RTE_DMA_PORT_PCIE) + return -EINVAL; + vq->rbp.enable = 1; + vq->rbp.sportid = conf->src_port.pcie.coreid; + vq->rbp.spfid = conf->src_port.pcie.pfid; + if (conf->src_port.pcie.vfen) { + vq->rbp.svfa = 1; + vq->rbp.dvfid = conf->src_port.pcie.vfid; + } + vq->rbp.srbp = 1; + } - memcpy(&qdma_dev->vqs[vchan].rbp, rbp_config, - sizeof(struct rte_dpaa2_qdma_rbp)); + return 0; } static int dpaa2_qdma_vchan_setup(struct rte_dma_dev *dev, uint16_t vchan, - const struct rte_dma_vchan_conf *conf, - uint32_t conf_sz) + const struct rte_dma_vchan_conf *conf, + uint32_t conf_sz) { struct dpaa2_dpdmai_dev *dpdmai_dev = dev->data->dev_private; struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev; uint32_t pool_size; - char ring_name[32]; char pool_name[64]; - int fd_long_format = 1; - int sg_enable = 0; + int ret; + uint64_t iova, va; DPAA2_QDMA_FUNC_TRACE(); RTE_SET_USED(conf_sz); - if (qdma_dev->vqs[vchan].flags & DPAA2_QDMA_VQ_FD_SG_FORMAT) - sg_enable = 1; - - if (qdma_dev->vqs[vchan].flags & DPAA2_QDMA_VQ_FD_SHORT_FORMAT) - fd_long_format = 0; + ret = dpaa2_qdma_vchan_rbp_set(&qdma_dev->vqs[vchan], conf); + if (ret) + return ret; - if (dev->data->dev_conf.enable_silent) - qdma_dev->vqs[vchan].flags |= DPAA2_QDMA_VQ_NO_RESPONSE; + if (dpaa2_qdma_get_devargs(dev->device->devargs, DPAA2_QDMA_FLE_PRE_POPULATE)) + qdma_dev->vqs[vchan].fle_pre_populate = 1; + else + qdma_dev->vqs[vchan].fle_pre_populate = 0; - if (sg_enable) { 
- if (qdma_dev->num_vqs != 1) { - DPAA2_QDMA_ERR( - "qDMA SG format only supports physical queue!"); - return -ENODEV; - } - if (!fd_long_format) { - DPAA2_QDMA_ERR( - "qDMA SG format only supports long FD format!"); - return -ENODEV; - } - pool_size = QDMA_FLE_SG_POOL_SIZE; - } else { - pool_size = QDMA_FLE_SINGLE_POOL_SIZE; - } + if (dpaa2_qdma_get_devargs(dev->device->devargs, DPAA2_QDMA_DESC_DEBUG)) + qdma_dev->vqs[vchan].flags |= DPAA2_QDMA_DESC_DEBUG_FLAG; + else + qdma_dev->vqs[vchan].flags &= (~DPAA2_QDMA_DESC_DEBUG_FLAG); - if (qdma_dev->num_vqs == 1) - qdma_dev->vqs[vchan].exclusive_hw_queue = 1; - else { - /* Allocate a Ring for Virtual Queue in VQ mode */ - snprintf(ring_name, sizeof(ring_name), "status ring %d %d", - dev->data->dev_id, vchan); - qdma_dev->vqs[vchan].status_ring = rte_ring_create(ring_name, - conf->nb_desc, rte_socket_id(), 0); - if (!qdma_dev->vqs[vchan].status_ring) { - DPAA2_QDMA_ERR("Status ring creation failed for vq"); - return rte_errno; - } - } + if (dpaa2_qdma_get_devargs(dev->device->devargs, DPAA2_QDMA_USING_SHORT_FD)) + qdma_dev->vqs[vchan].using_short_fd = 1; + else + qdma_dev->vqs[vchan].using_short_fd = 0; snprintf(pool_name, sizeof(pool_name), "qdma_fle_pool_dev%d_qid%d", dpdmai_dev->dpdmai_id, vchan); + pool_size = sizeof(struct qdma_cntx_sg); qdma_dev->vqs[vchan].fle_pool = rte_mempool_create(pool_name, - conf->nb_desc, pool_size, - QDMA_FLE_CACHE_SIZE(conf->nb_desc), 0, - NULL, NULL, NULL, NULL, SOCKET_ID_ANY, 0); + DPAA2_QDMA_MAX_DESC * 2, pool_size, + 512, 0, NULL, NULL, NULL, NULL, + SOCKET_ID_ANY, 0); if (!qdma_dev->vqs[vchan].fle_pool) { - DPAA2_QDMA_ERR("qdma_fle_pool create failed"); - return -ENOMEM; - } - - snprintf(pool_name, sizeof(pool_name), - "qdma_job_pool_dev%d_qid%d", dpdmai_dev->dpdmai_id, vchan); - qdma_dev->vqs[vchan].job_pool = rte_mempool_create(pool_name, - conf->nb_desc, pool_size, - QDMA_FLE_CACHE_SIZE(conf->nb_desc), 0, - NULL, NULL, NULL, NULL, SOCKET_ID_ANY, 0); - if (!qdma_dev->vqs[vchan].job_pool) { - DPAA2_QDMA_ERR("qdma_job_pool create failed"); + DPAA2_QDMA_ERR("%s create failed", pool_name); return -ENOMEM; } - - if (fd_long_format) { - if (sg_enable) { - qdma_dev->vqs[vchan].set_fd = dpdmai_dev_set_sg_fd_lf; - qdma_dev->vqs[vchan].get_job = dpdmai_dev_get_sg_job_lf; - } else { - if (dev->data->dev_conf.enable_silent) - qdma_dev->vqs[vchan].set_fd = - dpdmai_dev_set_multi_fd_lf_no_rsp; - else - qdma_dev->vqs[vchan].set_fd = - dpdmai_dev_set_multi_fd_lf; - qdma_dev->vqs[vchan].get_job = dpdmai_dev_get_single_job_lf; + iova = qdma_dev->vqs[vchan].fle_pool->mz->iova; + va = qdma_dev->vqs[vchan].fle_pool->mz->addr_64; + qdma_dev->vqs[vchan].fle_iova2va_offset = va - iova; + + if (qdma_dev->is_silent) { + ret = rte_mempool_get_bulk(qdma_dev->vqs[vchan].fle_pool, + (void **)qdma_dev->vqs[vchan].cntx_sg, + DPAA2_QDMA_MAX_DESC); + if (ret) { + DPAA2_QDMA_ERR("sg cntx get from %s for silent mode", + pool_name); + return ret; + } + ret = rte_mempool_get_bulk(qdma_dev->vqs[vchan].fle_pool, + (void **)qdma_dev->vqs[vchan].cntx_fle_sdd, + DPAA2_QDMA_MAX_DESC); + if (ret) { + DPAA2_QDMA_ERR("long cntx get from %s for silent mode", + pool_name); + return ret; } } else { - qdma_dev->vqs[vchan].set_fd = dpdmai_dev_set_fd_us; - qdma_dev->vqs[vchan].get_job = dpdmai_dev_get_job_us; - } - - if (dpaa2_qdma_get_devargs(dev->device->devargs, - DPAA2_QDMA_PREFETCH)) { - /* If no prefetch is configured. 
*/ - qdma_dev->vqs[vchan].dequeue_job = - dpdmai_dev_dequeue_multijob_prefetch; - DPAA2_QDMA_INFO("Prefetch RX Mode enabled"); - } else { - qdma_dev->vqs[vchan].dequeue_job = - dpdmai_dev_dequeue_multijob_no_prefetch; + qdma_dev->vqs[vchan].ring_cntx_idx = rte_malloc(NULL, + sizeof(struct qdma_cntx_idx_ring), + RTE_CACHE_LINE_SIZE); + if (!qdma_dev->vqs[vchan].ring_cntx_idx) { + DPAA2_QDMA_ERR("DQ response ring alloc failed."); + return -ENOMEM; + } + qdma_dev->vqs[vchan].ring_cntx_idx->start = 0; + qdma_dev->vqs[vchan].ring_cntx_idx->tail = 0; + qdma_dev->vqs[vchan].ring_cntx_idx->free_space = + QDMA_CNTX_IDX_RING_MAX_FREE; + qdma_dev->vqs[vchan].ring_cntx_idx->nb_in_ring = 0; + qdma_dev->vqs[vchan].fle_elem = rte_malloc(NULL, + sizeof(void *) * DPAA2_QDMA_MAX_DESC, + RTE_CACHE_LINE_SIZE); } qdma_dev->vqs[vchan].dpdmai_dev = dpdmai_dev; qdma_dev->vqs[vchan].nb_desc = conf->nb_desc; - qdma_dev->vqs[vchan].enqueue_job = dpdmai_dev_submit_multi; return 0; } @@ -1354,11 +1423,17 @@ static int dpaa2_qdma_start(struct rte_dma_dev *dev) { struct dpaa2_dpdmai_dev *dpdmai_dev = dev->data->dev_private; - struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev; + int ret; DPAA2_QDMA_FUNC_TRACE(); - qdma_dev->state = 1; + /* Enable the device */ + ret = dpdmai_enable(&s_proc_mc_reg, CMD_PRI_LOW, + dpdmai_dev->token); + if (ret) { + DPAA2_QDMA_ERR("Enabling device failed with err: %d", ret); + return ret; + } return 0; } @@ -1367,46 +1442,67 @@ static int dpaa2_qdma_stop(struct rte_dma_dev *dev) { struct dpaa2_dpdmai_dev *dpdmai_dev = dev->data->dev_private; - struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev; + int ret; DPAA2_QDMA_FUNC_TRACE(); - qdma_dev->state = 0; + /* Disable the device */ + ret = dpdmai_disable(&s_proc_mc_reg, CMD_PRI_LOW, + dpdmai_dev->token); + if (ret) { + DPAA2_QDMA_ERR("Disable device failed with err: %d", ret); + return ret; + } return 0; } static int -dpaa2_qdma_reset(struct rte_dma_dev *dev) +dpaa2_qdma_close(struct rte_dma_dev *dev) { struct dpaa2_dpdmai_dev *dpdmai_dev = dev->data->dev_private; struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev; + struct dpaa2_queue *rxq; int i; DPAA2_QDMA_FUNC_TRACE(); - /* In case QDMA device is not in stopped state, return -EBUSY */ - if (qdma_dev->state == 1) { - DPAA2_QDMA_ERR( - "Device is in running state. 
Stop before reset."); - return -EBUSY; - } + if (!qdma_dev) + return 0; /* In case there are pending jobs on any VQ, return -EBUSY */ for (i = 0; i < qdma_dev->num_vqs; i++) { - if (qdma_dev->vqs[i].in_use && (qdma_dev->vqs[i].num_enqueues != - qdma_dev->vqs[i].num_dequeues)) { - DPAA2_QDMA_ERR("Jobs are still pending on VQ: %d", i); + if ((qdma_dev->vqs[i].num_enqueues != + qdma_dev->vqs[i].num_dequeues) && + !qdma_dev->is_silent) { + DPAA2_QDMA_ERR("VQ(%d) pending: eq(%"PRIu64") != dq(%"PRId64")", + i, qdma_dev->vqs[i].num_enqueues, + qdma_dev->vqs[i].num_dequeues); return -EBUSY; } } - /* Reset and free virtual queues */ + /* Free RXQ storages */ for (i = 0; i < qdma_dev->num_vqs; i++) { - rte_ring_free(qdma_dev->vqs[i].status_ring); + rxq = &dpdmai_dev->rx_queue[i]; + dpaa2_queue_storage_free(rxq, 1); + } + + if (qdma_dev->vqs) { + /* Free RXQ fle pool */ + for (i = 0; i < qdma_dev->num_vqs; i++) { + if (qdma_dev->vqs[i].fle_pool) { + rte_mempool_free(qdma_dev->vqs[i].fle_pool); + qdma_dev->vqs[i].fle_pool = NULL; + } + if (qdma_dev->vqs[i].ring_cntx_idx) { + rte_free(qdma_dev->vqs[i].ring_cntx_idx); + qdma_dev->vqs[i].ring_cntx_idx = NULL; + } + } + rte_free(qdma_dev->vqs); + qdma_dev->vqs = NULL; } - rte_free(qdma_dev->vqs); - qdma_dev->vqs = NULL; /* Reset QDMA device structure */ qdma_dev->num_vqs = 0; @@ -1415,18 +1511,8 @@ dpaa2_qdma_reset(struct rte_dma_dev *dev) } static int -dpaa2_qdma_close(__rte_unused struct rte_dma_dev *dev) -{ - DPAA2_QDMA_FUNC_TRACE(); - - dpaa2_qdma_reset(dev); - - return 0; -} - -static int -dpaa2_qdma_stats_get(const struct rte_dma_dev *dmadev, uint16_t vchan, - struct rte_dma_stats *rte_stats, uint32_t size) +dpaa2_qdma_stats_get(const struct rte_dma_dev *dmadev, + uint16_t vchan, struct rte_dma_stats *rte_stats, uint32_t size) { struct dpaa2_dpdmai_dev *dpdmai_dev = dmadev->data->dev_private; struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev; @@ -1481,123 +1567,44 @@ static int dpaa2_dpdmai_dev_uninit(struct rte_dma_dev *dev) { struct dpaa2_dpdmai_dev *dpdmai_dev = dev->data->dev_private; + struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev; int ret; DPAA2_QDMA_FUNC_TRACE(); - ret = dpdmai_disable(&dpdmai_dev->dpdmai, CMD_PRI_LOW, - dpdmai_dev->token); - if (ret) - DPAA2_QDMA_ERR("dmdmai disable failed"); + if (rte_eal_process_type() == RTE_PROC_SECONDARY) { + DPAA2_QDMA_DEBUG("Un-attach DMA(%d) in the 2nd proess.", + dpdmai_dev->dpdmai_id); + return 0; + } - /* Set up the DQRR storage for Rx */ - struct dpaa2_queue *rxq = &(dpdmai_dev->rx_queue[0]); + /* Close the device at underlying layer*/ + ret = dpdmai_close(&s_proc_mc_reg, CMD_PRI_LOW, + dpdmai_dev->token); + if (ret) { + DPAA2_QDMA_ERR("dpdmai(%d) close failed(%d)", + dpdmai_dev->dpdmai_id, ret); - if (rxq->q_storage) { - dpaa2_free_dq_storage(rxq->q_storage); - rte_free(rxq->q_storage); + return ret; } - /* Close the device at underlying layer*/ - ret = dpdmai_close(&dpdmai_dev->dpdmai, CMD_PRI_LOW, dpdmai_dev->token); - if (ret) - DPAA2_QDMA_ERR("Failure closing dpdmai device"); + if (qdma_dev) { + rte_free(qdma_dev); + dpdmai_dev->qdma_dev = NULL; + } - return 0; + return ret; } static int -dpaa2_dpdmai_dev_init(struct rte_dma_dev *dev, int dpdmai_id) +dpaa2_dpdmai_dev_init(struct rte_dma_dev *dev, uint32_t dpdmai_id) { struct dpaa2_dpdmai_dev *dpdmai_dev = dev->data->dev_private; - struct dpdmai_rx_queue_cfg rx_queue_cfg; struct dpdmai_attr attr; - struct dpdmai_rx_queue_attr rx_attr; - struct dpdmai_tx_queue_attr tx_attr; - struct dpaa2_queue *rxq; - int ret; + int ret, err; 
DPAA2_QDMA_FUNC_TRACE(); - /* Open DPDMAI device */ - dpdmai_dev->dpdmai_id = dpdmai_id; - dpdmai_dev->dpdmai.regs = dpaa2_get_mcp_ptr(MC_PORTAL_INDEX); - dpdmai_dev->qdma_dev = rte_malloc(NULL, sizeof(struct qdma_device), - RTE_CACHE_LINE_SIZE); - ret = dpdmai_open(&dpdmai_dev->dpdmai, CMD_PRI_LOW, - dpdmai_dev->dpdmai_id, &dpdmai_dev->token); - if (ret) { - DPAA2_QDMA_ERR("dpdmai_open() failed with err: %d", ret); - return ret; - } - - /* Get DPDMAI attributes */ - ret = dpdmai_get_attributes(&dpdmai_dev->dpdmai, CMD_PRI_LOW, - dpdmai_dev->token, &attr); - if (ret) { - DPAA2_QDMA_ERR("dpdmai get attributes failed with err: %d", - ret); - goto init_err; - } - dpdmai_dev->num_queues = attr.num_of_queues; - - /* Set up Rx Queue */ - memset(&rx_queue_cfg, 0, sizeof(struct dpdmai_rx_queue_cfg)); - ret = dpdmai_set_rx_queue(&dpdmai_dev->dpdmai, - CMD_PRI_LOW, - dpdmai_dev->token, - 0, 0, &rx_queue_cfg); - if (ret) { - DPAA2_QDMA_ERR("Setting Rx queue failed with err: %d", - ret); - goto init_err; - } - - /* Allocate DQ storage for the DPDMAI Rx queues */ - rxq = &(dpdmai_dev->rx_queue[0]); - rxq->q_storage = rte_malloc("dq_storage", - sizeof(struct queue_storage_info_t), - RTE_CACHE_LINE_SIZE); - if (!rxq->q_storage) { - DPAA2_QDMA_ERR("q_storage allocation failed"); - ret = -ENOMEM; - goto init_err; - } - - memset(rxq->q_storage, 0, sizeof(struct queue_storage_info_t)); - ret = dpaa2_alloc_dq_storage(rxq->q_storage); - if (ret) { - DPAA2_QDMA_ERR("dpaa2_alloc_dq_storage failed"); - goto init_err; - } - - /* Get Rx and Tx queues FQID */ - ret = dpdmai_get_rx_queue(&dpdmai_dev->dpdmai, CMD_PRI_LOW, - dpdmai_dev->token, 0, 0, &rx_attr); - if (ret) { - DPAA2_QDMA_ERR("Reading device failed with err: %d", - ret); - goto init_err; - } - dpdmai_dev->rx_queue[0].fqid = rx_attr.fqid; - - ret = dpdmai_get_tx_queue(&dpdmai_dev->dpdmai, CMD_PRI_LOW, - dpdmai_dev->token, 0, 0, &tx_attr); - if (ret) { - DPAA2_QDMA_ERR("Reading device failed with err: %d", - ret); - goto init_err; - } - dpdmai_dev->tx_queue[0].fqid = tx_attr.fqid; - - /* Enable the device */ - ret = dpdmai_enable(&dpdmai_dev->dpdmai, CMD_PRI_LOW, - dpdmai_dev->token); - if (ret) { - DPAA2_QDMA_ERR("Enabling device failed with err: %d", ret); - goto init_err; - } - if (!dpaa2_coherent_no_alloc_cache) { if (dpaa2_svr_family == SVR_LX2160A) { dpaa2_coherent_no_alloc_cache = @@ -1612,24 +1619,76 @@ dpaa2_dpdmai_dev_init(struct rte_dma_dev *dev, int dpdmai_id) } } - DPAA2_QDMA_DEBUG("Initialized dpdmai object successfully"); + if (!s_proc_mc_reg.regs) + s_proc_mc_reg.regs = dpaa2_get_mcp_ptr(MC_PORTAL_INDEX); + + if (rte_eal_process_type() == RTE_PROC_SECONDARY) { + DPAA2_QDMA_DEBUG("Attach DMA(%d) in the 2nd proess.", + dpdmai_id); + if (dpdmai_id != dpdmai_dev->dpdmai_id) { + DPAA2_QDMA_ERR("Fatal: Attach DMA(%d) to DMA(%d)", + dpdmai_id, dpdmai_dev->dpdmai_id); + return -EINVAL; + } + if (!dpdmai_dev->qdma_dev) { + DPAA2_QDMA_ERR("Fatal: DMA(%d) qdma_dev NOT allocated", + dpdmai_id); + return -ENOMEM; + } + if (dpdmai_dev->qdma_dev->num_vqs) { + DPAA2_QDMA_WARN("DMA(%d) %d vqs were configured", + dpdmai_id, dpdmai_dev->qdma_dev->num_vqs); + } + + return 0; + } + + /* Open DPDMAI device */ + dpdmai_dev->dpdmai_id = dpdmai_id; + + if (dpdmai_dev->qdma_dev) { + rte_free(dpdmai_dev->qdma_dev); + dpdmai_dev->qdma_dev = NULL; + } + dpdmai_dev->qdma_dev = rte_zmalloc(NULL, + sizeof(struct qdma_device), RTE_CACHE_LINE_SIZE); + if (!dpdmai_dev->qdma_dev) { + DPAA2_QDMA_ERR("DMA(%d) alloc memory failed", + dpdmai_id); + return -ENOMEM; + } + ret = 
dpdmai_open(&s_proc_mc_reg, CMD_PRI_LOW, + dpdmai_dev->dpdmai_id, &dpdmai_dev->token); + if (ret) { + DPAA2_QDMA_ERR("%s: dma(%d) open failed(%d)", + __func__, dpdmai_dev->dpdmai_id, ret); + return ret; + } - /* Reset the QDMA device */ - ret = dpaa2_qdma_reset(dev); + /* Get DPDMAI attributes */ + ret = dpdmai_get_attributes(&s_proc_mc_reg, CMD_PRI_LOW, + dpdmai_dev->token, &attr); if (ret) { - DPAA2_QDMA_ERR("Resetting QDMA failed"); - goto init_err; + DPAA2_QDMA_ERR("%s: dma(%d) get attributes failed(%d)", + __func__, dpdmai_dev->dpdmai_id, ret); + err = dpdmai_close(&s_proc_mc_reg, CMD_PRI_LOW, + dpdmai_dev->token); + if (err) { + DPAA2_QDMA_ERR("dpdmai(%d) close failed(%d)", + dpdmai_dev->dpdmai_id, err); + } + return ret; } + dpdmai_dev->num_queues = attr.num_of_queues; + + DPAA2_QDMA_DEBUG("DMA(%d) is initialized.", dpdmai_id); return 0; -init_err: - dpaa2_dpdmai_dev_uninit(dev); - return ret; } static int dpaa2_qdma_probe(struct rte_dpaa2_driver *dpaa2_drv, - struct rte_dpaa2_device *dpaa2_dev) + struct rte_dpaa2_device *dpaa2_dev) { struct rte_dma_dev *dmadev; int ret; @@ -1639,8 +1698,8 @@ dpaa2_qdma_probe(struct rte_dpaa2_driver *dpaa2_drv, RTE_SET_USED(dpaa2_drv); dmadev = rte_dma_pmd_allocate(dpaa2_dev->device.name, - rte_socket_id(), - sizeof(struct dpaa2_dpdmai_dev)); + rte_socket_id(), + sizeof(struct dpaa2_dpdmai_dev)); if (!dmadev) { DPAA2_QDMA_ERR("Unable to allocate dmadevice"); return -EINVAL; @@ -1650,10 +1709,10 @@ dpaa2_qdma_probe(struct rte_dpaa2_driver *dpaa2_drv, dmadev->dev_ops = &dpaa2_qdma_ops; dmadev->device = &dpaa2_dev->device; dmadev->fp_obj->dev_private = dmadev->data->dev_private; - dmadev->fp_obj->copy = dpaa2_qdma_enqueue; + dmadev->fp_obj->copy = dpaa2_qdma_copy; + dmadev->fp_obj->copy_sg = dpaa2_qdma_copy_sg; dmadev->fp_obj->submit = dpaa2_qdma_submit; dmadev->fp_obj->completed = dpaa2_qdma_dequeue; - dmadev->fp_obj->completed_status = dpaa2_qdma_dequeue_status; dmadev->fp_obj->burst_capacity = dpaa2_qdma_burst_capacity; /* Invoke PMD device initialization function */ @@ -1695,5 +1754,7 @@ static struct rte_dpaa2_driver rte_dpaa2_qdma_pmd = { RTE_PMD_REGISTER_DPAA2(dpaa2_qdma, rte_dpaa2_qdma_pmd); RTE_PMD_REGISTER_PARAM_STRING(dpaa2_qdma, - "no_prefetch= "); + DPAA2_QDMA_FLE_PRE_POPULATE "=" + DPAA2_QDMA_DESC_DEBUG"=" + DPAA2_QDMA_USING_SHORT_FD"="); RTE_LOG_REGISTER_DEFAULT(dpaa2_qdma_logtype, INFO); diff --git a/drivers/dma/dpaa2/dpaa2_qdma.h b/drivers/dma/dpaa2/dpaa2_qdma.h index 5941b5a5d3..0fd1debaf8 100644 --- a/drivers/dma/dpaa2/dpaa2_qdma.h +++ b/drivers/dma/dpaa2/dpaa2_qdma.h @@ -1,63 +1,16 @@ /* SPDX-License-Identifier: BSD-3-Clause - * Copyright 2018-2022 NXP + * Copyright 2018-2024 NXP */ #ifndef _DPAA2_QDMA_H_ #define _DPAA2_QDMA_H_ -#define DPAA2_QDMA_MAX_DESC 1024 -#define DPAA2_QDMA_MIN_DESC 1 -#define DPAA2_QDMA_MAX_VHANS 64 - -#define DPAA2_QDMA_VQ_FD_SHORT_FORMAT (1ULL << 0) -#define DPAA2_QDMA_VQ_FD_SG_FORMAT (1ULL << 1) -#define DPAA2_QDMA_VQ_NO_RESPONSE (1ULL << 2) - -#define DPAA2_QDMA_MAX_FLE 3 -#define DPAA2_QDMA_MAX_SDD 2 - -#define DPAA2_QDMA_MAX_SG_NB 64 - -#define DPAA2_DPDMAI_MAX_QUEUES 1 - -/** FLE single job pool size: job pointer(uint64_t) + - * 3 Frame list + 2 source/destination descriptor. - */ -#define QDMA_FLE_SINGLE_POOL_SIZE (sizeof(uint64_t) + \ - sizeof(struct qbman_fle) * DPAA2_QDMA_MAX_FLE + \ - sizeof(struct qdma_sdd) * DPAA2_QDMA_MAX_SDD) - -/** FLE sg jobs pool size: job number(uint64_t) + - * 3 Frame list + 2 source/destination descriptor + - * 64 (src + dst) sg entries + 64 jobs pointers. 
- */ -#define QDMA_FLE_SG_POOL_SIZE (sizeof(uint64_t) + \ - sizeof(struct qbman_fle) * DPAA2_QDMA_MAX_FLE + \ - sizeof(struct qdma_sdd) * DPAA2_QDMA_MAX_SDD + \ - sizeof(struct qdma_sg_entry) * (DPAA2_QDMA_MAX_SG_NB * 2) + \ - sizeof(struct rte_qdma_job *) * DPAA2_QDMA_MAX_SG_NB) +#include "portal/dpaa2_hw_pvt.h" +#include "portal/dpaa2_hw_dpio.h" -#define QDMA_FLE_JOB_NB_OFFSET 0 - -#define QDMA_FLE_SINGLE_JOB_OFFSET 0 - -#define QDMA_FLE_FLE_OFFSET \ - (QDMA_FLE_JOB_NB_OFFSET + sizeof(uint64_t)) - -#define QDMA_FLE_SDD_OFFSET \ - (QDMA_FLE_FLE_OFFSET + \ - sizeof(struct qbman_fle) * DPAA2_QDMA_MAX_FLE) - -#define QDMA_FLE_SG_ENTRY_OFFSET \ - (QDMA_FLE_SDD_OFFSET + \ - sizeof(struct qdma_sdd) * DPAA2_QDMA_MAX_SDD) - -#define QDMA_FLE_SG_JOBS_OFFSET \ - (QDMA_FLE_SG_ENTRY_OFFSET + \ - sizeof(struct qdma_sg_entry) * DPAA2_QDMA_MAX_SG_NB * 2) +#define DPAA2_QDMA_MAX_VHANS 64 -/** FLE pool cache size */ -#define QDMA_FLE_CACHE_SIZE(_num) (_num/(RTE_MAX_LCORE * 2)) +#define DPAA2_DPDMAI_MAX_QUEUES 16 /** Notification by FQD_CTX[fqid] */ #define QDMA_SER_CTX (1 << 8) @@ -76,9 +29,14 @@ #define DPAA2_LX2_COHERENT_ALLOCATE_CACHE 0xb /** Maximum possible H/W Queues on each core */ -#define MAX_HW_QUEUE_PER_CORE 64 +#define MAX_HW_QUEUE_PER_CORE 64 + +#define DPAA2_QDMA_FD_FLUSH_FORMAT 0x0 +#define DPAA2_QDMA_FD_LONG_FORMAT 0x1 +#define DPAA2_QDMA_FD_SHORT_FORMAT 0x3 -#define QDMA_RBP_UPPER_ADDRESS_MASK (0xfff0000000000) +#define DPAA2_QDMA_BMT_ENABLE 0x1 +#define DPAA2_QDMA_BMT_DISABLE 0x0 /** Source/Destination Descriptor */ struct qdma_sdd { @@ -135,8 +93,8 @@ struct qdma_sdd { #define QDMA_SG_SL_SHORT 0x1 /* short length */ #define QDMA_SG_SL_LONG 0x0 /* long length */ #define QDMA_SG_F 0x1 /* last sg entry */ -#define QDMA_SG_BMT_ENABLE 0x1 -#define QDMA_SG_BMT_DISABLE 0x0 +#define QDMA_SG_BMT_ENABLE DPAA2_QDMA_BMT_ENABLE +#define QDMA_SG_BMT_DISABLE DPAA2_QDMA_BMT_DISABLE struct qdma_sg_entry { uint32_t addr_lo; /* address 0:31 */ @@ -166,12 +124,119 @@ struct qdma_sg_entry { }; } __rte_packed; +struct dpaa2_qdma_rbp { + uint32_t use_ultrashort:1; + uint32_t enable:1; + /** + * dportid: + * 0000 PCI-Express 1 + * 0001 PCI-Express 2 + * 0010 PCI-Express 3 + * 0011 PCI-Express 4 + * 0100 PCI-Express 5 + * 0101 PCI-Express 6 + */ + uint32_t dportid:4; + uint32_t dpfid:2; + uint32_t dvfid:6; + uint32_t dvfa:1; + /*using route by port for destination */ + uint32_t drbp:1; + /** + * sportid: + * 0000 PCI-Express 1 + * 0001 PCI-Express 2 + * 0010 PCI-Express 3 + * 0011 PCI-Express 4 + * 0100 PCI-Express 5 + * 0101 PCI-Express 6 + */ + uint32_t sportid:4; + uint32_t spfid:2; + uint32_t svfid:6; + uint32_t svfa:1; + /* using route by port for source */ + uint32_t srbp:1; + uint32_t rsv:2; +}; + +enum dpaa2_qdma_fd_type { + DPAA2_QDMA_FD_SHORT = 1, + DPAA2_QDMA_FD_LONG = 2, + DPAA2_QDMA_FD_SG = 3 +}; + +#define DPAA2_QDMA_FD_ATT_TYPE_OFFSET 13 +#define DPAA2_QDMA_FD_ATT_MAX_IDX \ + ((1 << DPAA2_QDMA_FD_ATT_TYPE_OFFSET) - 1) +#define DPAA2_QDMA_FD_ATT_TYPE(att) \ + (att >> DPAA2_QDMA_FD_ATT_TYPE_OFFSET) +#define DPAA2_QDMA_FD_ATT_CNTX(att) \ + (att & DPAA2_QDMA_FD_ATT_MAX_IDX) + +#define DPAA2_QDMA_MAX_DESC ((DPAA2_QDMA_FD_ATT_MAX_IDX + 1) / 2) +#define DPAA2_QDMA_MIN_DESC 1 + +static inline void +dpaa2_qdma_fd_set_addr(struct qbman_fd *fd, + uint64_t addr) +{ + fd->simple_ddr.saddr_lo = lower_32_bits(addr); + fd->simple_ddr.saddr_hi = upper_32_bits(addr); +} + +static inline void +dpaa2_qdma_fd_save_att(struct qbman_fd *fd, + uint16_t job_idx, enum dpaa2_qdma_fd_type type) +{ + RTE_ASSERT(job_idx <= 
DPAA2_QDMA_FD_ATT_MAX_IDX); + fd->simple_ddr.rsv1_att = job_idx | + (type << DPAA2_QDMA_FD_ATT_TYPE_OFFSET); +} + +static inline uint16_t +dpaa2_qdma_fd_get_att(const struct qbman_fd *fd) +{ + return fd->simple_ddr.rsv1_att; +} + +enum { + DPAA2_QDMA_SDD_FLE, + DPAA2_QDMA_SRC_FLE, + DPAA2_QDMA_DST_FLE, + DPAA2_QDMA_MAX_FLE +}; + +enum { + DPAA2_QDMA_SRC_SDD, + DPAA2_QDMA_DST_SDD, + DPAA2_QDMA_MAX_SDD +}; + +struct qdma_cntx_fle_sdd { + struct qbman_fle fle[DPAA2_QDMA_MAX_FLE]; + struct qdma_sdd sdd[DPAA2_QDMA_MAX_SDD]; +} __rte_packed; + +struct qdma_cntx_sg { + struct qdma_cntx_fle_sdd fle_sdd; + struct qdma_sg_entry sg_src_entry[RTE_DPAAX_QDMA_JOB_SUBMIT_MAX]; + struct qdma_sg_entry sg_dst_entry[RTE_DPAAX_QDMA_JOB_SUBMIT_MAX]; + uint16_t cntx_idx[RTE_DPAAX_QDMA_JOB_SUBMIT_MAX]; + uint16_t job_nb; + uint16_t rsv[3]; +} __rte_packed; + +#define DPAA2_QDMA_IDXADDR_FROM_SG_FLAG(flag) \ + ((void *)(uintptr_t)((flag) - ((flag) & RTE_DPAAX_QDMA_SG_IDX_ADDR_MASK))) + +#define DPAA2_QDMA_IDX_FROM_FLAG(flag) \ + ((flag) >> RTE_DPAAX_QDMA_COPY_IDX_OFFSET) + /** Represents a DPDMAI device */ struct dpaa2_dpdmai_dev { /** Pointer to Next device instance */ TAILQ_ENTRY(dpaa2_qdma_device) next; - /** handle to DPDMAI object */ - struct fsl_mc_io dpdmai; /** HW ID for DPDMAI object */ uint32_t dpdmai_id; /** Tocken of this device */ @@ -185,63 +250,54 @@ struct dpaa2_dpdmai_dev { struct qdma_device *qdma_dev; }; -struct qdma_virt_queue; - -typedef uint16_t (qdma_get_job_t)(struct qdma_virt_queue *qdma_vq, - const struct qbman_fd *fd, - struct rte_dpaa2_qdma_job **job, - uint16_t *nb_jobs); -typedef int (qdma_set_fd_t)(struct qdma_virt_queue *qdma_vq, - struct qbman_fd *fd, - struct rte_dpaa2_qdma_job **job, - uint16_t nb_jobs); - -typedef int (qdma_dequeue_multijob_t)( - struct qdma_virt_queue *qdma_vq, - uint16_t *vq_id, - struct rte_dpaa2_qdma_job **job, - uint16_t nb_jobs); +#define QDMA_CNTX_IDX_RING_EXTRA_SPACE 64 +#define QDMA_CNTX_IDX_RING_MAX_FREE \ + (DPAA2_QDMA_MAX_DESC - QDMA_CNTX_IDX_RING_EXTRA_SPACE) +struct qdma_cntx_idx_ring { + uint16_t cntx_idx_ring[DPAA2_QDMA_MAX_DESC]; + uint16_t start; + uint16_t tail; + uint16_t free_space; + uint16_t nb_in_ring; +}; -typedef int (qdma_enqueue_multijob_t)( - struct qdma_virt_queue *qdma_vq, - struct rte_dpaa2_qdma_job **job, - uint16_t nb_jobs); +#define DPAA2_QDMA_DESC_DEBUG_FLAG (1 << 0) /** Represents a QDMA virtual queue */ struct qdma_virt_queue { - /** Status ring of the virtual queue */ - struct rte_ring *status_ring; /** Associated hw queue */ struct dpaa2_dpdmai_dev *dpdmai_dev; /** FLE pool for the queue */ struct rte_mempool *fle_pool; + uint64_t fle_iova2va_offset; + void **fle_elem; /** Route by port */ - struct rte_dpaa2_qdma_rbp rbp; + struct dpaa2_qdma_rbp rbp; /** States if this vq is in use or not */ - uint8_t in_use; - /** States if this vq has exclusively associated hw queue */ - uint8_t exclusive_hw_queue; + uint8_t fle_pre_populate; /** Number of descriptor for the virtual DMA channel */ uint16_t nb_desc; /* Total number of enqueues on this VQ */ uint64_t num_enqueues; /* Total number of dequeues from this VQ */ uint64_t num_dequeues; + uint64_t copy_num; uint16_t vq_id; uint32_t flags; + struct qbman_fd fd[DPAA2_QDMA_MAX_DESC]; + uint16_t fd_idx; + struct qdma_cntx_idx_ring *ring_cntx_idx; + + /**Used for silent enabled*/ + struct qdma_cntx_sg *cntx_sg[DPAA2_QDMA_MAX_DESC]; + struct qdma_cntx_fle_sdd *cntx_fle_sdd[DPAA2_QDMA_MAX_DESC]; + uint16_t silent_idx; - struct rte_dpaa2_qdma_job *job_list[DPAA2_QDMA_MAX_DESC]; - struct 
rte_mempool *job_pool; int num_valid_jobs; + int using_short_fd; struct rte_dma_stats stats; - - qdma_set_fd_t *set_fd; - qdma_get_job_t *get_job; - - qdma_dequeue_multijob_t *dequeue_job; - qdma_enqueue_multijob_t *enqueue_job; }; /** Represents a QDMA device. */ @@ -250,8 +306,7 @@ struct qdma_device { struct qdma_virt_queue *vqs; /** Total number of VQ's */ uint16_t num_vqs; - /** Device state - started or stopped */ - uint8_t state; + uint8_t is_silent; }; #endif /* _DPAA2_QDMA_H_ */ diff --git a/drivers/dma/dpaa2/meson.build b/drivers/dma/dpaa2/meson.build index a99151e2a5..a523f5edb4 100644 --- a/drivers/dma/dpaa2/meson.build +++ b/drivers/dma/dpaa2/meson.build @@ -1,5 +1,5 @@ # SPDX-License-Identifier: BSD-3-Clause -# Copyright 2021 NXP +# Copyright 2021, 2024 NXP if not is_linux build = false @@ -14,5 +14,3 @@ sources = files('dpaa2_qdma.c') if cc.has_argument('-Wno-pointer-arith') cflags += '-Wno-pointer-arith' endif - -headers = files('rte_pmd_dpaa2_qdma.h') diff --git a/drivers/dma/dpaa2/rte_pmd_dpaa2_qdma.h b/drivers/dma/dpaa2/rte_pmd_dpaa2_qdma.h deleted file mode 100644 index 5a8da46d12..0000000000 --- a/drivers/dma/dpaa2/rte_pmd_dpaa2_qdma.h +++ /dev/null @@ -1,177 +0,0 @@ -/* SPDX-License-Identifier: BSD-3-Clause - * Copyright 2021-2022 NXP - */ - -#ifndef _RTE_PMD_DPAA2_QDMA_H_ -#define _RTE_PMD_DPAA2_QDMA_H_ - -#include - -/** States if the source addresses is physical. */ -#define RTE_DPAA2_QDMA_JOB_SRC_PHY (1ULL << 30) - -/** States if the destination addresses is physical. */ -#define RTE_DPAA2_QDMA_JOB_DEST_PHY (1ULL << 31) - -struct rte_dpaa2_qdma_rbp { - uint32_t use_ultrashort:1; - uint32_t enable:1; - /** - * dportid: - * 0000 PCI-Express 1 - * 0001 PCI-Express 2 - * 0010 PCI-Express 3 - * 0011 PCI-Express 4 - * 0100 PCI-Express 5 - * 0101 PCI-Express 6 - */ - uint32_t dportid:4; - uint32_t dpfid:2; - uint32_t dvfid:6; - /*using route by port for destination */ - uint32_t drbp:1; - /** - * sportid: - * 0000 PCI-Express 1 - * 0001 PCI-Express 2 - * 0010 PCI-Express 3 - * 0011 PCI-Express 4 - * 0100 PCI-Express 5 - * 0101 PCI-Express 6 - */ - uint32_t sportid:4; - uint32_t spfid:2; - uint32_t svfid:6; - /* using route by port for source */ - uint32_t srbp:1; - /* Virtual Function Active */ - uint32_t vfa:1; - uint32_t rsv:3; -}; - -/** Determines a QDMA job */ -struct rte_dpaa2_qdma_job { - /** Source Address from where DMA is (to be) performed */ - uint64_t src; - /** Destination Address where DMA is (to be) done */ - uint64_t dest; - /** Length of the DMA operation in bytes. */ - uint32_t len; - /** See RTE_QDMA_JOB_ flags */ - uint32_t flags; - /** - * Status of the transaction. - * This is filled in the dequeue operation by the driver. - * upper 8bits acc_err for route by port. - * lower 8bits fd error - */ - uint16_t status; - uint16_t vq_id; - /** - * FLE pool element maintained by user, in case no qDMA response. - * Note: the address must be allocated from DPDK memory pool. - */ - void *usr_elem; -}; - -/** - * @warning - * @b EXPERIMENTAL: this API may change without prior notice. - * - * Enable FD in Ultra Short format on a channel. This API should be - * called before calling 'rte_dma_vchan_setup()' API. - * - * @param dev_id - * The identifier of the device. - * @param vchan - * The identifier of virtual DMA channel. - */ -__rte_experimental -void rte_dpaa2_qdma_vchan_fd_us_enable(int16_t dev_id, uint16_t vchan); - -/** - * @warning - * @b EXPERIMENTAL: this API may change without prior notice. - * - * Enable internal SG processing on a channel. 
This API should be - * called before calling 'rte_dma_vchan_setup()' API. - * - * @param dev_id - * The identifier of the device. - * @param vchan - * The identifier of virtual DMA channel. - */ -__rte_experimental -void rte_dpaa2_qdma_vchan_internal_sg_enable(int16_t dev_id, uint16_t vchan); - -/** - * @warning - * @b EXPERIMENTAL: this API may change without prior notice. - * - * Enable Route-by-port on a channel. This API should be - * called before calling 'rte_dma_vchan_setup()' API. - * - * @param dev_id - * The identifier of the device. - * @param vchan - * The identifier of virtual DMA channel. - * @param rbp_config - * Configuration for route-by-port - */ -__rte_experimental -void rte_dpaa2_qdma_vchan_rbp_enable(int16_t dev_id, uint16_t vchan, - struct rte_dpaa2_qdma_rbp *rbp_config); - -/** - * @warning - * @b EXPERIMENTAL: this API may change without prior notice. - * - * Enqueue a copy operation onto the virtual DMA channel for silent mode, - * when dequeue is not required. - * - * This queues up a copy operation to be performed by hardware, if the 'flags' - * parameter contains RTE_DMA_OP_FLAG_SUBMIT then trigger doorbell to begin - * this operation, otherwise do not trigger doorbell. - * - * @param dev_id - * The identifier of the device. - * @param vchan - * The identifier of virtual DMA channel. - * @param jobs - * Jobs to be submitted to QDMA. - * @param nb_cpls - * Number of DMA jobs. - * - * @return - * - >= 0..Number of enqueued job. - * - -ENOSPC: if no space left to enqueue. - * - other values < 0 on failure. - */ -__rte_experimental -int rte_dpaa2_qdma_copy_multi(int16_t dev_id, uint16_t vchan, - struct rte_dpaa2_qdma_job **jobs, uint16_t nb_cpls); - -/** - * @warning - * @b EXPERIMENTAL: this API may change without prior notice. - * - * Return the number of operations that have been successfully completed. - * - * @param dev_id - * The identifier of the device. - * @param vchan - * The identifier of virtual DMA channel. - * @param jobs - * Jobs completed by QDMA. - * @param nb_cpls - * Number of completed DMA jobs. - * - * @return - * The number of operations that successfully completed. This return value - * must be less than or equal to the value of nb_cpls. 
- */ -__rte_experimental -uint16_t rte_dpaa2_qdma_completed_multi(int16_t dev_id, uint16_t vchan, - struct rte_dpaa2_qdma_job **jobs, uint16_t nb_cpls); - -#endif /* _RTE_PMD_DPAA2_QDMA_H_ */ diff --git a/drivers/dma/dpaa2/version.map b/drivers/dma/dpaa2/version.map deleted file mode 100644 index fc16517f7a..0000000000 --- a/drivers/dma/dpaa2/version.map +++ /dev/null @@ -1,14 +0,0 @@ -DPDK_25 { - local: *; -}; - -EXPERIMENTAL { - global: - - # added in 22.07 - rte_dpaa2_qdma_completed_multi; - rte_dpaa2_qdma_copy_multi; - rte_dpaa2_qdma_vchan_fd_us_enable; - rte_dpaa2_qdma_vchan_internal_sg_enable; - rte_dpaa2_qdma_vchan_rbp_enable; -}; diff --git a/drivers/dma/hisilicon/hisi_dmadev.h b/drivers/dma/hisilicon/hisi_dmadev.h index a57b5c759a..786fe3cc0e 100644 --- a/drivers/dma/hisilicon/hisi_dmadev.h +++ b/drivers/dma/hisilicon/hisi_dmadev.h @@ -5,6 +5,7 @@ #ifndef HISI_DMADEV_H #define HISI_DMADEV_H +#include #include #include #include @@ -14,7 +15,7 @@ #define BITS_PER_LONG (__SIZEOF_LONG__ * 8) #define GENMASK(h, l) \ (((~0UL) << (l)) & (~0UL >> (BITS_PER_LONG - 1 - (h)))) -#define BF_SHF(x) (__builtin_ffsll(x) - 1) +#define BF_SHF(x) rte_bsf64(x) #define FIELD_GET(mask, reg) \ ((typeof(mask))(((reg) & (mask)) >> BF_SHF(mask))) diff --git a/drivers/event/cnxk/cn10k_eventdev.c b/drivers/event/cnxk/cn10k_eventdev.c index 531c489172..f2e591f547 100644 --- a/drivers/event/cnxk/cn10k_eventdev.c +++ b/drivers/event/cnxk/cn10k_eventdev.c @@ -2,15 +2,16 @@ * Copyright(C) 2021 Marvell. */ +#include + +#include "cn10k_cryptodev_ops.h" +#include "cn10k_ethdev.h" #include "cn10k_tx_worker.h" #include "cn10k_worker.h" -#include "cn10k_ethdev.h" -#include "cn10k_cryptodev_ops.h" +#include "cnxk_common.h" +#include "cnxk_dma_event_dp.h" #include "cnxk_eventdev.h" #include "cnxk_worker.h" -#include "cnxk_dma_event_dp.h" - -#include #define CN10K_SET_EVDEV_DEQ_OP(dev, deq_op, deq_ops) \ deq_op = deq_ops[dev->rx_offloads & (NIX_RX_OFFLOAD_MAX - 1)] @@ -18,29 +19,6 @@ #define CN10K_SET_EVDEV_ENQ_OP(dev, enq_op, enq_ops) \ enq_op = enq_ops[dev->tx_offloads & (NIX_TX_OFFLOAD_MAX - 1)] -static uint32_t -cn10k_sso_gw_mode_wdata(struct cnxk_sso_evdev *dev) -{ - uint32_t wdata = 1; - - if (dev->deq_tmo_ns) - wdata |= BIT(16); - - switch (dev->gw_mode) { - case CN10K_GW_MODE_NONE: - default: - break; - case CN10K_GW_MODE_PREF: - wdata |= BIT(19); - break; - case CN10K_GW_MODE_PREF_WFE: - wdata |= BIT(20) | BIT(19); - break; - } - - return wdata; -} - static void * cn10k_sso_init_hws_mem(void *arg, uint8_t port_id) { @@ -61,9 +39,10 @@ cn10k_sso_init_hws_mem(void *arg, uint8_t port_id) ws->base = roc_sso_hws_base_get(&dev->sso, port_id); ws->hws_id = port_id; ws->swtag_req = 0; - ws->gw_wdata = cn10k_sso_gw_mode_wdata(dev); + ws->gw_wdata = cnxk_sso_hws_prf_wdata(dev); ws->gw_rdata = SSO_TT_EMPTY << 32; ws->lmt_base = dev->sso.lmt_base; + ws->xae_waes = dev->sso.feat.xaq_wq_entries; return ws; } @@ -94,10 +73,11 @@ cn10k_sso_hws_setup(void *arg, void *hws, uintptr_t grp_base) uint64_t val; ws->grp_base = grp_base; - ws->fc_mem = (int64_t *)dev->fc_iova; + ws->fc_mem = (int64_t __rte_atomic *)dev->fc_iova; ws->xaq_lmt = dev->xaq_lmt; - ws->fc_cache_space = dev->fc_cache_space; + ws->fc_cache_space = (int64_t __rte_atomic *)dev->fc_cache_space; ws->aw_lmt = ws->lmt_base; + ws->gw_wdata = cnxk_sso_hws_prf_wdata(dev); /* Set get_work timeout for HWS */ val = NSEC2USEC(dev->deq_tmo_ns); @@ -174,83 +154,6 @@ cn10k_sso_hws_flush_events(void *hws, uint8_t queue_id, uintptr_t base, return 0; } -static void 
-cn10k_sso_hws_reset(void *arg, void *hws) -{ - struct cnxk_sso_evdev *dev = arg; - struct cn10k_sso_hws *ws = hws; - uintptr_t base = ws->base; - uint64_t pend_state; - union { - __uint128_t wdata; - uint64_t u64[2]; - } gw; - uint8_t pend_tt; - bool is_pend; - - roc_sso_hws_gwc_invalidate(&dev->sso, &ws->hws_id, 1); - plt_write64(0, ws->base + SSOW_LF_GWS_OP_GWC_INVAL); - /* Wait till getwork/swtp/waitw/desched completes. */ - is_pend = false; - /* Work in WQE0 is always consumed, unless its a SWTAG. */ - pend_state = plt_read64(ws->base + SSOW_LF_GWS_PENDSTATE); - if (pend_state & (BIT_ULL(63) | BIT_ULL(62) | BIT_ULL(54)) || - ws->swtag_req) - is_pend = true; - - do { - pend_state = plt_read64(base + SSOW_LF_GWS_PENDSTATE); - } while (pend_state & (BIT_ULL(63) | BIT_ULL(62) | BIT_ULL(58) | - BIT_ULL(56) | BIT_ULL(54))); - pend_tt = CNXK_TT_FROM_TAG(plt_read64(base + SSOW_LF_GWS_WQE0)); - if (is_pend && pend_tt != SSO_TT_EMPTY) { /* Work was pending */ - if (pend_tt == SSO_TT_ATOMIC || pend_tt == SSO_TT_ORDERED) - cnxk_sso_hws_swtag_untag(base + - SSOW_LF_GWS_OP_SWTAG_UNTAG); - plt_write64(0, base + SSOW_LF_GWS_OP_DESCHED); - } else if (pend_tt != SSO_TT_EMPTY) { - plt_write64(0, base + SSOW_LF_GWS_OP_SWTAG_FLUSH); - } - - /* Wait for desched to complete. */ - do { - pend_state = plt_read64(base + SSOW_LF_GWS_PENDSTATE); - } while (pend_state & (BIT_ULL(58) | BIT_ULL(56))); - - switch (dev->gw_mode) { - case CN10K_GW_MODE_PREF: - case CN10K_GW_MODE_PREF_WFE: - while (plt_read64(base + SSOW_LF_GWS_PRF_WQE0) & BIT_ULL(63)) - ; - break; - case CN10K_GW_MODE_NONE: - default: - break; - } - - if (CNXK_TT_FROM_TAG(plt_read64(base + SSOW_LF_GWS_PRF_WQE0)) != - SSO_TT_EMPTY) { - plt_write64(BIT_ULL(16) | 1, - ws->base + SSOW_LF_GWS_OP_GET_WORK0); - do { - roc_load_pair(gw.u64[0], gw.u64[1], - ws->base + SSOW_LF_GWS_WQE0); - } while (gw.u64[0] & BIT_ULL(63)); - pend_tt = CNXK_TT_FROM_TAG(plt_read64(base + SSOW_LF_GWS_WQE0)); - if (pend_tt != SSO_TT_EMPTY) { /* Work was pending */ - if (pend_tt == SSO_TT_ATOMIC || - pend_tt == SSO_TT_ORDERED) - cnxk_sso_hws_swtag_untag( - base + SSOW_LF_GWS_OP_SWTAG_UNTAG); - plt_write64(0, base + SSOW_LF_GWS_OP_DESCHED); - } - } - - plt_write64(0, base + SSOW_LF_GWS_OP_GWC_INVAL); - roc_sso_hws_gwc_invalidate(&dev->sso, &ws->hws_id, 1); - rte_mb(); -} - static void cn10k_sso_set_rsrc(void *arg) { @@ -311,11 +214,6 @@ cn10k_sso_fp_tmplt_fns_set(struct rte_eventdev *event_dev) { #if !defined(CNXK_DIS_TMPLT_FUNC) struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev); - const event_dequeue_t sso_hws_deq[NIX_RX_OFFLOAD_MAX] = { -#define R(name, flags)[flags] = cn10k_sso_hws_deq_##name, - NIX_RX_FASTPATH_MODES -#undef R - }; const event_dequeue_burst_t sso_hws_deq_burst[NIX_RX_OFFLOAD_MAX] = { #define R(name, flags)[flags] = cn10k_sso_hws_deq_burst_##name, @@ -323,86 +221,42 @@ cn10k_sso_fp_tmplt_fns_set(struct rte_eventdev *event_dev) #undef R }; - const event_dequeue_t sso_hws_deq_tmo[NIX_RX_OFFLOAD_MAX] = { -#define R(name, flags)[flags] = cn10k_sso_hws_deq_tmo_##name, - NIX_RX_FASTPATH_MODES -#undef R - }; - const event_dequeue_burst_t sso_hws_deq_tmo_burst[NIX_RX_OFFLOAD_MAX] = { #define R(name, flags)[flags] = cn10k_sso_hws_deq_tmo_burst_##name, NIX_RX_FASTPATH_MODES #undef R }; - const event_dequeue_t sso_hws_deq_seg[NIX_RX_OFFLOAD_MAX] = { -#define R(name, flags)[flags] = cn10k_sso_hws_deq_seg_##name, - - NIX_RX_FASTPATH_MODES -#undef R - }; - const event_dequeue_burst_t sso_hws_deq_seg_burst[NIX_RX_OFFLOAD_MAX] = { #define R(name, flags)[flags] = 
cn10k_sso_hws_deq_seg_burst_##name, NIX_RX_FASTPATH_MODES #undef R }; - const event_dequeue_t sso_hws_deq_tmo_seg[NIX_RX_OFFLOAD_MAX] = { -#define R(name, flags)[flags] = cn10k_sso_hws_deq_tmo_seg_##name, - NIX_RX_FASTPATH_MODES -#undef R - }; - const event_dequeue_burst_t sso_hws_deq_tmo_seg_burst[NIX_RX_OFFLOAD_MAX] = { #define R(name, flags)[flags] = cn10k_sso_hws_deq_tmo_seg_burst_##name, NIX_RX_FASTPATH_MODES #undef R }; - const event_dequeue_t sso_hws_reas_deq[NIX_RX_OFFLOAD_MAX] = { -#define R(name, flags)[flags] = cn10k_sso_hws_reas_deq_##name, - NIX_RX_FASTPATH_MODES -#undef R - }; - const event_dequeue_burst_t sso_hws_reas_deq_burst[NIX_RX_OFFLOAD_MAX] = { #define R(name, flags)[flags] = cn10k_sso_hws_reas_deq_burst_##name, NIX_RX_FASTPATH_MODES #undef R }; - const event_dequeue_t sso_hws_reas_deq_tmo[NIX_RX_OFFLOAD_MAX] = { -#define R(name, flags)[flags] = cn10k_sso_hws_reas_deq_tmo_##name, - NIX_RX_FASTPATH_MODES -#undef R - }; - const event_dequeue_burst_t sso_hws_reas_deq_tmo_burst[NIX_RX_OFFLOAD_MAX] = { #define R(name, flags)[flags] = cn10k_sso_hws_reas_deq_tmo_burst_##name, NIX_RX_FASTPATH_MODES #undef R }; - const event_dequeue_t sso_hws_reas_deq_seg[NIX_RX_OFFLOAD_MAX] = { -#define R(name, flags)[flags] = cn10k_sso_hws_reas_deq_seg_##name, - - NIX_RX_FASTPATH_MODES -#undef R - }; - const event_dequeue_burst_t sso_hws_reas_deq_seg_burst[NIX_RX_OFFLOAD_MAX] = { #define R(name, flags)[flags] = cn10k_sso_hws_reas_deq_seg_burst_##name, NIX_RX_FASTPATH_MODES #undef R }; - const event_dequeue_t sso_hws_reas_deq_tmo_seg[NIX_RX_OFFLOAD_MAX] = { -#define R(name, flags)[flags] = cn10k_sso_hws_reas_deq_tmo_seg_##name, - NIX_RX_FASTPATH_MODES -#undef R - }; - const event_dequeue_burst_t sso_hws_reas_deq_tmo_seg_burst[NIX_RX_OFFLOAD_MAX] = { #define R(name, flags)[flags] = cn10k_sso_hws_reas_deq_tmo_seg_burst_##name, NIX_RX_FASTPATH_MODES @@ -424,48 +278,33 @@ cn10k_sso_fp_tmplt_fns_set(struct rte_eventdev *event_dev) if (dev->rx_offloads & NIX_RX_MULTI_SEG_F) { if (dev->rx_offloads & NIX_RX_REAS_F) { - CN10K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue, sso_hws_reas_deq_seg); CN10K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue_burst, sso_hws_reas_deq_seg_burst); - if (dev->is_timeout_deq) { - CN10K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue, - sso_hws_reas_deq_tmo_seg); + if (dev->is_timeout_deq) CN10K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue_burst, sso_hws_reas_deq_tmo_seg_burst); - } } else { - CN10K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue, sso_hws_deq_seg); CN10K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue_burst, sso_hws_deq_seg_burst); - if (dev->is_timeout_deq) { - CN10K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue, - sso_hws_deq_tmo_seg); + if (dev->is_timeout_deq) CN10K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue_burst, sso_hws_deq_tmo_seg_burst); - } } } else { if (dev->rx_offloads & NIX_RX_REAS_F) { - CN10K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue, sso_hws_reas_deq); CN10K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue_burst, sso_hws_reas_deq_burst); - if (dev->is_timeout_deq) { - CN10K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue, - sso_hws_reas_deq_tmo); + if (dev->is_timeout_deq) CN10K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue_burst, sso_hws_reas_deq_tmo_burst); - } } else { - CN10K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue, sso_hws_deq); CN10K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue_burst, sso_hws_deq_burst); - if (dev->is_timeout_deq) { - CN10K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue, sso_hws_deq_tmo); + if (dev->is_timeout_deq) CN10K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue_burst, 
sso_hws_deq_tmo_burst); - } } } @@ -486,12 +325,9 @@ cn10k_sso_fp_blk_fns_set(struct rte_eventdev *event_dev) #if defined(CNXK_DIS_TMPLT_FUNC) struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev); - event_dev->dequeue = cn10k_sso_hws_deq_all_offload; event_dev->dequeue_burst = cn10k_sso_hws_deq_burst_all_offload; - if (dev->rx_offloads & NIX_RX_OFFLOAD_TSTAMP_F) { - event_dev->dequeue = cn10k_sso_hws_deq_all_offload_tst; + if (dev->rx_offloads & NIX_RX_OFFLOAD_TSTAMP_F) event_dev->dequeue_burst = cn10k_sso_hws_deq_burst_all_offload_tst; - } event_dev->txa_enqueue = cn10k_sso_hws_tx_adptr_enq_seg_all_offload; event_dev->txa_enqueue_same_dest = cn10k_sso_hws_tx_adptr_enq_seg_all_offload; if (dev->tx_offloads & (NIX_TX_OFFLOAD_OL3_OL4_CSUM_F | NIX_TX_OFFLOAD_VLAN_QINQ_F | @@ -514,7 +350,6 @@ cn10k_sso_fp_fns_set(struct rte_eventdev *event_dev) cn10k_sso_fp_blk_fns_set(event_dev); cn10k_sso_fp_tmplt_fns_set(event_dev); - event_dev->enqueue = cn10k_sso_hws_enq; event_dev->enqueue_burst = cn10k_sso_hws_enq_burst; event_dev->enqueue_new_burst = cn10k_sso_hws_enq_new_burst; event_dev->enqueue_forward_burst = cn10k_sso_hws_enq_fwd_burst; @@ -570,18 +405,7 @@ cn10k_sso_dev_configure(const struct rte_eventdev *event_dev) if (rc < 0) goto cnxk_rsrc_fini; - switch (event_dev->data->dev_conf.preschedule_type) { - default: - case RTE_EVENT_PRESCHEDULE_NONE: - dev->gw_mode = CN10K_GW_MODE_NONE; - break; - case RTE_EVENT_PRESCHEDULE: - dev->gw_mode = CN10K_GW_MODE_PREF; - break; - case RTE_EVENT_PRESCHEDULE_ADAPTIVE: - dev->gw_mode = CN10K_GW_MODE_PREF_WFE; - break; - } + dev->gw_mode = cnxk_sso_hws_preschedule_get(event_dev->data->dev_conf.preschedule_type); rc = cnxk_setup_event_ports(event_dev, cn10k_sso_init_hws_mem, cn10k_sso_hws_setup); @@ -664,13 +488,13 @@ cn10k_sso_port_quiesce(struct rte_eventdev *event_dev, void *port, /* Check if we have work in PRF_WQE0, if so extract it. 
*/ switch (dev->gw_mode) { - case CN10K_GW_MODE_PREF: - case CN10K_GW_MODE_PREF_WFE: + case CNXK_GW_MODE_PREF: + case CNXK_GW_MODE_PREF_WFE: while (plt_read64(ws->base + SSOW_LF_GWS_PRF_WQE0) & BIT_ULL(63)) ; break; - case CN10K_GW_MODE_NONE: + case CNXK_GW_MODE_NONE: default: break; } @@ -739,24 +563,6 @@ cn10k_sso_port_unlink(struct rte_eventdev *event_dev, void *port, uint8_t queues return cn10k_sso_port_unlink_profile(event_dev, port, queues, nb_unlinks, 0); } -static void -cn10k_sso_configure_queue_stash(struct rte_eventdev *event_dev) -{ - struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev); - struct roc_sso_hwgrp_stash stash[dev->stash_cnt]; - int i, rc; - - plt_sso_dbg(); - for (i = 0; i < dev->stash_cnt; i++) { - stash[i].hwgrp = dev->stash_parse_data[i].queue; - stash[i].stash_offset = dev->stash_parse_data[i].stash_offset; - stash[i].stash_count = dev->stash_parse_data[i].stash_length; - } - rc = roc_sso_hwgrp_stash_config(&dev->sso, stash, dev->stash_cnt); - if (rc < 0) - plt_warn("failed to configure HWGRP WQE stashing rc = %d", rc); -} - static int cn10k_sso_start(struct rte_eventdev *event_dev) { @@ -768,9 +574,8 @@ cn10k_sso_start(struct rte_eventdev *event_dev) if (rc < 0) return rc; - cn10k_sso_configure_queue_stash(event_dev); - rc = cnxk_sso_start(event_dev, cn10k_sso_hws_reset, - cn10k_sso_hws_flush_events); + cnxk_sso_configure_queue_stash(event_dev); + rc = cnxk_sso_start(event_dev, cnxk_sso_hws_reset, cn10k_sso_hws_flush_events); if (rc < 0) return rc; cn10k_sso_fp_fns_set(event_dev); @@ -791,8 +596,7 @@ cn10k_sso_stop(struct rte_eventdev *event_dev) for (i = 0; i < event_dev->data->nb_ports; i++) hws[i] = i; roc_sso_hws_gwc_invalidate(&dev->sso, hws, event_dev->data->nb_ports); - cnxk_sso_stop(event_dev, cn10k_sso_hws_reset, - cn10k_sso_hws_flush_events); + cnxk_sso_stop(event_dev, cnxk_sso_hws_reset, cn10k_sso_hws_flush_events); } static int @@ -835,7 +639,7 @@ cn10k_sso_set_priv_mem(const struct rte_eventdev *event_dev, void *lookup_mem) for (i = 0; i < dev->nb_event_ports; i++) { struct cn10k_sso_hws *ws = event_dev->data->ports[i]; ws->xaq_lmt = dev->xaq_lmt; - ws->fc_mem = (int64_t *)dev->fc_iova; + ws->fc_mem = (int64_t __rte_atomic *)dev->fc_iova; ws->tstamp = dev->tstamp; if (lookup_mem) ws->lookup_mem = lookup_mem; @@ -848,7 +652,6 @@ eventdev_fops_update(struct rte_eventdev *event_dev) struct rte_event_fp_ops *fp_op = rte_event_fp_ops + event_dev->data->dev_id; - fp_op->dequeue = event_dev->dequeue; fp_op->dequeue_burst = event_dev->dequeue_burst; } @@ -1318,7 +1121,7 @@ RTE_PMD_REGISTER_KMOD_DEP(event_cn10k, "vfio-pci"); RTE_PMD_REGISTER_PARAM_STRING(event_cn10k, CNXK_SSO_XAE_CNT "=" CNXK_SSO_GGRP_QOS "=" CNXK_SSO_FORCE_BP "=1" - CN10K_SSO_STASH "=" + CNXK_SSO_STASH "=" CNXK_TIM_DISABLE_NPA "=1" CNXK_TIM_CHNK_SLOTS "=" CNXK_TIM_RINGS_LMT "=" diff --git a/drivers/event/cnxk/cn10k_eventdev.h b/drivers/event/cnxk/cn10k_eventdev.h index 372121465c..4f0eab8acb 100644 --- a/drivers/event/cnxk/cn10k_eventdev.h +++ b/drivers/event/cnxk/cn10k_eventdev.h @@ -19,10 +19,11 @@ struct __rte_cache_aligned cn10k_sso_hws { struct cnxk_timesync_info **tstamp; uint64_t meta_aura; /* Add Work Fastpath data */ - alignas(RTE_CACHE_LINE_SIZE) int64_t *fc_mem; - int64_t *fc_cache_space; + alignas(RTE_CACHE_LINE_SIZE) int64_t __rte_atomic *fc_mem; + int64_t __rte_atomic *fc_cache_space; uintptr_t aw_lmt; uintptr_t grp_base; + uint16_t xae_waes; int32_t xaq_lmt; /* Tx Fastpath data */ alignas(RTE_CACHE_LINE_SIZE) uintptr_t lmt_base; diff --git 
a/drivers/event/cnxk/cn10k_tx_worker.h b/drivers/event/cnxk/cn10k_tx_worker.h index 0695ea23e1..19cb2e22e5 100644 --- a/drivers/event/cnxk/cn10k_tx_worker.h +++ b/drivers/event/cnxk/cn10k_tx_worker.h @@ -51,7 +51,9 @@ cn10k_sso_txq_fc_wait(const struct cn10k_eth_txq *txq) : "memory"); #else do { - avail = txq->nb_sqb_bufs_adj - __atomic_load_n(txq->fc_mem, __ATOMIC_RELAXED); + avail = txq->nb_sqb_bufs_adj - + rte_atomic_load_explicit((uint64_t __rte_atomic *)txq->fc_mem, + rte_memory_order_relaxed); } while (((avail << txq->sqes_per_sqb_log2) - avail) <= 0); #endif } @@ -60,7 +62,8 @@ static __rte_always_inline int32_t cn10k_sso_sq_depth(const struct cn10k_eth_txq *txq) { int32_t avail = (int32_t)txq->nb_sqb_bufs_adj - - (int32_t)__atomic_load_n(txq->fc_mem, __ATOMIC_RELAXED); + (int32_t)rte_atomic_load_explicit((uint64_t __rte_atomic *)txq->fc_mem, + rte_memory_order_relaxed); return (avail << txq->sqes_per_sqb_log2) - avail; } diff --git a/drivers/event/cnxk/cn10k_worker.c b/drivers/event/cnxk/cn10k_worker.c index a0e85face1..80077ec8a1 100644 --- a/drivers/event/cnxk/cn10k_worker.c +++ b/drivers/event/cnxk/cn10k_worker.c @@ -2,6 +2,8 @@ * Copyright(C) 2021 Marvell. */ +#include "roc_api.h" + #include "cn10k_worker.h" #include "cnxk_eventdev.h" #include "cnxk_worker.h" @@ -16,7 +18,7 @@ cn10k_sso_hws_new_event(struct cn10k_sso_hws *ws, const struct rte_event *ev) const uint64_t event_ptr = ev->u64; const uint16_t grp = ev->queue_id; - rte_atomic_thread_fence(__ATOMIC_ACQ_REL); + rte_atomic_thread_fence(rte_memory_order_acq_rel); if (ws->xaq_lmt <= *ws->fc_mem) return 0; @@ -80,8 +82,8 @@ cn10k_sso_hws_forward_event(struct cn10k_sso_hws *ws, static inline int32_t sso_read_xaq_space(struct cn10k_sso_hws *ws) { - return (ws->xaq_lmt - __atomic_load_n(ws->fc_mem, __ATOMIC_RELAXED)) * - ROC_SSO_XAE_PER_XAQ; + return (ws->xaq_lmt - rte_atomic_load_explicit(ws->fc_mem, rte_memory_order_relaxed)) * + ws->xae_waes; } static inline void @@ -90,49 +92,24 @@ sso_lmt_aw_wait_fc(struct cn10k_sso_hws *ws, int64_t req) int64_t cached, refill; retry: - while (__atomic_load_n(ws->fc_cache_space, __ATOMIC_RELAXED) < 0) + while (rte_atomic_load_explicit(ws->fc_cache_space, rte_memory_order_relaxed) < 0) ; - cached = __atomic_fetch_sub(ws->fc_cache_space, req, __ATOMIC_ACQUIRE) - req; + cached = rte_atomic_fetch_sub_explicit(ws->fc_cache_space, req, rte_memory_order_acquire) - + req; /* Check if there is enough space, else update and retry. */ if (cached < 0) { /* Check if we have space else retry. 
*/ do { refill = sso_read_xaq_space(ws); } while (refill <= 0); - __atomic_compare_exchange(ws->fc_cache_space, &cached, &refill, - 0, __ATOMIC_RELEASE, - __ATOMIC_RELAXED); + rte_atomic_compare_exchange_strong_explicit(ws->fc_cache_space, &cached, refill, + rte_memory_order_release, + rte_memory_order_relaxed); goto retry; } } -uint16_t __rte_hot -cn10k_sso_hws_enq(void *port, const struct rte_event *ev) -{ - struct cn10k_sso_hws *ws = port; - - switch (ev->op) { - case RTE_EVENT_OP_NEW: - return cn10k_sso_hws_new_event(ws, ev); - case RTE_EVENT_OP_FORWARD: - cn10k_sso_hws_forward_event(ws, ev); - break; - case RTE_EVENT_OP_RELEASE: - if (ws->swtag_req) { - cnxk_sso_hws_desched(ev->u64, ws->base); - ws->swtag_req = 0; - break; - } - cnxk_sso_hws_swtag_flush(ws->base); - break; - default: - return 0; - } - - return 1; -} - #define VECTOR_SIZE_BITS 0xFFFFFFFFFFF80000ULL #define VECTOR_GET_LINE_OFFSET(line) (19 + (3 * line)) @@ -384,8 +361,29 @@ uint16_t __rte_hot cn10k_sso_hws_enq_burst(void *port, const struct rte_event ev[], uint16_t nb_events) { + struct cn10k_sso_hws *ws = port; + RTE_SET_USED(nb_events); - return cn10k_sso_hws_enq(port, ev); + + switch (ev->op) { + case RTE_EVENT_OP_NEW: + return cn10k_sso_hws_new_event(ws, ev); + case RTE_EVENT_OP_FORWARD: + cn10k_sso_hws_forward_event(ws, ev); + break; + case RTE_EVENT_OP_RELEASE: + if (ws->swtag_req) { + cnxk_sso_hws_desched(ev->u64, ws->base); + ws->swtag_req = 0; + break; + } + cnxk_sso_hws_swtag_flush(ws->base); + break; + default: + return 0; + } + + return 1; } uint16_t __rte_hot @@ -398,7 +396,7 @@ cn10k_sso_hws_enq_new_burst(void *port, const struct rte_event ev[], int32_t space; /* Do a common back-pressure check and return */ - space = sso_read_xaq_space(ws) - ROC_SSO_XAE_PER_XAQ; + space = sso_read_xaq_space(ws) - ws->xae_waes; if (space <= 0) return 0; nb_events = space < nb_events ? space : nb_events; diff --git a/drivers/event/cnxk/cn10k_worker.h b/drivers/event/cnxk/cn10k_worker.h index 4785cc6575..954dee5a2a 100644 --- a/drivers/event/cnxk/cn10k_worker.h +++ b/drivers/event/cnxk/cn10k_worker.h @@ -311,7 +311,7 @@ cn10k_sso_hws_get_work(struct cn10k_sso_hws *ws, struct rte_event *ev, roc_load_pair(gw.u64[0], gw.u64[1], ws->base + SSOW_LF_GWS_WQE0); } while (gw.u64[0] & BIT_ULL(63)); - rte_atomic_thread_fence(__ATOMIC_SEQ_CST); + rte_atomic_thread_fence(rte_memory_order_seq_cst); #endif ws->gw_rdata = gw.u64[0]; if (gw.u64[1]) @@ -366,7 +366,6 @@ cn10k_sso_hws_get_work_empty(struct cn10k_sso_hws *ws, struct rte_event *ev, } /* CN10K Fastpath functions. 
*/ -uint16_t __rte_hot cn10k_sso_hws_enq(void *port, const struct rte_event *ev); uint16_t __rte_hot cn10k_sso_hws_enq_burst(void *port, const struct rte_event ev[], uint16_t nb_events); @@ -381,83 +380,51 @@ int __rte_hot cn10k_sso_hws_preschedule_modify(void *port, enum rte_event_dev_preschedule_type type); #define R(name, flags) \ - uint16_t __rte_hot cn10k_sso_hws_deq_##name( \ - void *port, struct rte_event *ev, uint64_t timeout_ticks); \ uint16_t __rte_hot cn10k_sso_hws_deq_burst_##name( \ void *port, struct rte_event ev[], uint16_t nb_events, \ uint64_t timeout_ticks); \ - uint16_t __rte_hot cn10k_sso_hws_deq_tmo_##name( \ - void *port, struct rte_event *ev, uint64_t timeout_ticks); \ uint16_t __rte_hot cn10k_sso_hws_deq_tmo_burst_##name( \ void *port, struct rte_event ev[], uint16_t nb_events, \ uint64_t timeout_ticks); \ - uint16_t __rte_hot cn10k_sso_hws_deq_ca_##name( \ - void *port, struct rte_event *ev, uint64_t timeout_ticks); \ uint16_t __rte_hot cn10k_sso_hws_deq_ca_burst_##name( \ void *port, struct rte_event ev[], uint16_t nb_events, \ uint64_t timeout_ticks); \ - uint16_t __rte_hot cn10k_sso_hws_deq_tmo_ca_##name( \ - void *port, struct rte_event *ev, uint64_t timeout_ticks); \ uint16_t __rte_hot cn10k_sso_hws_deq_tmo_ca_burst_##name( \ void *port, struct rte_event ev[], uint16_t nb_events, \ uint64_t timeout_ticks); \ - uint16_t __rte_hot cn10k_sso_hws_deq_seg_##name( \ - void *port, struct rte_event *ev, uint64_t timeout_ticks); \ uint16_t __rte_hot cn10k_sso_hws_deq_seg_burst_##name( \ void *port, struct rte_event ev[], uint16_t nb_events, \ uint64_t timeout_ticks); \ - uint16_t __rte_hot cn10k_sso_hws_deq_tmo_seg_##name( \ - void *port, struct rte_event *ev, uint64_t timeout_ticks); \ uint16_t __rte_hot cn10k_sso_hws_deq_tmo_seg_burst_##name( \ void *port, struct rte_event ev[], uint16_t nb_events, \ uint64_t timeout_ticks); \ - uint16_t __rte_hot cn10k_sso_hws_deq_ca_seg_##name( \ - void *port, struct rte_event *ev, uint64_t timeout_ticks); \ uint16_t __rte_hot cn10k_sso_hws_deq_ca_seg_burst_##name( \ void *port, struct rte_event ev[], uint16_t nb_events, \ uint64_t timeout_ticks); \ - uint16_t __rte_hot cn10k_sso_hws_deq_tmo_ca_seg_##name( \ - void *port, struct rte_event *ev, uint64_t timeout_ticks); \ uint16_t __rte_hot cn10k_sso_hws_deq_tmo_ca_seg_burst_##name( \ void *port, struct rte_event ev[], uint16_t nb_events, \ uint64_t timeout_ticks); \ - uint16_t __rte_hot cn10k_sso_hws_reas_deq_##name( \ - void *port, struct rte_event *ev, uint64_t timeout_ticks); \ uint16_t __rte_hot cn10k_sso_hws_reas_deq_burst_##name( \ void *port, struct rte_event ev[], uint16_t nb_events, \ uint64_t timeout_ticks); \ - uint16_t __rte_hot cn10k_sso_hws_reas_deq_tmo_##name( \ - void *port, struct rte_event *ev, uint64_t timeout_ticks); \ uint16_t __rte_hot cn10k_sso_hws_reas_deq_tmo_burst_##name( \ void *port, struct rte_event ev[], uint16_t nb_events, \ uint64_t timeout_ticks); \ - uint16_t __rte_hot cn10k_sso_hws_reas_deq_ca_##name( \ - void *port, struct rte_event *ev, uint64_t timeout_ticks); \ uint16_t __rte_hot cn10k_sso_hws_reas_deq_ca_burst_##name( \ void *port, struct rte_event ev[], uint16_t nb_events, \ uint64_t timeout_ticks); \ - uint16_t __rte_hot cn10k_sso_hws_reas_deq_tmo_ca_##name( \ - void *port, struct rte_event *ev, uint64_t timeout_ticks); \ uint16_t __rte_hot cn10k_sso_hws_reas_deq_tmo_ca_burst_##name( \ void *port, struct rte_event ev[], uint16_t nb_events, \ uint64_t timeout_ticks); \ - uint16_t __rte_hot cn10k_sso_hws_reas_deq_seg_##name( \ - void *port, 
struct rte_event *ev, uint64_t timeout_ticks); \ uint16_t __rte_hot cn10k_sso_hws_reas_deq_seg_burst_##name( \ void *port, struct rte_event ev[], uint16_t nb_events, \ uint64_t timeout_ticks); \ - uint16_t __rte_hot cn10k_sso_hws_reas_deq_tmo_seg_##name( \ - void *port, struct rte_event *ev, uint64_t timeout_ticks); \ uint16_t __rte_hot cn10k_sso_hws_reas_deq_tmo_seg_burst_##name( \ void *port, struct rte_event ev[], uint16_t nb_events, \ uint64_t timeout_ticks); \ - uint16_t __rte_hot cn10k_sso_hws_reas_deq_ca_seg_##name( \ - void *port, struct rte_event *ev, uint64_t timeout_ticks); \ uint16_t __rte_hot cn10k_sso_hws_reas_deq_ca_seg_burst_##name( \ void *port, struct rte_event ev[], uint16_t nb_events, \ uint64_t timeout_ticks); \ - uint16_t __rte_hot cn10k_sso_hws_reas_deq_tmo_ca_seg_##name( \ - void *port, struct rte_event *ev, uint64_t timeout_ticks); \ uint16_t __rte_hot cn10k_sso_hws_reas_deq_tmo_ca_seg_burst_##name( \ void *port, struct rte_event ev[], uint16_t nb_events, \ uint64_t timeout_ticks); @@ -465,44 +432,55 @@ int __rte_hot cn10k_sso_hws_preschedule_modify(void *port, NIX_RX_FASTPATH_MODES #undef R -#define SSO_DEQ(fn, flags) \ - uint16_t __rte_hot fn(void *port, struct rte_event *ev, \ - uint64_t timeout_ticks) \ - { \ - struct cn10k_sso_hws *ws = port; \ - RTE_SET_USED(timeout_ticks); \ - if (ws->swtag_req) { \ - ws->swtag_req = 0; \ - ws->gw_rdata = cnxk_sso_hws_swtag_wait( \ - ws->base + SSOW_LF_GWS_WQE0); \ - return 1; \ - } \ - return cn10k_sso_hws_get_work(ws, ev, flags); \ +#define SSO_DEQ(fn, flags) \ + static __rte_always_inline uint16_t fn(void *port, struct rte_event *ev, \ + uint64_t timeout_ticks) \ + { \ + struct cn10k_sso_hws *ws = port; \ + RTE_SET_USED(timeout_ticks); \ + if (ws->swtag_req) { \ + ws->swtag_req = 0; \ + ws->gw_rdata = cnxk_sso_hws_swtag_wait(ws->base + SSOW_LF_GWS_WQE0); \ + return 1; \ + } \ + return cn10k_sso_hws_get_work(ws, ev, flags); \ } #define SSO_DEQ_SEG(fn, flags) SSO_DEQ(fn, flags | NIX_RX_MULTI_SEG_F) -#define SSO_DEQ_TMO(fn, flags) \ - uint16_t __rte_hot fn(void *port, struct rte_event *ev, \ - uint64_t timeout_ticks) \ - { \ - struct cn10k_sso_hws *ws = port; \ - uint16_t ret = 1; \ - uint64_t iter; \ - if (ws->swtag_req) { \ - ws->swtag_req = 0; \ - ws->gw_rdata = cnxk_sso_hws_swtag_wait( \ - ws->base + SSOW_LF_GWS_WQE0); \ - return ret; \ - } \ - ret = cn10k_sso_hws_get_work(ws, ev, flags); \ - for (iter = 1; iter < timeout_ticks && (ret == 0); iter++) \ - ret = cn10k_sso_hws_get_work(ws, ev, flags); \ - return ret; \ +#define SSO_DEQ_TMO(fn, flags) \ + static __rte_always_inline uint16_t fn(void *port, struct rte_event *ev, \ + uint64_t timeout_ticks) \ + { \ + struct cn10k_sso_hws *ws = port; \ + uint16_t ret = 1; \ + uint64_t iter; \ + if (ws->swtag_req) { \ + ws->swtag_req = 0; \ + ws->gw_rdata = cnxk_sso_hws_swtag_wait(ws->base + SSOW_LF_GWS_WQE0); \ + return ret; \ + } \ + ret = cn10k_sso_hws_get_work(ws, ev, flags); \ + for (iter = 1; iter < timeout_ticks && (ret == 0); iter++) \ + ret = cn10k_sso_hws_get_work(ws, ev, flags); \ + return ret; \ } #define SSO_DEQ_TMO_SEG(fn, flags) SSO_DEQ_TMO(fn, flags | NIX_RX_MULTI_SEG_F) +#define R(name, flags) \ + SSO_DEQ(cn10k_sso_hws_deq_##name, flags) \ + SSO_DEQ(cn10k_sso_hws_reas_deq_##name, flags | NIX_RX_REAS_F) \ + SSO_DEQ_SEG(cn10k_sso_hws_deq_seg_##name, flags) \ + SSO_DEQ_SEG(cn10k_sso_hws_reas_deq_seg_##name, flags | NIX_RX_REAS_F) \ + SSO_DEQ_TMO(cn10k_sso_hws_deq_tmo_##name, flags) \ + SSO_DEQ_TMO(cn10k_sso_hws_reas_deq_tmo_##name, flags | NIX_RX_REAS_F) \ + 
SSO_DEQ_TMO_SEG(cn10k_sso_hws_deq_tmo_seg_##name, flags) \ + SSO_DEQ_TMO_SEG(cn10k_sso_hws_reas_deq_tmo_seg_##name, flags | NIX_RX_REAS_F) + +NIX_RX_FASTPATH_MODES +#undef R + #define SSO_CMN_DEQ_BURST(fnb, fn, flags) \ uint16_t __rte_hot fnb(void *port, struct rte_event ev[], \ uint16_t nb_events, uint64_t timeout_ticks) \ @@ -519,12 +497,8 @@ NIX_RX_FASTPATH_MODES return fn(port, ev, timeout_ticks); \ } -uint16_t __rte_hot cn10k_sso_hws_deq_all_offload(void *port, struct rte_event *ev, - uint64_t timeout_ticks); uint16_t __rte_hot cn10k_sso_hws_deq_burst_all_offload(void *port, struct rte_event ev[], uint16_t nb_events, uint64_t timeout_ticks); -uint16_t __rte_hot cn10k_sso_hws_deq_all_offload_tst(void *port, struct rte_event *ev, - uint64_t timeout_ticks); uint16_t __rte_hot cn10k_sso_hws_deq_burst_all_offload_tst(void *port, struct rte_event ev[], uint16_t nb_events, uint64_t timeout_ticks); diff --git a/drivers/event/cnxk/cn20k_eventdev.c b/drivers/event/cnxk/cn20k_eventdev.c new file mode 100644 index 0000000000..d68700fc05 --- /dev/null +++ b/drivers/event/cnxk/cn20k_eventdev.c @@ -0,0 +1,1085 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(C) 2024 Marvell. + */ + +#include "roc_api.h" + +#include "cn20k_ethdev.h" +#include "cn20k_eventdev.h" +#include "cn20k_tx_worker.h" +#include "cn20k_worker.h" +#include "cnxk_common.h" +#include "cnxk_eventdev.h" +#include "cnxk_worker.h" + +#define CN20K_SET_EVDEV_DEQ_OP(dev, deq_op, deq_ops) \ + deq_op = deq_ops[dev->rx_offloads & (NIX_RX_OFFLOAD_MAX - 1)] + +#define CN20K_SET_EVDEV_ENQ_OP(dev, enq_op, enq_ops) \ + enq_op = enq_ops[dev->tx_offloads & (NIX_TX_OFFLOAD_MAX - 1)] + +static void * +cn20k_sso_init_hws_mem(void *arg, uint8_t port_id) +{ + struct cnxk_sso_evdev *dev = arg; + struct cn20k_sso_hws *ws; + + /* Allocate event port memory */ + ws = rte_zmalloc("cn20k_ws", sizeof(struct cn20k_sso_hws) + RTE_CACHE_LINE_SIZE, + RTE_CACHE_LINE_SIZE); + if (ws == NULL) { + plt_err("Failed to alloc memory for port=%d", port_id); + return NULL; + } + + /* First cache line is reserved for cookie */ + ws = (struct cn20k_sso_hws *)((uint8_t *)ws + RTE_CACHE_LINE_SIZE); + ws->base = roc_sso_hws_base_get(&dev->sso, port_id); + ws->hws_id = port_id; + ws->swtag_req = 0; + ws->gw_wdata = cnxk_sso_hws_prf_wdata(dev); + ws->gw_rdata = SSO_TT_EMPTY << 32; + ws->xae_waes = dev->sso.feat.xaq_wq_entries; + + return ws; +} + +static int +cn20k_sso_hws_link(void *arg, void *port, uint16_t *map, uint16_t nb_link, uint8_t profile) +{ + struct cnxk_sso_evdev *dev = arg; + struct cn20k_sso_hws *ws = port; + + return roc_sso_hws_link(&dev->sso, ws->hws_id, map, nb_link, profile, 0); +} + +static int +cn20k_sso_hws_unlink(void *arg, void *port, uint16_t *map, uint16_t nb_link, uint8_t profile) +{ + struct cnxk_sso_evdev *dev = arg; + struct cn20k_sso_hws *ws = port; + + return roc_sso_hws_unlink(&dev->sso, ws->hws_id, map, nb_link, profile, 0); +} + +static void +cn20k_sso_hws_setup(void *arg, void *hws, uintptr_t grp_base) +{ + struct cnxk_sso_evdev *dev = arg; + struct cn20k_sso_hws *ws = hws; + uint64_t val; + + ws->grp_base = grp_base; + ws->fc_mem = (int64_t __rte_atomic *)dev->fc_iova; + ws->xaq_lmt = dev->xaq_lmt; + ws->fc_cache_space = (int64_t __rte_atomic *)dev->fc_cache_space; + ws->aw_lmt = dev->sso.lmt_base; + ws->gw_wdata = cnxk_sso_hws_prf_wdata(dev); + ws->lmt_base = dev->sso.lmt_base; + + /* Set get_work timeout for HWS */ + val = NSEC2USEC(dev->deq_tmo_ns); + val = val ? 
val - 1 : 0; + plt_write64(val, ws->base + SSOW_LF_GWS_NW_TIM); +} + +static void +cn20k_sso_hws_release(void *arg, void *hws) +{ + struct cnxk_sso_evdev *dev = arg; + struct cn20k_sso_hws *ws = hws; + uint16_t i, j; + + for (i = 0; i < CNXK_SSO_MAX_PROFILES; i++) + for (j = 0; j < dev->nb_event_queues; j++) + roc_sso_hws_unlink(&dev->sso, ws->hws_id, &j, 1, i, 0); + memset(ws, 0, sizeof(*ws)); +} + +static int +cn20k_sso_hws_flush_events(void *hws, uint8_t queue_id, uintptr_t base, cnxk_handle_event_t fn, + void *arg) +{ + struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(arg); + uint64_t retry = CNXK_SSO_FLUSH_RETRY_MAX; + struct cn20k_sso_hws *ws = hws; + uint64_t cq_ds_cnt = 1; + uint64_t aq_cnt = 1; + uint64_t ds_cnt = 1; + struct rte_event ev; + uint64_t val, req; + + plt_write64(0, base + SSO_LF_GGRP_QCTL); + + roc_sso_hws_gwc_invalidate(&dev->sso, &ws->hws_id, 1); + plt_write64(0, ws->base + SSOW_LF_GWS_OP_GWC_INVAL); + req = queue_id; /* GGRP ID */ + req |= BIT_ULL(18); /* Grouped */ + req |= BIT_ULL(16); /* WAIT */ + + aq_cnt = plt_read64(base + SSO_LF_GGRP_AQ_CNT); + ds_cnt = plt_read64(base + SSO_LF_GGRP_MISC_CNT); + cq_ds_cnt = plt_read64(base + SSO_LF_GGRP_INT_CNT); + cq_ds_cnt &= 0x3FFF3FFF0000; + + while (aq_cnt || cq_ds_cnt || ds_cnt) { + plt_write64(req, ws->base + SSOW_LF_GWS_OP_GET_WORK0); + cn20k_sso_hws_get_work_empty(ws, &ev, 0); + if (fn != NULL && ev.u64 != 0) + fn(arg, ev); + if (ev.sched_type != SSO_TT_EMPTY) + cnxk_sso_hws_swtag_flush(ws->base); + else if (retry-- == 0) + break; + do { + val = plt_read64(ws->base + SSOW_LF_GWS_PENDSTATE); + } while (val & BIT_ULL(56)); + aq_cnt = plt_read64(base + SSO_LF_GGRP_AQ_CNT); + ds_cnt = plt_read64(base + SSO_LF_GGRP_MISC_CNT); + cq_ds_cnt = plt_read64(base + SSO_LF_GGRP_INT_CNT); + /* Extract cq and ds count */ + cq_ds_cnt &= 0x3FFF3FFF0000; + } + + if (aq_cnt || cq_ds_cnt || ds_cnt) + return -EAGAIN; + + plt_write64(0, ws->base + SSOW_LF_GWS_OP_GWC_INVAL); + roc_sso_hws_gwc_invalidate(&dev->sso, &ws->hws_id, 1); + rte_mb(); + + return 0; +} + +static void +cn20k_sso_set_rsrc(void *arg) +{ + struct cnxk_sso_evdev *dev = arg; + + dev->max_event_ports = dev->sso.max_hws; + dev->max_event_queues = dev->sso.max_hwgrp > RTE_EVENT_MAX_QUEUES_PER_DEV ? + RTE_EVENT_MAX_QUEUES_PER_DEV : + dev->sso.max_hwgrp; +} + +static int +cn20k_sso_rsrc_init(void *arg, uint8_t hws, uint8_t hwgrp) +{ + struct cnxk_tim_evdev *tim_dev = cnxk_tim_priv_get(); + struct cnxk_sso_evdev *dev = arg; + uint16_t nb_tim_lfs; + + nb_tim_lfs = tim_dev ? 
tim_dev->nb_rings : 0; + return roc_sso_rsrc_init(&dev->sso, hws, hwgrp, nb_tim_lfs); +} + +static int +cn20k_sso_updt_tx_adptr_data(const struct rte_eventdev *event_dev) +{ + struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev); + int i; + + if (dev->tx_adptr_data == NULL) + return 0; + + for (i = 0; i < dev->nb_event_ports; i++) { + struct cn20k_sso_hws *ws = event_dev->data->ports[i]; + void *ws_cookie; + + ws_cookie = cnxk_sso_hws_get_cookie(ws); + ws_cookie = rte_realloc_socket(ws_cookie, + sizeof(struct cnxk_sso_hws_cookie) + + sizeof(struct cn20k_sso_hws) + + dev->tx_adptr_data_sz, + RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY); + if (ws_cookie == NULL) + return -ENOMEM; + ws = RTE_PTR_ADD(ws_cookie, sizeof(struct cnxk_sso_hws_cookie)); + memcpy(&ws->tx_adptr_data, dev->tx_adptr_data, dev->tx_adptr_data_sz); + event_dev->data->ports[i] = ws; + } + + return 0; +} + +#if defined(RTE_ARCH_ARM64) +static inline void +cn20k_sso_fp_tmplt_fns_set(struct rte_eventdev *event_dev) +{ +#if !defined(CNXK_DIS_TMPLT_FUNC) + struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev); + + const event_dequeue_burst_t sso_hws_deq_burst[NIX_RX_OFFLOAD_MAX] = { +#define R(name, flags) [flags] = cn20k_sso_hws_deq_burst_##name, + NIX_RX_FASTPATH_MODES +#undef R + }; + + const event_dequeue_burst_t sso_hws_deq_tmo_burst[NIX_RX_OFFLOAD_MAX] = { +#define R(name, flags) [flags] = cn20k_sso_hws_deq_tmo_burst_##name, + NIX_RX_FASTPATH_MODES +#undef R + }; + + const event_dequeue_burst_t sso_hws_deq_seg_burst[NIX_RX_OFFLOAD_MAX] = { +#define R(name, flags) [flags] = cn20k_sso_hws_deq_seg_burst_##name, + NIX_RX_FASTPATH_MODES +#undef R + }; + + const event_dequeue_burst_t sso_hws_deq_tmo_seg_burst[NIX_RX_OFFLOAD_MAX] = { +#define R(name, flags) [flags] = cn20k_sso_hws_deq_tmo_seg_burst_##name, + NIX_RX_FASTPATH_MODES +#undef R + }; + + const event_dequeue_burst_t sso_hws_reas_deq_burst[NIX_RX_OFFLOAD_MAX] = { +#define R(name, flags) [flags] = cn20k_sso_hws_reas_deq_burst_##name, + NIX_RX_FASTPATH_MODES +#undef R + }; + + const event_dequeue_burst_t sso_hws_reas_deq_tmo_burst[NIX_RX_OFFLOAD_MAX] = { +#define R(name, flags) [flags] = cn20k_sso_hws_reas_deq_tmo_burst_##name, + NIX_RX_FASTPATH_MODES +#undef R + }; + + const event_dequeue_burst_t sso_hws_reas_deq_seg_burst[NIX_RX_OFFLOAD_MAX] = { +#define R(name, flags) [flags] = cn20k_sso_hws_reas_deq_seg_burst_##name, + NIX_RX_FASTPATH_MODES +#undef R + }; + + const event_dequeue_burst_t sso_hws_reas_deq_tmo_seg_burst[NIX_RX_OFFLOAD_MAX] = { +#define R(name, flags) [flags] = cn20k_sso_hws_reas_deq_tmo_seg_burst_##name, + NIX_RX_FASTPATH_MODES +#undef R + }; + + /* Tx modes */ + const event_tx_adapter_enqueue_t sso_hws_tx_adptr_enq[NIX_TX_OFFLOAD_MAX] = { +#define T(name, sz, flags) [flags] = cn20k_sso_hws_tx_adptr_enq_##name, + NIX_TX_FASTPATH_MODES +#undef T + }; + + const event_tx_adapter_enqueue_t sso_hws_tx_adptr_enq_seg[NIX_TX_OFFLOAD_MAX] = { +#define T(name, sz, flags) [flags] = cn20k_sso_hws_tx_adptr_enq_seg_##name, + NIX_TX_FASTPATH_MODES +#undef T + }; + + if (dev->rx_offloads & NIX_RX_MULTI_SEG_F) { + if (dev->rx_offloads & NIX_RX_REAS_F) { + CN20K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue_burst, + sso_hws_reas_deq_seg_burst); + if (dev->is_timeout_deq) + CN20K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue_burst, + sso_hws_reas_deq_tmo_seg_burst); + } else { + CN20K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue_burst, + sso_hws_deq_seg_burst); + + if (dev->is_timeout_deq) + CN20K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue_burst, + sso_hws_deq_tmo_seg_burst); + } + } else 
{ + if (dev->rx_offloads & NIX_RX_REAS_F) { + CN20K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue_burst, + sso_hws_reas_deq_burst); + + if (dev->is_timeout_deq) + CN20K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue_burst, + sso_hws_reas_deq_tmo_burst); + } else { + CN20K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue_burst, sso_hws_deq_burst); + + if (dev->is_timeout_deq) + CN20K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue_burst, + sso_hws_deq_tmo_burst); + } + } + + if (dev->tx_offloads & NIX_TX_MULTI_SEG_F) + CN20K_SET_EVDEV_ENQ_OP(dev, event_dev->txa_enqueue, sso_hws_tx_adptr_enq_seg); + else + CN20K_SET_EVDEV_ENQ_OP(dev, event_dev->txa_enqueue, sso_hws_tx_adptr_enq); + + event_dev->txa_enqueue_same_dest = event_dev->txa_enqueue; +#else + RTE_SET_USED(event_dev); +#endif +} + +static inline void +cn20k_sso_fp_blk_fns_set(struct rte_eventdev *event_dev) +{ +#if defined(CNXK_DIS_TMPLT_FUNC) + struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev); + + event_dev->dequeue_burst = cn20k_sso_hws_deq_burst_all_offload; + if (dev->rx_offloads & NIX_RX_OFFLOAD_TSTAMP_F) + event_dev->dequeue_burst = cn20k_sso_hws_deq_burst_all_offload_tst; + event_dev->txa_enqueue = cn20k_sso_hws_tx_adptr_enq_seg_all_offload; + event_dev->txa_enqueue_same_dest = cn20k_sso_hws_tx_adptr_enq_seg_all_offload; + if (dev->tx_offloads & (NIX_TX_OFFLOAD_OL3_OL4_CSUM_F | NIX_TX_OFFLOAD_VLAN_QINQ_F | + NIX_TX_OFFLOAD_TSO_F | NIX_TX_OFFLOAD_TSTAMP_F)) { + event_dev->txa_enqueue = cn20k_sso_hws_tx_adptr_enq_seg_all_offload_tst; + event_dev->txa_enqueue_same_dest = cn20k_sso_hws_tx_adptr_enq_seg_all_offload_tst; + } +#else + RTE_SET_USED(event_dev); +#endif +} +#endif + +static void +cn20k_sso_fp_fns_set(struct rte_eventdev *event_dev) +{ +#if defined(RTE_ARCH_ARM64) + cn20k_sso_fp_blk_fns_set(event_dev); + cn20k_sso_fp_tmplt_fns_set(event_dev); + + event_dev->enqueue_burst = cn20k_sso_hws_enq_burst; + event_dev->enqueue_new_burst = cn20k_sso_hws_enq_new_burst; + event_dev->enqueue_forward_burst = cn20k_sso_hws_enq_fwd_burst; + + event_dev->profile_switch = cn20k_sso_hws_profile_switch; + event_dev->preschedule_modify = cn20k_sso_hws_preschedule_modify; + event_dev->preschedule = cn20k_sso_hws_preschedule; +#else + RTE_SET_USED(event_dev); +#endif +} + +static void +cn20k_sso_info_get(struct rte_eventdev *event_dev, struct rte_event_dev_info *dev_info) +{ + struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev); + + dev_info->driver_name = RTE_STR(EVENTDEV_NAME_CN20K_PMD); + cnxk_sso_info_get(dev, dev_info); + dev_info->max_event_port_enqueue_depth = UINT32_MAX; +} + +static int +cn20k_sso_dev_configure(const struct rte_eventdev *event_dev) +{ + struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev); + int rc; + + rc = cnxk_sso_dev_validate(event_dev, 1, UINT32_MAX); + if (rc < 0) { + plt_err("Invalid event device configuration"); + return -EINVAL; + } + + rc = cn20k_sso_rsrc_init(dev, dev->nb_event_ports, dev->nb_event_queues); + if (rc < 0) { + plt_err("Failed to initialize SSO resources"); + return -ENODEV; + } + + rc = cnxk_sso_xaq_allocate(dev); + if (rc < 0) + goto cnxk_rsrc_fini; + + dev->gw_mode = cnxk_sso_hws_preschedule_get(event_dev->data->dev_conf.preschedule_type); + + rc = cnxk_setup_event_ports(event_dev, cn20k_sso_init_hws_mem, cn20k_sso_hws_setup); + if (rc < 0) + goto cnxk_rsrc_fini; + + /* Restore any prior port-queue mapping. 
*/ + cnxk_sso_restore_links(event_dev, cn20k_sso_hws_link); + + dev->configured = 1; + rte_mb(); + + return 0; +cnxk_rsrc_fini: + roc_sso_rsrc_fini(&dev->sso); + dev->nb_event_ports = 0; + return rc; +} + +static int +cn20k_sso_port_setup(struct rte_eventdev *event_dev, uint8_t port_id, + const struct rte_event_port_conf *port_conf) +{ + + RTE_SET_USED(port_conf); + return cnxk_sso_port_setup(event_dev, port_id, cn20k_sso_hws_setup); +} + +static void +cn20k_sso_port_release(void *port) +{ + struct cnxk_sso_hws_cookie *gws_cookie = cnxk_sso_hws_get_cookie(port); + struct cnxk_sso_evdev *dev; + + if (port == NULL) + return; + + dev = cnxk_sso_pmd_priv(gws_cookie->event_dev); + if (!gws_cookie->configured) + goto free; + + cn20k_sso_hws_release(dev, port); + memset(gws_cookie, 0, sizeof(*gws_cookie)); +free: + rte_free(gws_cookie); +} + +static void +cn20k_sso_port_quiesce(struct rte_eventdev *event_dev, void *port, + rte_eventdev_port_flush_t flush_cb, void *args) +{ + struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev); + struct cn20k_sso_hws *ws = port; + struct rte_event ev; + uint64_t ptag; + bool is_pend; + + is_pend = false; + /* Work in WQE0 is always consumed, unless it's a SWTAG. */ + ptag = plt_read64(ws->base + SSOW_LF_GWS_PENDSTATE); + if (ptag & (BIT_ULL(62) | BIT_ULL(54)) || ws->swtag_req) + is_pend = true; + do { + ptag = plt_read64(ws->base + SSOW_LF_GWS_PENDSTATE); + } while (ptag & (BIT_ULL(62) | BIT_ULL(58) | BIT_ULL(56) | BIT_ULL(54))); + + cn20k_sso_hws_get_work_empty(ws, &ev, + (NIX_RX_OFFLOAD_MAX - 1) | NIX_RX_REAS_F | NIX_RX_MULTI_SEG_F); + if (is_pend && ev.u64) + if (flush_cb) + flush_cb(event_dev->data->dev_id, ev, args); + ptag = (plt_read64(ws->base + SSOW_LF_GWS_TAG) >> 32) & SSO_TT_EMPTY; + if (ptag != SSO_TT_EMPTY) + cnxk_sso_hws_swtag_flush(ws->base); + + do { + ptag = plt_read64(ws->base + SSOW_LF_GWS_PENDSTATE); + } while (ptag & BIT_ULL(56)); + + /* Check if we have work in PRF_WQE0, if so extract it. 
*/ + switch (dev->gw_mode) { + case CNXK_GW_MODE_PREF: + case CNXK_GW_MODE_PREF_WFE: + while (plt_read64(ws->base + SSOW_LF_GWS_PRF_WQE0) & BIT_ULL(63)) + ; + break; + case CNXK_GW_MODE_NONE: + default: + break; + } + + if (CNXK_TT_FROM_TAG(plt_read64(ws->base + SSOW_LF_GWS_PRF_WQE0)) != SSO_TT_EMPTY) { + plt_write64(BIT_ULL(16) | 1, ws->base + SSOW_LF_GWS_OP_GET_WORK0); + cn20k_sso_hws_get_work_empty( + ws, &ev, (NIX_RX_OFFLOAD_MAX - 1) | NIX_RX_REAS_F | NIX_RX_MULTI_SEG_F); + if (ev.u64) { + if (flush_cb) + flush_cb(event_dev->data->dev_id, ev, args); + } + cnxk_sso_hws_swtag_flush(ws->base); + do { + ptag = plt_read64(ws->base + SSOW_LF_GWS_PENDSTATE); + } while (ptag & BIT_ULL(56)); + } + ws->swtag_req = 0; + plt_write64(0, ws->base + SSOW_LF_GWS_OP_GWC_INVAL); +} + +static int +cn20k_sso_port_link_profile(struct rte_eventdev *event_dev, void *port, const uint8_t queues[], + const uint8_t priorities[], uint16_t nb_links, uint8_t profile) +{ + struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev); + uint16_t hwgrp_ids[nb_links]; + uint16_t link; + + RTE_SET_USED(priorities); + for (link = 0; link < nb_links; link++) + hwgrp_ids[link] = queues[link]; + nb_links = cn20k_sso_hws_link(dev, port, hwgrp_ids, nb_links, profile); + + return (int)nb_links; +} + +static int +cn20k_sso_port_unlink_profile(struct rte_eventdev *event_dev, void *port, uint8_t queues[], + uint16_t nb_unlinks, uint8_t profile) +{ + struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev); + uint16_t hwgrp_ids[nb_unlinks]; + uint16_t unlink; + + for (unlink = 0; unlink < nb_unlinks; unlink++) + hwgrp_ids[unlink] = queues[unlink]; + nb_unlinks = cn20k_sso_hws_unlink(dev, port, hwgrp_ids, nb_unlinks, profile); + + return (int)nb_unlinks; +} + +static int +cn20k_sso_port_link(struct rte_eventdev *event_dev, void *port, const uint8_t queues[], + const uint8_t priorities[], uint16_t nb_links) +{ + return cn20k_sso_port_link_profile(event_dev, port, queues, priorities, nb_links, 0); +} + +static int +cn20k_sso_port_unlink(struct rte_eventdev *event_dev, void *port, uint8_t queues[], + uint16_t nb_unlinks) +{ + return cn20k_sso_port_unlink_profile(event_dev, port, queues, nb_unlinks, 0); +} + +static int +cn20k_sso_start(struct rte_eventdev *event_dev) +{ + struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev); + uint8_t hws[RTE_EVENT_MAX_PORTS_PER_DEV]; + int rc, i; + + cnxk_sso_configure_queue_stash(event_dev); + rc = cnxk_sso_start(event_dev, cnxk_sso_hws_reset, cn20k_sso_hws_flush_events); + if (rc < 0) + return rc; + cn20k_sso_fp_fns_set(event_dev); + for (i = 0; i < event_dev->data->nb_ports; i++) + hws[i] = i; + roc_sso_hws_gwc_invalidate(&dev->sso, hws, event_dev->data->nb_ports); + + return rc; +} + +static void +cn20k_sso_stop(struct rte_eventdev *event_dev) +{ + struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev); + uint8_t hws[RTE_EVENT_MAX_PORTS_PER_DEV]; + int i; + + for (i = 0; i < event_dev->data->nb_ports; i++) + hws[i] = i; + roc_sso_hws_gwc_invalidate(&dev->sso, hws, event_dev->data->nb_ports); + cnxk_sso_stop(event_dev, cnxk_sso_hws_reset, cn20k_sso_hws_flush_events); +} + +static int +cn20k_sso_close(struct rte_eventdev *event_dev) +{ + return cnxk_sso_close(event_dev, cn20k_sso_hws_unlink); +} + +static int +cn20k_sso_selftest(void) +{ + return cnxk_sso_selftest(RTE_STR(event_cn20k)); +} + +static int +cn20k_sso_rx_adapter_caps_get(const struct rte_eventdev *event_dev, + const struct rte_eth_dev *eth_dev, uint32_t *caps) +{ + int rc; + + RTE_SET_USED(event_dev); + rc = 
strncmp(eth_dev->device->driver->name, "net_cn20k", 9); + if (rc) + *caps = RTE_EVENT_ETH_RX_ADAPTER_SW_CAP; + else + *caps = RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT | + RTE_EVENT_ETH_RX_ADAPTER_CAP_MULTI_EVENTQ | + RTE_EVENT_ETH_RX_ADAPTER_CAP_OVERRIDE_FLOW_ID | + RTE_EVENT_ETH_RX_ADAPTER_CAP_EVENT_VECTOR; + + return 0; +} + +static void +cn20k_sso_set_priv_mem(const struct rte_eventdev *event_dev, void *lookup_mem) +{ + struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev); + int i; + + for (i = 0; i < dev->nb_event_ports; i++) { + struct cn20k_sso_hws *ws = event_dev->data->ports[i]; + ws->xaq_lmt = dev->xaq_lmt; + ws->fc_mem = (int64_t __rte_atomic *)dev->fc_iova; + ws->tstamp = dev->tstamp; + if (lookup_mem) + ws->lookup_mem = lookup_mem; + } +} + +static void +eventdev_fops_tstamp_update(struct rte_eventdev *event_dev) +{ + struct rte_event_fp_ops *fp_op = rte_event_fp_ops + event_dev->data->dev_id; + + fp_op->dequeue_burst = event_dev->dequeue_burst; +} + +static void +cn20k_sso_tstamp_hdl_update(uint16_t port_id, uint16_t flags, bool ptp_en) +{ + struct rte_eth_dev *dev = &rte_eth_devices[port_id]; + struct cnxk_eth_dev *cnxk_eth_dev = dev->data->dev_private; + struct rte_eventdev *event_dev = cnxk_eth_dev->evdev_priv; + struct cnxk_sso_evdev *evdev = cnxk_sso_pmd_priv(event_dev); + + evdev->rx_offloads |= flags; + if (ptp_en) + evdev->tstamp[port_id] = &cnxk_eth_dev->tstamp; + else + evdev->tstamp[port_id] = NULL; + cn20k_sso_fp_fns_set((struct rte_eventdev *)(uintptr_t)event_dev); + eventdev_fops_tstamp_update(event_dev); +} + +static int +cn20k_sso_rxq_enable(struct cnxk_eth_dev *cnxk_eth_dev, uint16_t rq_id, uint16_t port_id, + const struct rte_event_eth_rx_adapter_queue_conf *queue_conf, int agq) +{ + struct roc_nix_rq *rq; + uint32_t tag_mask; + uint16_t wqe_skip; + uint8_t tt; + int rc; + + rq = &cnxk_eth_dev->rqs[rq_id]; + if (queue_conf->rx_queue_flags & RTE_EVENT_ETH_RX_ADAPTER_QUEUE_EVENT_VECTOR) { + tag_mask = agq; + tt = SSO_TT_AGG; + rq->flow_tag_width = 0; + } else { + tag_mask = (port_id & 0xFF) << 20; + tag_mask |= (RTE_EVENT_TYPE_ETHDEV << 28); + tt = queue_conf->ev.sched_type; + rq->flow_tag_width = 20; + if (queue_conf->rx_queue_flags & RTE_EVENT_ETH_RX_ADAPTER_QUEUE_FLOW_ID_VALID) { + rq->flow_tag_width = 0; + tag_mask |= queue_conf->ev.flow_id; + } + } + + rq->tag_mask = tag_mask; + rq->sso_ena = 1; + rq->tt = tt; + rq->hwgrp = queue_conf->ev.queue_id; + wqe_skip = RTE_ALIGN_CEIL(sizeof(struct rte_mbuf), ROC_CACHE_LINE_SZ); + wqe_skip = wqe_skip / ROC_CACHE_LINE_SZ; + rq->wqe_skip = wqe_skip; + + rc = roc_nix_rq_modify(&cnxk_eth_dev->nix, rq, 0); + return rc; +} + +static int +cn20k_sso_rx_adapter_vwqe_enable(struct cnxk_sso_evdev *dev, uint16_t port_id, uint16_t rq_id, + const struct rte_event_eth_rx_adapter_queue_conf *queue_conf) +{ + uint32_t agq, tag_mask, stag_mask; + struct roc_sso_agq_data data; + int rc; + + tag_mask = (port_id & 0xff) << 20; + if (queue_conf->rx_queue_flags & RTE_EVENT_ETH_RX_ADAPTER_QUEUE_FLOW_ID_VALID) + tag_mask |= queue_conf->ev.flow_id; + else + tag_mask |= rq_id; + + stag_mask = tag_mask; + tag_mask |= RTE_EVENT_TYPE_ETHDEV_VECTOR << 28; + stag_mask |= RTE_EVENT_TYPE_ETHDEV << 28; + + memset(&data, 0, sizeof(struct roc_sso_agq_data)); + data.tag = tag_mask; + data.tt = queue_conf->ev.sched_type; + data.stag = stag_mask; + data.vwqe_aura = roc_npa_aura_handle_to_aura(queue_conf->vector_mp->pool_id); + data.vwqe_max_sz_exp = rte_log2_u32(queue_conf->vector_sz); + data.vwqe_wait_tmo = queue_conf->vector_timeout_ns / 
((SSO_AGGR_DEF_TMO + 1) * 100); + data.xqe_type = 0; + + rc = roc_sso_hwgrp_agq_alloc(&dev->sso, queue_conf->ev.queue_id, &data); + if (rc < 0) + return rc; + + agq = roc_sso_hwgrp_agq_from_tag(&dev->sso, queue_conf->ev.queue_id, tag_mask, 0); + return agq; +} + +static int +cn20k_rx_adapter_queue_add(const struct rte_eventdev *event_dev, const struct rte_eth_dev *eth_dev, + int32_t rx_queue_id, + const struct rte_event_eth_rx_adapter_queue_conf *queue_conf) +{ + struct cnxk_eth_dev *cnxk_eth_dev = eth_dev->data->dev_private; + struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev); + uint16_t port = eth_dev->data->port_id; + struct cnxk_eth_rxq_sp *rxq_sp; + int i, rc = 0, agq = 0; + + if (rx_queue_id < 0) { + for (i = 0; i < eth_dev->data->nb_rx_queues; i++) + rc |= cn20k_rx_adapter_queue_add(event_dev, eth_dev, i, queue_conf); + } else { + rxq_sp = cnxk_eth_rxq_to_sp(eth_dev->data->rx_queues[rx_queue_id]); + cnxk_sso_updt_xae_cnt(dev, rxq_sp, RTE_EVENT_TYPE_ETHDEV); + rc = cnxk_sso_xae_reconfigure((struct rte_eventdev *)(uintptr_t)event_dev); + if (queue_conf->rx_queue_flags & RTE_EVENT_ETH_RX_ADAPTER_QUEUE_EVENT_VECTOR) { + cnxk_sso_updt_xae_cnt(dev, queue_conf->vector_mp, + RTE_EVENT_TYPE_ETHDEV_VECTOR); + rc = cnxk_sso_xae_reconfigure((struct rte_eventdev *)(uintptr_t)event_dev); + if (rc < 0) + return rc; + + rc = cn20k_sso_rx_adapter_vwqe_enable(dev, port, rx_queue_id, queue_conf); + if (rc < 0) + return rc; + agq = rc; + } + + rc = cn20k_sso_rxq_enable(cnxk_eth_dev, (uint16_t)rx_queue_id, port, queue_conf, + agq); + + /* Propagate force bp devarg */ + cnxk_eth_dev->nix.force_rx_aura_bp = dev->force_ena_bp; + cnxk_sso_tstamp_cfg(port, eth_dev, dev); + cnxk_eth_dev->nb_rxq_sso++; + } + + if (rc < 0) { + plt_err("Failed to configure Rx adapter port=%d, q=%d", port, + queue_conf->ev.queue_id); + return rc; + } + + dev->rx_offloads |= cnxk_eth_dev->rx_offload_flags; + return 0; +} + +static int +cn20k_rx_adapter_queue_del(const struct rte_eventdev *event_dev, const struct rte_eth_dev *eth_dev, + int32_t rx_queue_id) +{ + struct cnxk_eth_dev *cnxk_eth_dev = eth_dev->data->dev_private; + struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev); + struct roc_nix_rq *rxq; + int i, rc = 0; + + RTE_SET_USED(event_dev); + if (rx_queue_id < 0) { + for (i = 0; i < eth_dev->data->nb_rx_queues; i++) + cn20k_rx_adapter_queue_del(event_dev, eth_dev, i); + } else { + rxq = &cnxk_eth_dev->rqs[rx_queue_id]; + if (rxq->tt == SSO_TT_AGG) + roc_sso_hwgrp_agq_free(&dev->sso, rxq->hwgrp, rxq->tag_mask); + rc = cnxk_sso_rxq_disable(eth_dev, (uint16_t)rx_queue_id); + cnxk_eth_dev->nb_rxq_sso--; + } + + if (rc < 0) + plt_err("Failed to clear Rx adapter config port=%d, q=%d", eth_dev->data->port_id, + rx_queue_id); + return rc; +} + +static int +cn20k_sso_rx_adapter_queue_add(const struct rte_eventdev *event_dev, + const struct rte_eth_dev *eth_dev, int32_t rx_queue_id, + const struct rte_event_eth_rx_adapter_queue_conf *queue_conf) +{ + struct cnxk_eth_dev *cnxk_eth_dev = eth_dev->data->dev_private; + struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev); + struct roc_sso_hwgrp_stash stash; + struct cn20k_eth_rxq *rxq; + void *lookup_mem; + int rc; + + rc = strncmp(eth_dev->device->driver->name, "net_cn20k", 8); + if (rc) + return -EINVAL; + + rc = cn20k_rx_adapter_queue_add(event_dev, eth_dev, rx_queue_id, queue_conf); + if (rc) + return -EINVAL; + + cnxk_eth_dev->cnxk_sso_ptp_tstamp_cb = cn20k_sso_tstamp_hdl_update; + cnxk_eth_dev->evdev_priv = (struct rte_eventdev *)(uintptr_t)event_dev; + + rxq = 
eth_dev->data->rx_queues[0]; + lookup_mem = rxq->lookup_mem; + cn20k_sso_set_priv_mem(event_dev, lookup_mem); + cn20k_sso_fp_fns_set((struct rte_eventdev *)(uintptr_t)event_dev); + if (roc_feature_sso_has_stash() && dev->nb_event_ports > 1) { + stash.hwgrp = queue_conf->ev.queue_id; + stash.stash_offset = CN20K_SSO_DEFAULT_STASH_OFFSET; + stash.stash_count = CN20K_SSO_DEFAULT_STASH_LENGTH; + rc = roc_sso_hwgrp_stash_config(&dev->sso, &stash, 1); + if (rc < 0) + plt_warn("failed to configure HWGRP WQE stashing rc = %d", rc); + } + + return 0; +} + +static int +cn20k_sso_rx_adapter_queue_del(const struct rte_eventdev *event_dev, + const struct rte_eth_dev *eth_dev, int32_t rx_queue_id) +{ + int rc; + + rc = strncmp(eth_dev->device->driver->name, "net_cn20k", 8); + if (rc) + return -EINVAL; + + return cn20k_rx_adapter_queue_del(event_dev, eth_dev, rx_queue_id); +} + +static int +cn20k_sso_rx_adapter_vector_limits(const struct rte_eventdev *dev, + const struct rte_eth_dev *eth_dev, + struct rte_event_eth_rx_adapter_vector_limits *limits) +{ + int ret; + + RTE_SET_USED(dev); + RTE_SET_USED(eth_dev); + ret = strncmp(eth_dev->device->driver->name, "net_cn20k", 8); + if (ret) + return -ENOTSUP; + + limits->log2_sz = true; + limits->min_sz = 1 << ROC_NIX_VWQE_MIN_SIZE_LOG2; + limits->max_sz = 1 << ROC_NIX_VWQE_MAX_SIZE_LOG2; + limits->min_timeout_ns = (SSO_AGGR_DEF_TMO + 1) * 100; + limits->max_timeout_ns = (BITMASK_ULL(11, 0) + 1) * limits->min_timeout_ns; + + return 0; +} + +static int +cn20k_sso_tx_adapter_caps_get(const struct rte_eventdev *dev, const struct rte_eth_dev *eth_dev, + uint32_t *caps) +{ + int ret; + + RTE_SET_USED(dev); + ret = strncmp(eth_dev->device->driver->name, "net_cn20k", 8); + if (ret) + *caps = 0; + else + *caps = RTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT | + RTE_EVENT_ETH_TX_ADAPTER_CAP_EVENT_VECTOR; + + return 0; +} + +static void +cn20k_sso_txq_fc_update(const struct rte_eth_dev *eth_dev, int32_t tx_queue_id) +{ + struct cnxk_eth_dev *cnxk_eth_dev = eth_dev->data->dev_private; + struct cn20k_eth_txq *txq; + struct roc_nix_sq *sq; + int i; + + if (tx_queue_id < 0) { + for (i = 0; i < eth_dev->data->nb_tx_queues; i++) + cn20k_sso_txq_fc_update(eth_dev, i); + } else { + uint16_t sqes_per_sqb; + + sq = &cnxk_eth_dev->sqs[tx_queue_id]; + txq = eth_dev->data->tx_queues[tx_queue_id]; + sqes_per_sqb = 1U << txq->sqes_per_sqb_log2; + if (cnxk_eth_dev->tx_offloads & RTE_ETH_TX_OFFLOAD_SECURITY) + sq->nb_sqb_bufs_adj -= (cnxk_eth_dev->outb.nb_desc / sqes_per_sqb); + txq->nb_sqb_bufs_adj = sq->nb_sqb_bufs_adj; + } +} + +static int +cn20k_sso_tx_adapter_queue_add(uint8_t id, const struct rte_eventdev *event_dev, + const struct rte_eth_dev *eth_dev, int32_t tx_queue_id) +{ + struct cnxk_eth_dev *cnxk_eth_dev = eth_dev->data->dev_private; + struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev); + uint64_t tx_offloads; + int rc; + + RTE_SET_USED(id); + rc = cnxk_sso_tx_adapter_queue_add(event_dev, eth_dev, tx_queue_id); + if (rc < 0) + return rc; + + /* Can't enable tstamp if all the ports don't have it enabled. 
*/ + tx_offloads = cnxk_eth_dev->tx_offload_flags; + if (dev->tx_adptr_configured) { + uint8_t tstmp_req = !!(tx_offloads & NIX_TX_OFFLOAD_TSTAMP_F); + uint8_t tstmp_ena = !!(dev->tx_offloads & NIX_TX_OFFLOAD_TSTAMP_F); + + if (tstmp_ena && !tstmp_req) + dev->tx_offloads &= ~(NIX_TX_OFFLOAD_TSTAMP_F); + else if (!tstmp_ena && tstmp_req) + tx_offloads &= ~(NIX_TX_OFFLOAD_TSTAMP_F); + } + + dev->tx_offloads |= tx_offloads; + cn20k_sso_txq_fc_update(eth_dev, tx_queue_id); + rc = cn20k_sso_updt_tx_adptr_data(event_dev); + if (rc < 0) + return rc; + cn20k_sso_fp_fns_set((struct rte_eventdev *)(uintptr_t)event_dev); + dev->tx_adptr_configured = 1; + + return 0; +} + +static int +cn20k_sso_tx_adapter_queue_del(uint8_t id, const struct rte_eventdev *event_dev, + const struct rte_eth_dev *eth_dev, int32_t tx_queue_id) +{ + int rc; + + RTE_SET_USED(id); + rc = cnxk_sso_tx_adapter_queue_del(event_dev, eth_dev, tx_queue_id); + if (rc < 0) + return rc; + return cn20k_sso_updt_tx_adptr_data(event_dev); +} + +static int +cn20k_tim_caps_get(const struct rte_eventdev *evdev, uint64_t flags, uint32_t *caps, + const struct event_timer_adapter_ops **ops) +{ + return cnxk_tim_caps_get(evdev, flags, caps, ops, cn20k_sso_set_priv_mem); +} + +static struct eventdev_ops cn20k_sso_dev_ops = { + .dev_infos_get = cn20k_sso_info_get, + .dev_configure = cn20k_sso_dev_configure, + + .queue_def_conf = cnxk_sso_queue_def_conf, + .queue_setup = cnxk_sso_queue_setup, + .queue_release = cnxk_sso_queue_release, + .queue_attr_set = cnxk_sso_queue_attribute_set, + + .port_def_conf = cnxk_sso_port_def_conf, + .port_setup = cn20k_sso_port_setup, + .port_release = cn20k_sso_port_release, + .port_quiesce = cn20k_sso_port_quiesce, + .port_link = cn20k_sso_port_link, + .port_unlink = cn20k_sso_port_unlink, + .port_link_profile = cn20k_sso_port_link_profile, + .port_unlink_profile = cn20k_sso_port_unlink_profile, + .timeout_ticks = cnxk_sso_timeout_ticks, + + .eth_rx_adapter_caps_get = cn20k_sso_rx_adapter_caps_get, + .eth_rx_adapter_queue_add = cn20k_sso_rx_adapter_queue_add, + .eth_rx_adapter_queue_del = cn20k_sso_rx_adapter_queue_del, + .eth_rx_adapter_start = cnxk_sso_rx_adapter_start, + .eth_rx_adapter_stop = cnxk_sso_rx_adapter_stop, + + .eth_rx_adapter_vector_limits_get = cn20k_sso_rx_adapter_vector_limits, + + .eth_tx_adapter_caps_get = cn20k_sso_tx_adapter_caps_get, + .eth_tx_adapter_queue_add = cn20k_sso_tx_adapter_queue_add, + .eth_tx_adapter_queue_del = cn20k_sso_tx_adapter_queue_del, + .eth_tx_adapter_start = cnxk_sso_tx_adapter_start, + .eth_tx_adapter_stop = cnxk_sso_tx_adapter_stop, + .eth_tx_adapter_free = cnxk_sso_tx_adapter_free, + + .timer_adapter_caps_get = cn20k_tim_caps_get, + + .xstats_get = cnxk_sso_xstats_get, + .xstats_reset = cnxk_sso_xstats_reset, + .xstats_get_names = cnxk_sso_xstats_get_names, + + .dump = cnxk_sso_dump, + .dev_start = cn20k_sso_start, + .dev_stop = cn20k_sso_stop, + .dev_close = cn20k_sso_close, + .dev_selftest = cn20k_sso_selftest, +}; + +static int +cn20k_sso_init(struct rte_eventdev *event_dev) +{ + struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev); + int rc; + + rc = roc_plt_init(); + if (rc < 0) { + plt_err("Failed to initialize platform model"); + return rc; + } + + event_dev->dev_ops = &cn20k_sso_dev_ops; + /* For secondary processes, the primary has done all the work */ + if (rte_eal_process_type() != RTE_PROC_PRIMARY) { + cn20k_sso_fp_fns_set(event_dev); + return 0; + } + + rc = cnxk_sso_init(event_dev); + if (rc < 0) + return rc; + + 
cn20k_sso_set_rsrc(cnxk_sso_pmd_priv(event_dev)); + if (!dev->max_event_ports || !dev->max_event_queues) { + plt_err("Not enough eventdev resource queues=%d ports=%d", dev->max_event_queues, + dev->max_event_ports); + cnxk_sso_fini(event_dev); + return -ENODEV; + } + + plt_sso_dbg("Initializing %s max_queues=%d max_ports=%d", event_dev->data->name, + dev->max_event_queues, dev->max_event_ports); + + return 0; +} + +static int +cn20k_sso_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev) +{ + return rte_event_pmd_pci_probe(pci_drv, pci_dev, sizeof(struct cnxk_sso_evdev), + cn20k_sso_init); +} + +static const struct rte_pci_id cn20k_pci_sso_map[] = { + CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN20KA, PCI_DEVID_CNXK_RVU_SSO_TIM_PF), + CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN20KA, PCI_DEVID_CNXK_RVU_SSO_TIM_VF), + { + .vendor_id = 0, + }, +}; + +static struct rte_pci_driver cn20k_pci_sso = { + .id_table = cn20k_pci_sso_map, + .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_NEED_IOVA_AS_VA, + .probe = cn20k_sso_probe, + .remove = cnxk_sso_remove, +}; + +RTE_PMD_REGISTER_PCI(event_cn20k, cn20k_pci_sso); +RTE_PMD_REGISTER_PCI_TABLE(event_cn20k, cn20k_pci_sso_map); +RTE_PMD_REGISTER_KMOD_DEP(event_cn20k, "vfio-pci"); +RTE_PMD_REGISTER_PARAM_STRING(event_cn20k, + CNXK_SSO_XAE_CNT "=" + CNXK_SSO_GGRP_QOS "=" + CNXK_SSO_STASH "=" + CNXK_SSO_FORCE_BP "=1" + CNXK_TIM_DISABLE_NPA "=1" + CNXK_TIM_CHNK_SLOTS "=" + CNXK_TIM_RINGS_LMT "=" + CNXK_TIM_STATS_ENA "=1" + CNXK_TIM_EXT_CLK "="); diff --git a/drivers/event/cnxk/cn20k_eventdev.h b/drivers/event/cnxk/cn20k_eventdev.h new file mode 100644 index 0000000000..8ea2878fa5 --- /dev/null +++ b/drivers/event/cnxk/cn20k_eventdev.h @@ -0,0 +1,34 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(C) 2022 Marvell. + */ + +#ifndef __CN20K_EVENTDEV_H__ +#define __CN20K_EVENTDEV_H__ + +#define CN20K_SSO_DEFAULT_STASH_OFFSET -1 +#define CN20K_SSO_DEFAULT_STASH_LENGTH 2 + +struct __rte_cache_aligned cn20k_sso_hws { + uint64_t base; + uint32_t gw_wdata; + void *lookup_mem; + uint64_t gw_rdata; + uint8_t swtag_req; + uint8_t hws_id; + /* PTP timestamp */ + struct cnxk_timesync_info **tstamp; + uint64_t meta_aura; + /* Add Work Fastpath data */ + alignas(RTE_CACHE_LINE_SIZE) int64_t __rte_atomic *fc_mem; + int64_t __rte_atomic *fc_cache_space; + uintptr_t aw_lmt; + uintptr_t grp_base; + uint16_t xae_waes; + int32_t xaq_lmt; + /* Tx Fastpath data */ + alignas(RTE_CACHE_LINE_SIZE) uintptr_t lmt_base; + uint64_t lso_tun_fmt; + uint8_t tx_adptr_data[]; +}; + +#endif /* __CN20K_EVENTDEV_H__ */ diff --git a/drivers/event/cnxk/cn20k_tx_worker.h b/drivers/event/cnxk/cn20k_tx_worker.h new file mode 100644 index 0000000000..b09d845b09 --- /dev/null +++ b/drivers/event/cnxk/cn20k_tx_worker.h @@ -0,0 +1,276 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(C) 2024 Marvell. 
+ */ + +#ifndef __CN20K_TX_WORKER_H__ +#define __CN20K_TX_WORKER_H__ + +#include +#include + +#include "cn20k_eventdev.h" +#include "cn20k_tx.h" +#include "cnxk_eventdev_dp.h" +#include + +/* CN20K Tx event fastpath */ + +static __rte_always_inline struct cn20k_eth_txq * +cn20k_sso_hws_xtract_meta(struct rte_mbuf *m, const uint64_t *txq_data) +{ + return (struct cn20k_eth_txq *)(txq_data[(txq_data[m->port] >> 48) + + rte_event_eth_tx_adapter_txq_get(m)] & + (BIT_ULL(48) - 1)); +} + +static __rte_always_inline void +cn20k_sso_txq_fc_wait(const struct cn20k_eth_txq *txq) +{ + int64_t avail; + +#ifdef RTE_ARCH_ARM64 + int64_t val; + + asm volatile(PLT_CPU_FEATURE_PREAMBLE + " ldxr %[val], [%[addr]] \n" + " sub %[val], %[adj], %[val] \n" + " lsl %[refill], %[val], %[shft] \n" + " sub %[refill], %[refill], %[val] \n" + " cmp %[refill], #0x0 \n" + " b.gt .Ldne%= \n" + " sevl \n" + ".Lrty%=: wfe \n" + " ldxr %[val], [%[addr]] \n" + " sub %[val], %[adj], %[val] \n" + " lsl %[refill], %[val], %[shft] \n" + " sub %[refill], %[refill], %[val] \n" + " cmp %[refill], #0x0 \n" + " b.le .Lrty%= \n" + ".Ldne%=: \n" + : [refill] "=&r"(avail), [val] "=&r" (val) + : [addr] "r" (txq->fc_mem), [adj] "r" (txq->nb_sqb_bufs_adj), + [shft] "r" (txq->sqes_per_sqb_log2) + : "memory"); +#else + do { + avail = txq->nb_sqb_bufs_adj - + rte_atomic_load_explicit((uint64_t __rte_atomic *)txq->fc_mem, + rte_memory_order_relaxed); + } while (((avail << txq->sqes_per_sqb_log2) - avail) <= 0); +#endif +} + +static __rte_always_inline int32_t +cn20k_sso_sq_depth(const struct cn20k_eth_txq *txq) +{ + int32_t avail = (int32_t)txq->nb_sqb_bufs_adj - + (int32_t)rte_atomic_load_explicit((uint64_t __rte_atomic *)txq->fc_mem, + rte_memory_order_relaxed); + return (avail << txq->sqes_per_sqb_log2) - avail; +} + +static __rte_always_inline uint16_t +cn20k_sso_tx_one(struct cn20k_sso_hws *ws, struct rte_mbuf *m, uint64_t *cmd, uint16_t lmt_id, + uintptr_t lmt_addr, uint8_t sched_type, const uint64_t *txq_data, + const uint32_t flags) +{ + uint8_t lnum = 0, loff = 0, shft = 0; + struct rte_mbuf *extm = NULL; + struct cn20k_eth_txq *txq; + uintptr_t laddr; + uint16_t segdw; + uintptr_t pa; + bool sec; + + txq = cn20k_sso_hws_xtract_meta(m, txq_data); + if (cn20k_sso_sq_depth(txq) <= 0) + return 0; + + if (flags & NIX_TX_OFFLOAD_MBUF_NOFF_F && txq->tx_compl.ena) + handle_tx_completion_pkts(txq, 1); + + cn20k_nix_tx_skeleton(txq, cmd, flags, 0); + /* Perform header writes before barrier + * for TSO + */ + if (flags & NIX_TX_OFFLOAD_TSO_F) + cn20k_nix_xmit_prepare_tso(m, flags); + + cn20k_nix_xmit_prepare(txq, m, &extm, cmd, flags, txq->lso_tun_fmt, &sec, txq->mark_flag, + txq->mark_fmt); + + laddr = lmt_addr; + /* Prepare CPT instruction and get nixtx addr if + * it is for CPT on same lmtline. 
+ */ + if (flags & NIX_TX_OFFLOAD_SECURITY_F && sec) + cn20k_nix_prep_sec(m, cmd, &laddr, lmt_addr, &lnum, &loff, &shft, txq->sa_base, + flags); + + /* Move NIX desc to LMT/NIXTX area */ + cn20k_nix_xmit_mv_lmt_base(laddr, cmd, flags); + + if (flags & NIX_TX_MULTI_SEG_F) + segdw = cn20k_nix_prepare_mseg(txq, m, &extm, (uint64_t *)laddr, flags); + else + segdw = cn20k_nix_tx_ext_subs(flags) + 2; + + cn20k_nix_xmit_prepare_tstamp(txq, laddr, m->ol_flags, segdw, flags); + if (flags & NIX_TX_OFFLOAD_SECURITY_F && sec) + pa = txq->cpt_io_addr | 3 << 4; + else + pa = txq->io_addr | ((segdw - 1) << 4); + + if (!CNXK_TAG_IS_HEAD(ws->gw_rdata) && !sched_type) + ws->gw_rdata = roc_sso_hws_head_wait(ws->base); + + cn20k_sso_txq_fc_wait(txq); + if (flags & NIX_TX_OFFLOAD_SECURITY_F && sec) + cn20k_nix_sec_fc_wait_one(txq); + + roc_lmt_submit_steorl(lmt_id, pa); + + /* Memory barrier to make sure lmtst store completes */ + rte_io_wmb(); + + if (flags & NIX_TX_OFFLOAD_MBUF_NOFF_F && !txq->tx_compl.ena) + cn20k_nix_free_extmbuf(extm); + + return 1; +} + +static __rte_always_inline uint16_t +cn20k_sso_vwqe_split_tx(struct cn20k_sso_hws *ws, struct rte_mbuf **mbufs, uint16_t nb_mbufs, + uint64_t *cmd, const uint64_t *txq_data, const uint32_t flags) +{ + uint16_t count = 0, port, queue, ret = 0, last_idx = 0; + struct cn20k_eth_txq *txq; + int32_t space; + int i; + + port = mbufs[0]->port; + queue = rte_event_eth_tx_adapter_txq_get(mbufs[0]); + for (i = 0; i < nb_mbufs; i++) { + if (port != mbufs[i]->port || queue != rte_event_eth_tx_adapter_txq_get(mbufs[i])) { + if (count) { + txq = (struct cn20k_eth_txq + *)(txq_data[(txq_data[port] >> 48) + queue] & + (BIT_ULL(48) - 1)); + /* Transmit based on queue depth */ + space = cn20k_sso_sq_depth(txq); + if (space < count) + goto done; + cn20k_nix_xmit_pkts_vector(txq, (uint64_t *)ws, &mbufs[last_idx], + count, cmd, flags | NIX_TX_VWQE_F); + ret += count; + count = 0; + } + port = mbufs[i]->port; + queue = rte_event_eth_tx_adapter_txq_get(mbufs[i]); + last_idx = i; + } + count++; + } + if (count) { + txq = (struct cn20k_eth_txq *)(txq_data[(txq_data[port] >> 48) + queue] & + (BIT_ULL(48) - 1)); + /* Transmit based on queue depth */ + space = cn20k_sso_sq_depth(txq); + if (space < count) + goto done; + cn20k_nix_xmit_pkts_vector(txq, (uint64_t *)ws, &mbufs[last_idx], count, cmd, + flags | NIX_TX_VWQE_F); + ret += count; + } +done: + return ret; +} + +static __rte_always_inline uint16_t +cn20k_sso_hws_event_tx(struct cn20k_sso_hws *ws, struct rte_event *ev, uint64_t *cmd, + const uint64_t *txq_data, const uint32_t flags) +{ + struct cn20k_eth_txq *txq; + struct rte_mbuf *m; + uintptr_t lmt_addr; + uint16_t lmt_id; + + lmt_addr = ws->lmt_base; + ROC_LMT_BASE_ID_GET(lmt_addr, lmt_id); + + if (ev->event_type & RTE_EVENT_TYPE_VECTOR) { + struct rte_mbuf **mbufs = ev->vec->mbufs; + uint64_t meta = *(uint64_t *)ev->vec; + uint16_t offset, nb_pkts, left; + int32_t space; + + nb_pkts = meta & 0xFFFF; + offset = (meta >> 16) & 0xFFF; + if (meta & BIT(31)) { + txq = (struct cn20k_eth_txq + *)(txq_data[(txq_data[meta >> 32] >> 48) + (meta >> 48)] & + (BIT_ULL(48) - 1)); + + /* Transmit based on queue depth */ + space = cn20k_sso_sq_depth(txq); + if (space <= 0) + return 0; + nb_pkts = nb_pkts < space ? 
nb_pkts : (uint16_t)space; + cn20k_nix_xmit_pkts_vector(txq, (uint64_t *)ws, mbufs + offset, nb_pkts, + cmd, flags | NIX_TX_VWQE_F); + } else { + nb_pkts = cn20k_sso_vwqe_split_tx(ws, mbufs + offset, nb_pkts, cmd, + txq_data, flags); + } + left = (meta & 0xFFFF) - nb_pkts; + + if (!left) { + rte_mempool_put(rte_mempool_from_obj(ev->vec), ev->vec); + } else { + *(uint64_t *)ev->vec = + (meta & ~0xFFFFFFFUL) | (((uint32_t)nb_pkts + offset) << 16) | left; + } + rte_prefetch0(ws); + return !left; + } + + m = ev->mbuf; + return cn20k_sso_tx_one(ws, m, cmd, lmt_id, lmt_addr, ev->sched_type, txq_data, flags); +} + +#define T(name, sz, flags) \ + uint16_t __rte_hot cn20k_sso_hws_tx_adptr_enq_##name(void *port, struct rte_event ev[], \ + uint16_t nb_events); \ + uint16_t __rte_hot cn20k_sso_hws_tx_adptr_enq_seg_##name( \ + void *port, struct rte_event ev[], uint16_t nb_events); + +NIX_TX_FASTPATH_MODES +#undef T + +#define SSO_TX(fn, sz, flags) \ + uint16_t __rte_hot fn(void *port, struct rte_event ev[], uint16_t nb_events) \ + { \ + struct cn20k_sso_hws *ws = port; \ + uint64_t cmd[sz]; \ + RTE_SET_USED(nb_events); \ + return cn20k_sso_hws_event_tx(ws, &ev[0], cmd, \ + (const uint64_t *)ws->tx_adptr_data, flags); \ + } + +#define SSO_TX_SEG(fn, sz, flags) \ + uint16_t __rte_hot fn(void *port, struct rte_event ev[], uint16_t nb_events) \ + { \ + uint64_t cmd[(sz) + CNXK_NIX_TX_MSEG_SG_DWORDS - 2]; \ + struct cn20k_sso_hws *ws = port; \ + RTE_SET_USED(nb_events); \ + return cn20k_sso_hws_event_tx(ws, &ev[0], cmd, \ + (const uint64_t *)ws->tx_adptr_data, \ + (flags) | NIX_TX_MULTI_SEG_F); \ + } + +uint16_t __rte_hot cn20k_sso_hws_tx_adptr_enq_seg_all_offload(void *port, struct rte_event ev[], + uint16_t nb_events); +uint16_t __rte_hot cn20k_sso_hws_tx_adptr_enq_seg_all_offload_tst(void *port, struct rte_event ev[], + uint16_t nb_events); + +#endif diff --git a/drivers/event/cnxk/cn20k_worker.c b/drivers/event/cnxk/cn20k_worker.c new file mode 100644 index 0000000000..53daf3b4b0 --- /dev/null +++ b/drivers/event/cnxk/cn20k_worker.c @@ -0,0 +1,425 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(C) 2024 Marvell. 
+ */ + +#include + +#include "roc_api.h" + +#include "cn20k_worker.h" +#include "cnxk_eventdev.h" +#include "cnxk_worker.h" + +/* SSO Operations */ + +static __rte_always_inline uint8_t +cn20k_sso_hws_new_event(struct cn20k_sso_hws *ws, const struct rte_event *ev) +{ + const uint32_t tag = (uint32_t)ev->event; + const uint8_t new_tt = ev->sched_type; + const uint64_t event_ptr = ev->u64; + const uint16_t grp = ev->queue_id; + + rte_atomic_thread_fence(rte_memory_order_acq_rel); + if (ws->xaq_lmt <= *ws->fc_mem) + return 0; + + cnxk_sso_hws_add_work(event_ptr, tag, new_tt, ws->grp_base + (grp << 12)); + return 1; +} + +static __rte_always_inline void +cn20k_sso_hws_fwd_swtag(struct cn20k_sso_hws *ws, const struct rte_event *ev) +{ + const uint32_t tag = (uint32_t)ev->event; + const uint8_t new_tt = ev->sched_type; + const uint8_t cur_tt = CNXK_TT_FROM_TAG(ws->gw_rdata); + + /* CNXK model + * cur_tt/new_tt SSO_TT_ORDERED SSO_TT_ATOMIC SSO_TT_UNTAGGED + * + * SSO_TT_ORDERED norm norm untag + * SSO_TT_ATOMIC norm norm untag + * SSO_TT_UNTAGGED norm norm NOOP + */ + + if (new_tt == SSO_TT_UNTAGGED) { + if (cur_tt != SSO_TT_UNTAGGED) + cnxk_sso_hws_swtag_untag(ws->base + SSOW_LF_GWS_OP_SWTAG_UNTAG); + } else { + cnxk_sso_hws_swtag_norm(tag, new_tt, ws->base + SSOW_LF_GWS_OP_SWTAG_NORM); + } + ws->swtag_req = 1; +} + +static __rte_always_inline void +cn20k_sso_hws_fwd_group(struct cn20k_sso_hws *ws, const struct rte_event *ev, const uint16_t grp) +{ + const uint32_t tag = (uint32_t)ev->event; + const uint8_t new_tt = ev->sched_type; + + plt_write64(ev->u64, ws->base + SSOW_LF_GWS_OP_UPD_WQP_GRP1); + cnxk_sso_hws_swtag_desched(tag, new_tt, grp, ws->base + SSOW_LF_GWS_OP_SWTAG_DESCHED); +} + +static __rte_always_inline void +cn20k_sso_hws_forward_event(struct cn20k_sso_hws *ws, const struct rte_event *ev) +{ + const uint8_t grp = ev->queue_id; + + /* Group hasn't changed, Use SWTAG to forward the event */ + if (CNXK_GRP_FROM_TAG(ws->gw_rdata) == grp) + cn20k_sso_hws_fwd_swtag(ws, ev); + else + /* + * Group has been changed for group based work pipelining, + * Use deschedule/add_work operation to transfer the event to + * new group/core + */ + cn20k_sso_hws_fwd_group(ws, ev, grp); +} + +static inline int32_t +sso_read_xaq_space(struct cn20k_sso_hws *ws) +{ + return (ws->xaq_lmt - rte_atomic_load_explicit(ws->fc_mem, rte_memory_order_relaxed)) * + ws->xae_waes; +} + +static inline void +sso_lmt_aw_wait_fc(struct cn20k_sso_hws *ws, int64_t req) +{ + int64_t cached, refill; + +retry: + while (rte_atomic_load_explicit(ws->fc_cache_space, rte_memory_order_relaxed) < 0) + ; + + cached = rte_atomic_fetch_sub_explicit(ws->fc_cache_space, req, rte_memory_order_acquire) - + req; + /* Check if there is enough space, else update and retry. */ + if (cached < 0) { + /* Check if we have space else retry. 
*/ + do { + refill = sso_read_xaq_space(ws); + } while (refill <= 0); + rte_atomic_compare_exchange_strong_explicit(ws->fc_cache_space, &cached, refill, + rte_memory_order_release, + rte_memory_order_relaxed); + + goto retry; + } +} + +#define VECTOR_SIZE_BITS 0xFFFFFFFFFFF80000ULL +#define VECTOR_GET_LINE_OFFSET(line) (19 + (3 * line)) + +static uint64_t +vector_size_partial_mask(uint16_t off, uint16_t cnt) +{ + return (VECTOR_SIZE_BITS & ~(~0x0ULL << off)) | ((uint64_t)(cnt - 1) << off); +} + +static __rte_always_inline uint16_t +cn20k_sso_hws_new_event_lmtst(struct cn20k_sso_hws *ws, uint8_t queue_id, + const struct rte_event ev[], uint16_t n) +{ + uint16_t lines, partial_line, burst, left; + uint64_t wdata[2], pa[2] = {0}; + uintptr_t lmt_addr; + uint16_t sz0, sz1; + uint16_t lmt_id; + + sz0 = sz1 = 0; + lmt_addr = ws->aw_lmt; + ROC_LMT_BASE_ID_GET(lmt_addr, lmt_id); + + left = n; +again: + burst = RTE_MIN(BIT(ROC_SSO_AW_PER_LMT_LINE_LOG2 + ROC_LMT_LINES_PER_CORE_LOG2), left); + + /* Set wdata */ + lines = burst >> ROC_SSO_AW_PER_LMT_LINE_LOG2; + partial_line = burst & (BIT(ROC_SSO_AW_PER_LMT_LINE_LOG2) - 1); + wdata[0] = wdata[1] = 0; + if (lines > BIT(ROC_LMT_LINES_PER_STR_LOG2)) { + wdata[0] = lmt_id; + wdata[0] |= 15ULL << 12; + wdata[0] |= VECTOR_SIZE_BITS; + pa[0] = (ws->grp_base + (queue_id << 12) + SSO_LF_GGRP_OP_AW_LMTST) | (0x7 << 4); + sz0 = 16 << ROC_SSO_AW_PER_LMT_LINE_LOG2; + + wdata[1] = lmt_id + 16; + pa[1] = (ws->grp_base + (queue_id << 12) + SSO_LF_GGRP_OP_AW_LMTST) | (0x7 << 4); + + lines -= 17; + wdata[1] |= partial_line ? (uint64_t)(lines + 1) << 12 : (uint64_t)(lines << 12); + wdata[1] |= partial_line ? vector_size_partial_mask(VECTOR_GET_LINE_OFFSET(lines), + partial_line) : + VECTOR_SIZE_BITS; + sz1 = burst - sz0; + partial_line = 0; + } else if (lines) { + /* We need to handle two cases here: + * 1. Partial line spill over to wdata[1] i.e. lines == 16 + * 2. Partial line with spill lines < 16. + */ + wdata[0] = lmt_id; + pa[0] = (ws->grp_base + (queue_id << 12) + SSO_LF_GGRP_OP_AW_LMTST) | (0x7 << 4); + sz0 = lines << ROC_SSO_AW_PER_LMT_LINE_LOG2; + if (lines == 16) { + wdata[0] |= 15ULL << 12; + wdata[0] |= VECTOR_SIZE_BITS; + if (partial_line) { + wdata[1] = lmt_id + 16; + pa[1] = (ws->grp_base + (queue_id << 12) + + SSO_LF_GGRP_OP_AW_LMTST) | + ((partial_line - 1) << 4); + } + } else { + lines -= 1; + wdata[0] |= partial_line ? (uint64_t)(lines + 1) << 12 : + (uint64_t)(lines << 12); + wdata[0] |= partial_line ? + vector_size_partial_mask(VECTOR_GET_LINE_OFFSET(lines), + partial_line) : + VECTOR_SIZE_BITS; + sz0 += partial_line; + } + sz1 = burst - sz0; + partial_line = 0; + } + + /* Only partial lines */ + if (partial_line) { + wdata[0] = lmt_id; + pa[0] = (ws->grp_base + (queue_id << 12) + SSO_LF_GGRP_OP_AW_LMTST) | + ((partial_line - 1) << 4); + sz0 = partial_line; + sz1 = burst - sz0; + } + +#if defined(RTE_ARCH_ARM64) + uint64x2_t aw_mask = {0xC0FFFFFFFFULL, ~0x0ULL}; + uint64x2_t tt_mask = {0x300000000ULL, 0}; + uint16_t parts; + + while (burst) { + parts = burst > 7 ? 8 : plt_align32prevpow2(burst); + burst -= parts; + /* Lets try to fill at least one line per burst. 
*/ + switch (parts) { + case 8: { + uint64x2_t aw0, aw1, aw2, aw3, aw4, aw5, aw6, aw7; + + aw0 = vandq_u64(vld1q_u64((const uint64_t *)&ev[0]), aw_mask); + aw1 = vandq_u64(vld1q_u64((const uint64_t *)&ev[1]), aw_mask); + aw2 = vandq_u64(vld1q_u64((const uint64_t *)&ev[2]), aw_mask); + aw3 = vandq_u64(vld1q_u64((const uint64_t *)&ev[3]), aw_mask); + aw4 = vandq_u64(vld1q_u64((const uint64_t *)&ev[4]), aw_mask); + aw5 = vandq_u64(vld1q_u64((const uint64_t *)&ev[5]), aw_mask); + aw6 = vandq_u64(vld1q_u64((const uint64_t *)&ev[6]), aw_mask); + aw7 = vandq_u64(vld1q_u64((const uint64_t *)&ev[7]), aw_mask); + + aw0 = vorrq_u64(vandq_u64(vshrq_n_u64(aw0, 6), tt_mask), aw0); + aw1 = vorrq_u64(vandq_u64(vshrq_n_u64(aw1, 6), tt_mask), aw1); + aw2 = vorrq_u64(vandq_u64(vshrq_n_u64(aw2, 6), tt_mask), aw2); + aw3 = vorrq_u64(vandq_u64(vshrq_n_u64(aw3, 6), tt_mask), aw3); + aw4 = vorrq_u64(vandq_u64(vshrq_n_u64(aw4, 6), tt_mask), aw4); + aw5 = vorrq_u64(vandq_u64(vshrq_n_u64(aw5, 6), tt_mask), aw5); + aw6 = vorrq_u64(vandq_u64(vshrq_n_u64(aw6, 6), tt_mask), aw6); + aw7 = vorrq_u64(vandq_u64(vshrq_n_u64(aw7, 6), tt_mask), aw7); + + vst1q_u64((void *)lmt_addr, aw0); + vst1q_u64((void *)PLT_PTR_ADD(lmt_addr, 16), aw1); + vst1q_u64((void *)PLT_PTR_ADD(lmt_addr, 32), aw2); + vst1q_u64((void *)PLT_PTR_ADD(lmt_addr, 48), aw3); + vst1q_u64((void *)PLT_PTR_ADD(lmt_addr, 64), aw4); + vst1q_u64((void *)PLT_PTR_ADD(lmt_addr, 80), aw5); + vst1q_u64((void *)PLT_PTR_ADD(lmt_addr, 96), aw6); + vst1q_u64((void *)PLT_PTR_ADD(lmt_addr, 112), aw7); + lmt_addr = (uintptr_t)PLT_PTR_ADD(lmt_addr, 128); + } break; + case 4: { + uint64x2_t aw0, aw1, aw2, aw3; + aw0 = vandq_u64(vld1q_u64((const uint64_t *)&ev[0]), aw_mask); + aw1 = vandq_u64(vld1q_u64((const uint64_t *)&ev[1]), aw_mask); + aw2 = vandq_u64(vld1q_u64((const uint64_t *)&ev[2]), aw_mask); + aw3 = vandq_u64(vld1q_u64((const uint64_t *)&ev[3]), aw_mask); + + aw0 = vorrq_u64(vandq_u64(vshrq_n_u64(aw0, 6), tt_mask), aw0); + aw1 = vorrq_u64(vandq_u64(vshrq_n_u64(aw1, 6), tt_mask), aw1); + aw2 = vorrq_u64(vandq_u64(vshrq_n_u64(aw2, 6), tt_mask), aw2); + aw3 = vorrq_u64(vandq_u64(vshrq_n_u64(aw3, 6), tt_mask), aw3); + + vst1q_u64((void *)lmt_addr, aw0); + vst1q_u64((void *)PLT_PTR_ADD(lmt_addr, 16), aw1); + vst1q_u64((void *)PLT_PTR_ADD(lmt_addr, 32), aw2); + vst1q_u64((void *)PLT_PTR_ADD(lmt_addr, 48), aw3); + lmt_addr = (uintptr_t)PLT_PTR_ADD(lmt_addr, 64); + } break; + case 2: { + uint64x2_t aw0, aw1; + + aw0 = vandq_u64(vld1q_u64((const uint64_t *)&ev[0]), aw_mask); + aw1 = vandq_u64(vld1q_u64((const uint64_t *)&ev[1]), aw_mask); + + aw0 = vorrq_u64(vandq_u64(vshrq_n_u64(aw0, 6), tt_mask), aw0); + aw1 = vorrq_u64(vandq_u64(vshrq_n_u64(aw1, 6), tt_mask), aw1); + + vst1q_u64((void *)lmt_addr, aw0); + vst1q_u64((void *)PLT_PTR_ADD(lmt_addr, 16), aw1); + lmt_addr = (uintptr_t)PLT_PTR_ADD(lmt_addr, 32); + } break; + case 1: { + __uint128_t aw0; + + aw0 = ev[0].u64; + aw0 <<= 64; + aw0 |= ev[0].event & (BIT_ULL(32) - 1); + aw0 |= (uint64_t)ev[0].sched_type << 32; + + *((__uint128_t *)lmt_addr) = aw0; + lmt_addr = (uintptr_t)PLT_PTR_ADD(lmt_addr, 16); + } break; + } + ev += parts; + } +#else + uint16_t i; + + for (i = 0; i < burst; i++) { + __uint128_t aw0; + + aw0 = ev[0].u64; + aw0 <<= 64; + aw0 |= ev[0].event & (BIT_ULL(32) - 1); + aw0 |= (uint64_t)ev[0].sched_type << 32; + *((__uint128_t *)lmt_addr) = aw0; + lmt_addr = (uintptr_t)PLT_PTR_ADD(lmt_addr, 16); + } +#endif + + /* wdata[0] will be always valid */ + sso_lmt_aw_wait_fc(ws, sz0); + roc_lmt_submit_steorl(wdata[0], 
pa[0]); + if (wdata[1]) { + sso_lmt_aw_wait_fc(ws, sz1); + roc_lmt_submit_steorl(wdata[1], pa[1]); + } + + left -= (sz0 + sz1); + if (left) + goto again; + + return n; +} + +uint16_t __rte_hot +cn20k_sso_hws_enq_burst(void *port, const struct rte_event ev[], uint16_t nb_events) +{ + struct cn20k_sso_hws *ws = port; + + RTE_SET_USED(nb_events); + switch (ev->op) { + case RTE_EVENT_OP_NEW: + return cn20k_sso_hws_new_event(ws, ev); + case RTE_EVENT_OP_FORWARD: + cn20k_sso_hws_forward_event(ws, ev); + break; + case RTE_EVENT_OP_RELEASE: + if (ws->swtag_req) { + cnxk_sso_hws_desched(ev->u64, ws->base); + ws->swtag_req = 0; + break; + } + cnxk_sso_hws_swtag_flush(ws->base); + break; + default: + return 0; + } + + return 1; +} + +uint16_t __rte_hot +cn20k_sso_hws_enq_new_burst(void *port, const struct rte_event ev[], uint16_t nb_events) +{ + uint16_t idx = 0, done = 0, rc = 0; + struct cn20k_sso_hws *ws = port; + uint8_t queue_id; + int32_t space; + + /* Do a common back-pressure check and return */ + space = sso_read_xaq_space(ws) - ws->xae_waes; + if (space <= 0) + return 0; + nb_events = space < nb_events ? space : nb_events; + + do { + queue_id = ev[idx].queue_id; + for (idx = idx + 1; idx < nb_events; idx++) + if (queue_id != ev[idx].queue_id) + break; + + rc = cn20k_sso_hws_new_event_lmtst(ws, queue_id, &ev[done], idx - done); + if (rc != (idx - done)) + return rc + done; + done += rc; + + } while (done < nb_events); + + return done; +} + +uint16_t __rte_hot +cn20k_sso_hws_enq_fwd_burst(void *port, const struct rte_event ev[], uint16_t nb_events) +{ + struct cn20k_sso_hws *ws = port; + + RTE_SET_USED(nb_events); + cn20k_sso_hws_forward_event(ws, ev); + + return 1; +} + +int __rte_hot +cn20k_sso_hws_profile_switch(void *port, uint8_t profile) +{ + struct cn20k_sso_hws *ws = port; + + ws->gw_wdata &= ~(0xFFUL); + ws->gw_wdata |= (profile + 1); + + return 0; +} + +int __rte_hot +cn20k_sso_hws_preschedule_modify(void *port, enum rte_event_dev_preschedule_type type) +{ + struct cn20k_sso_hws *ws = port; + + ws->gw_wdata &= ~(BIT(19) | BIT(20)); + switch (type) { + default: + case RTE_EVENT_PRESCHEDULE_NONE: + break; + case RTE_EVENT_PRESCHEDULE: + ws->gw_wdata |= BIT(19); + break; + case RTE_EVENT_PRESCHEDULE_ADAPTIVE: + ws->gw_wdata |= BIT(19) | BIT(20); + break; + } + + return 0; +} + +void __rte_hot +cn20k_sso_hws_preschedule(void *port, enum rte_event_dev_preschedule_type type) +{ + struct cn20k_sso_hws *ws = port; + + RTE_SET_USED(type); + plt_write64(ws->gw_wdata, ws->base + SSOW_LF_GWS_OP_PRF_GETWORK); +} diff --git a/drivers/event/cnxk/cn20k_worker.h b/drivers/event/cnxk/cn20k_worker.h new file mode 100644 index 0000000000..b014e549b9 --- /dev/null +++ b/drivers/event/cnxk/cn20k_worker.h @@ -0,0 +1,378 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(C) 2024 Marvell. + */ + +#ifndef __CN20K_WORKER_H__ +#define __CN20K_WORKER_H__ + +#include +#include + +#include "cn20k_eventdev.h" +#include "cn20k_rx.h" +#include "cnxk_worker.h" + +/* CN20K Rx event fastpath */ + +static __rte_always_inline void +cn20k_wqe_to_mbuf(uint64_t wqe, const uint64_t __mbuf, uint8_t port_id, const uint32_t tag, + const uint32_t flags, const void *const lookup_mem, uintptr_t cpth, + uintptr_t sa_base) +{ + const uint64_t mbuf_init = + 0x100010000ULL | RTE_PKTMBUF_HEADROOM | (flags & NIX_RX_OFFLOAD_TSTAMP_F ? 
8 : 0); + struct rte_mbuf *mbuf = (struct rte_mbuf *)__mbuf; + + cn20k_nix_cqe_to_mbuf((struct nix_cqe_hdr_s *)wqe, tag, (struct rte_mbuf *)mbuf, lookup_mem, + mbuf_init | ((uint64_t)port_id) << 48, cpth, sa_base, flags); +} + +static void +cn20k_sso_process_tstamp(uint64_t u64, uint64_t mbuf, struct cnxk_timesync_info *tstamp) +{ + uint64_t tstamp_ptr; + uint8_t laptr; + + laptr = (uint8_t)*(uint64_t *)(u64 + (CNXK_SSO_WQE_LAYR_PTR * sizeof(uint64_t))); + if (laptr == sizeof(uint64_t)) { + /* Extracting tstamp, if PTP enabled*/ + tstamp_ptr = *(uint64_t *)(((struct nix_wqe_hdr_s *)u64) + CNXK_SSO_WQE_SG_PTR); + cn20k_nix_mbuf_to_tstamp((struct rte_mbuf *)mbuf, tstamp, true, + (uint64_t *)tstamp_ptr); + } +} + +static __rte_always_inline void +cn20k_process_vwqe(uintptr_t vwqe, uint16_t port_id, const uint32_t flags, struct cn20k_sso_hws *ws) +{ + uint64_t mbuf_init = 0x100010000ULL | RTE_PKTMBUF_HEADROOM; + struct cnxk_timesync_info *tstamp = ws->tstamp[port_id]; + void *lookup_mem = ws->lookup_mem; + uintptr_t lbase = ws->lmt_base; + struct rte_event_vector *vec; + uint16_t nb_mbufs, non_vec; + struct rte_mbuf **wqe; + struct rte_mbuf *mbuf; + uint64_t sa_base = 0; + uintptr_t cpth = 0; + int i; + + mbuf_init |= ((uint64_t)port_id) << 48; + vec = (struct rte_event_vector *)vwqe; + wqe = vec->mbufs; + + rte_prefetch0(&vec->ptrs[0]); +#define OBJS_PER_CLINE (RTE_CACHE_LINE_SIZE / sizeof(void *)) + for (i = OBJS_PER_CLINE; i < vec->nb_elem; i += OBJS_PER_CLINE) + rte_prefetch0(&vec->ptrs[i]); + + if (flags & NIX_RX_OFFLOAD_TSTAMP_F && tstamp) + mbuf_init |= 8; + + nb_mbufs = RTE_ALIGN_FLOOR(vec->nb_elem, NIX_DESCS_PER_LOOP); + nb_mbufs = cn20k_nix_recv_pkts_vector(&mbuf_init, wqe, nb_mbufs, flags | NIX_RX_VWQE_F, + lookup_mem, tstamp, lbase, 0); + wqe += nb_mbufs; + non_vec = vec->nb_elem - nb_mbufs; + + while (non_vec) { + struct nix_cqe_hdr_s *cqe = (struct nix_cqe_hdr_s *)wqe[0]; + + mbuf = (struct rte_mbuf *)((char *)cqe - sizeof(struct rte_mbuf)); + + /* Mark mempool obj as "get" as it is alloc'ed by NIX */ + RTE_MEMPOOL_CHECK_COOKIES(mbuf->pool, (void **)&mbuf, 1, 1); + + cn20k_nix_cqe_to_mbuf(cqe, cqe->tag, mbuf, lookup_mem, mbuf_init, cpth, sa_base, + flags); + + if (flags & NIX_RX_OFFLOAD_TSTAMP_F) + cn20k_sso_process_tstamp((uint64_t)wqe[0], (uint64_t)mbuf, tstamp); + wqe[0] = (struct rte_mbuf *)mbuf; + non_vec--; + wqe++; + } +} + +static __rte_always_inline void +cn20k_sso_hws_post_process(struct cn20k_sso_hws *ws, uint64_t *u64, const uint32_t flags) +{ + uintptr_t sa_base = 0; + + u64[0] = (u64[0] & (0x3ull << 32)) << 6 | (u64[0] & (0x3FFull << 36)) << 4 | + (u64[0] & 0xffffffff); + if (CNXK_EVENT_TYPE_FROM_TAG(u64[0]) == RTE_EVENT_TYPE_ETHDEV) { + uint8_t port = CNXK_SUB_EVENT_FROM_TAG(u64[0]); + uintptr_t cpth = 0; + uint64_t mbuf; + + mbuf = u64[1] - sizeof(struct rte_mbuf); + rte_prefetch0((void *)mbuf); + + /* Mark mempool obj as "get" as it is alloc'ed by NIX */ + RTE_MEMPOOL_CHECK_COOKIES(((struct rte_mbuf *)mbuf)->pool, (void **)&mbuf, 1, 1); + + u64[0] = CNXK_CLR_SUB_EVENT(u64[0]); + cn20k_wqe_to_mbuf(u64[1], mbuf, port, u64[0] & 0xFFFFF, flags, ws->lookup_mem, cpth, + sa_base); + if (flags & NIX_RX_OFFLOAD_TSTAMP_F) + cn20k_sso_process_tstamp(u64[1], mbuf, ws->tstamp[port]); + u64[1] = mbuf; + } else if (CNXK_EVENT_TYPE_FROM_TAG(u64[0]) == RTE_EVENT_TYPE_ETHDEV_VECTOR) { + uint8_t port = CNXK_SUB_EVENT_FROM_TAG(u64[0]); + __uint128_t vwqe_hdr = *(__uint128_t *)u64[1]; + + vwqe_hdr = ((vwqe_hdr >> 64) & 0xFFF) | BIT_ULL(31) | ((vwqe_hdr & 0xFFFF) << 48) | + 
((uint64_t)port << 32); + *(uint64_t *)u64[1] = (uint64_t)vwqe_hdr; + cn20k_process_vwqe(u64[1], port, flags, ws); + /* Mark vector mempool object as get */ + RTE_MEMPOOL_CHECK_COOKIES(rte_mempool_from_obj((void *)u64[1]), (void **)&u64[1], 1, + 1); + } else if (CNXK_EVENT_TYPE_FROM_TAG(u64[0]) == RTE_EVENT_TYPE_TIMER) { + struct rte_event_timer *tev = (struct rte_event_timer *)u64[1]; + + tev->state = RTE_EVENT_TIMER_NOT_ARMED; + u64[1] = tev->ev.u64; + } +} + +static __rte_always_inline uint16_t +cn20k_sso_hws_get_work(struct cn20k_sso_hws *ws, struct rte_event *ev, const uint32_t flags) +{ + union { + __uint128_t get_work; + uint64_t u64[2]; + } gw; + + gw.get_work = ws->gw_wdata; +#if defined(RTE_ARCH_ARM64) +#if defined(__clang__) + register uint64_t x0 __asm("x0") = (uint64_t)gw.u64[0]; + register uint64_t x1 __asm("x1") = (uint64_t)gw.u64[1]; +#if defined(RTE_ARM_USE_WFE) + plt_write64(gw.u64[0], ws->base + SSOW_LF_GWS_OP_GET_WORK0); + asm volatile(PLT_CPU_FEATURE_PREAMBLE + " ldp %[x0], %[x1], [%[tag_loc]] \n" + " tbz %[x0], %[pend_gw], done%= \n" + " sevl \n" + "rty%=: wfe \n" + " ldp %[x0], %[x1], [%[tag_loc]] \n" + " tbnz %[x0], %[pend_gw], rty%= \n" + "done%=: \n" + " dmb ld \n" + : [x0] "+r" (x0), [x1] "+r" (x1) + : [tag_loc] "r"(ws->base + SSOW_LF_GWS_WQE0), + [pend_gw] "i"(SSOW_LF_GWS_TAG_PEND_GET_WORK_BIT) + : "memory"); +#else + asm volatile(".arch armv8-a+lse\n" + "caspal %[x0], %[x1], %[x0], %[x1], [%[dst]]\n" + : [x0] "+r" (x0), [x1] "+r" (x1) + : [dst] "r"(ws->base + SSOW_LF_GWS_OP_GET_WORK0) + : "memory"); +#endif + gw.u64[0] = x0; + gw.u64[1] = x1; +#else +#if defined(RTE_ARM_USE_WFE) + plt_write64(gw.u64[0], ws->base + SSOW_LF_GWS_OP_GET_WORK0); + asm volatile(PLT_CPU_FEATURE_PREAMBLE + " ldp %[wdata], %H[wdata], [%[tag_loc]] \n" + " tbz %[wdata], %[pend_gw], done%= \n" + " sevl \n" + "rty%=: wfe \n" + " ldp %[wdata], %H[wdata], [%[tag_loc]] \n" + " tbnz %[wdata], %[pend_gw], rty%= \n" + "done%=: \n" + " dmb ld \n" + : [wdata] "=&r"(gw.get_work) + : [tag_loc] "r"(ws->base + SSOW_LF_GWS_WQE0), + [pend_gw] "i"(SSOW_LF_GWS_TAG_PEND_GET_WORK_BIT) + : "memory"); +#else + asm volatile(PLT_CPU_FEATURE_PREAMBLE + "caspal %[wdata], %H[wdata], %[wdata], %H[wdata], [%[gw_loc]]\n" + : [wdata] "+r"(gw.get_work) + : [gw_loc] "r"(ws->base + SSOW_LF_GWS_OP_GET_WORK0) + : "memory"); +#endif +#endif +#else + plt_write64(gw.u64[0], ws->base + SSOW_LF_GWS_OP_GET_WORK0); + do { + roc_load_pair(gw.u64[0], gw.u64[1], ws->base + SSOW_LF_GWS_WQE0); + } while (gw.u64[0] & BIT_ULL(63)); + rte_atomic_thread_fence(rte_memory_order_seq_cst); +#endif + ws->gw_rdata = gw.u64[0]; + if (gw.u64[1]) + cn20k_sso_hws_post_process(ws, gw.u64, flags); + + ev->event = gw.u64[0]; + ev->u64 = gw.u64[1]; + + return !!gw.u64[1]; +} + +/* Used in cleaning up workslot. 
*/ +static __rte_always_inline uint16_t +cn20k_sso_hws_get_work_empty(struct cn20k_sso_hws *ws, struct rte_event *ev, const uint32_t flags) +{ + union { + __uint128_t get_work; + uint64_t u64[2]; + } gw; + +#ifdef RTE_ARCH_ARM64 + asm volatile(PLT_CPU_FEATURE_PREAMBLE + " ldp %[tag], %[wqp], [%[tag_loc]] \n" + " tbz %[tag], 63, .Ldone%= \n" + " sevl \n" + ".Lrty%=: wfe \n" + " ldp %[tag], %[wqp], [%[tag_loc]] \n" + " tbnz %[tag], 63, .Lrty%= \n" + ".Ldone%=: dmb ld \n" + : [tag] "=&r"(gw.u64[0]), [wqp] "=&r"(gw.u64[1]) + : [tag_loc] "r"(ws->base + SSOW_LF_GWS_WQE0) + : "memory"); +#else + do { + roc_load_pair(gw.u64[0], gw.u64[1], ws->base + SSOW_LF_GWS_WQE0); + } while (gw.u64[0] & BIT_ULL(63)); +#endif + + ws->gw_rdata = gw.u64[0]; + if (gw.u64[1]) + cn20k_sso_hws_post_process(ws, gw.u64, flags); + else + gw.u64[0] = (gw.u64[0] & (0x3ull << 32)) << 6 | + (gw.u64[0] & (0x3FFull << 36)) << 4 | (gw.u64[0] & 0xffffffff); + + ev->event = gw.u64[0]; + ev->u64 = gw.u64[1]; + + return !!gw.u64[1]; +} + +/* CN20K Fastpath functions. */ +uint16_t __rte_hot cn20k_sso_hws_enq_burst(void *port, const struct rte_event ev[], + uint16_t nb_events); +uint16_t __rte_hot cn20k_sso_hws_enq_new_burst(void *port, const struct rte_event ev[], + uint16_t nb_events); +uint16_t __rte_hot cn20k_sso_hws_enq_fwd_burst(void *port, const struct rte_event ev[], + uint16_t nb_events); +int __rte_hot cn20k_sso_hws_profile_switch(void *port, uint8_t profile); +int __rte_hot cn20k_sso_hws_preschedule_modify(void *port, + enum rte_event_dev_preschedule_type type); +void __rte_hot cn20k_sso_hws_preschedule(void *port, enum rte_event_dev_preschedule_type type); + +#define R(name, flags) \ + uint16_t __rte_hot cn20k_sso_hws_deq_burst_##name( \ + void *port, struct rte_event ev[], uint16_t nb_events, uint64_t timeout_ticks); \ + uint16_t __rte_hot cn20k_sso_hws_deq_tmo_burst_##name( \ + void *port, struct rte_event ev[], uint16_t nb_events, uint64_t timeout_ticks); \ + uint16_t __rte_hot cn20k_sso_hws_deq_ca_burst_##name( \ + void *port, struct rte_event ev[], uint16_t nb_events, uint64_t timeout_ticks); \ + uint16_t __rte_hot cn20k_sso_hws_deq_tmo_ca_burst_##name( \ + void *port, struct rte_event ev[], uint16_t nb_events, uint64_t timeout_ticks); \ + uint16_t __rte_hot cn20k_sso_hws_deq_seg_burst_##name( \ + void *port, struct rte_event ev[], uint16_t nb_events, uint64_t timeout_ticks); \ + uint16_t __rte_hot cn20k_sso_hws_deq_tmo_seg_burst_##name( \ + void *port, struct rte_event ev[], uint16_t nb_events, uint64_t timeout_ticks); \ + uint16_t __rte_hot cn20k_sso_hws_deq_ca_seg_burst_##name( \ + void *port, struct rte_event ev[], uint16_t nb_events, uint64_t timeout_ticks); \ + uint16_t __rte_hot cn20k_sso_hws_deq_tmo_ca_seg_burst_##name( \ + void *port, struct rte_event ev[], uint16_t nb_events, uint64_t timeout_ticks); \ + uint16_t __rte_hot cn20k_sso_hws_reas_deq_burst_##name( \ + void *port, struct rte_event ev[], uint16_t nb_events, uint64_t timeout_ticks); \ + uint16_t __rte_hot cn20k_sso_hws_reas_deq_tmo_burst_##name( \ + void *port, struct rte_event ev[], uint16_t nb_events, uint64_t timeout_ticks); \ + uint16_t __rte_hot cn20k_sso_hws_reas_deq_ca_burst_##name( \ + void *port, struct rte_event ev[], uint16_t nb_events, uint64_t timeout_ticks); \ + uint16_t __rte_hot cn20k_sso_hws_reas_deq_tmo_ca_burst_##name( \ + void *port, struct rte_event ev[], uint16_t nb_events, uint64_t timeout_ticks); \ + uint16_t __rte_hot cn20k_sso_hws_reas_deq_seg_burst_##name( \ + void *port, struct rte_event ev[], uint16_t nb_events, 
uint64_t timeout_ticks); \ + uint16_t __rte_hot cn20k_sso_hws_reas_deq_tmo_seg_burst_##name( \ + void *port, struct rte_event ev[], uint16_t nb_events, uint64_t timeout_ticks); \ + uint16_t __rte_hot cn20k_sso_hws_reas_deq_ca_seg_burst_##name( \ + void *port, struct rte_event ev[], uint16_t nb_events, uint64_t timeout_ticks); \ + uint16_t __rte_hot cn20k_sso_hws_reas_deq_tmo_ca_seg_burst_##name( \ + void *port, struct rte_event ev[], uint16_t nb_events, uint64_t timeout_ticks); + +NIX_RX_FASTPATH_MODES +#undef R + +#define SSO_DEQ(fn, flags) \ + static __rte_always_inline uint16_t fn(void *port, struct rte_event *ev, \ + uint64_t timeout_ticks) \ + { \ + struct cn20k_sso_hws *ws = port; \ + RTE_SET_USED(timeout_ticks); \ + if (ws->swtag_req) { \ + ws->swtag_req = 0; \ + ws->gw_rdata = cnxk_sso_hws_swtag_wait(ws->base + SSOW_LF_GWS_WQE0); \ + return 1; \ + } \ + return cn20k_sso_hws_get_work(ws, ev, flags); \ + } + +#define SSO_DEQ_SEG(fn, flags) SSO_DEQ(fn, flags | NIX_RX_MULTI_SEG_F) + +#define SSO_DEQ_TMO(fn, flags) \ + static __rte_always_inline uint16_t fn(void *port, struct rte_event *ev, \ + uint64_t timeout_ticks) \ + { \ + struct cn20k_sso_hws *ws = port; \ + uint16_t ret = 1; \ + uint64_t iter; \ + if (ws->swtag_req) { \ + ws->swtag_req = 0; \ + ws->gw_rdata = cnxk_sso_hws_swtag_wait(ws->base + SSOW_LF_GWS_WQE0); \ + return ret; \ + } \ + ret = cn20k_sso_hws_get_work(ws, ev, flags); \ + for (iter = 1; iter < timeout_ticks && (ret == 0); iter++) \ + ret = cn20k_sso_hws_get_work(ws, ev, flags); \ + return ret; \ + } + +#define SSO_DEQ_TMO_SEG(fn, flags) SSO_DEQ_TMO(fn, flags | NIX_RX_MULTI_SEG_F) + +#define R(name, flags) \ + SSO_DEQ(cn20k_sso_hws_deq_##name, flags) \ + SSO_DEQ(cn20k_sso_hws_reas_deq_##name, flags | NIX_RX_REAS_F) \ + SSO_DEQ_SEG(cn20k_sso_hws_deq_seg_##name, flags) \ + SSO_DEQ_SEG(cn20k_sso_hws_reas_deq_seg_##name, flags | NIX_RX_REAS_F) \ + SSO_DEQ_TMO(cn20k_sso_hws_deq_tmo_##name, flags) \ + SSO_DEQ_TMO(cn20k_sso_hws_reas_deq_tmo_##name, flags | NIX_RX_REAS_F) \ + SSO_DEQ_TMO_SEG(cn20k_sso_hws_deq_tmo_seg_##name, flags) \ + SSO_DEQ_TMO_SEG(cn20k_sso_hws_reas_deq_tmo_seg_##name, flags | NIX_RX_REAS_F) + +NIX_RX_FASTPATH_MODES +#undef R + +#define SSO_CMN_DEQ_BURST(fnb, fn, flags) \ + uint16_t __rte_hot fnb(void *port, struct rte_event ev[], uint16_t nb_events, \ + uint64_t timeout_ticks) \ + { \ + RTE_SET_USED(nb_events); \ + return fn(port, ev, timeout_ticks); \ + } + +#define SSO_CMN_DEQ_SEG_BURST(fnb, fn, flags) \ + uint16_t __rte_hot fnb(void *port, struct rte_event ev[], uint16_t nb_events, \ + uint64_t timeout_ticks) \ + { \ + RTE_SET_USED(nb_events); \ + return fn(port, ev, timeout_ticks); \ + } + +uint16_t __rte_hot cn20k_sso_hws_deq_burst_all_offload(void *port, struct rte_event ev[], + uint16_t nb_events, uint64_t timeout_ticks); +uint16_t __rte_hot cn20k_sso_hws_deq_burst_all_offload_tst(void *port, struct rte_event ev[], + uint16_t nb_events, + uint64_t timeout_ticks); + +#endif diff --git a/drivers/event/cnxk/cn9k_eventdev.c b/drivers/event/cnxk/cn9k_eventdev.c index 377e910837..05e237c005 100644 --- a/drivers/event/cnxk/cn9k_eventdev.c +++ b/drivers/event/cnxk/cn9k_eventdev.c @@ -74,7 +74,7 @@ cn9k_sso_hws_setup(void *arg, void *hws, uintptr_t grp_base) if (dev->dual_ws) { dws = hws; dws->grp_base = grp_base; - dws->fc_mem = (uint64_t *)dev->fc_iova; + dws->fc_mem = (uint64_t __rte_atomic *)dev->fc_iova; dws->xaq_lmt = dev->xaq_lmt; plt_write64(val, dws->base[0] + SSOW_LF_GWS_NW_TIM); @@ -82,7 +82,7 @@ cn9k_sso_hws_setup(void *arg, void *hws, 
uintptr_t grp_base) } else { ws = hws; ws->grp_base = grp_base; - ws->fc_mem = (uint64_t *)dev->fc_iova; + ws->fc_mem = (uint64_t __rte_atomic *)dev->fc_iova; ws->xaq_lmt = dev->xaq_lmt; plt_write64(val, ws->base + SSOW_LF_GWS_NW_TIM); @@ -314,48 +314,24 @@ cn9k_sso_fp_tmplt_fns_set(struct rte_eventdev *event_dev) #if !defined(CNXK_DIS_TMPLT_FUNC) struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev); /* Single WS modes */ - const event_dequeue_t sso_hws_deq[NIX_RX_OFFLOAD_MAX] = { -#define R(name, flags)[flags] = cn9k_sso_hws_deq_##name, - NIX_RX_FASTPATH_MODES -#undef R - }; - const event_dequeue_burst_t sso_hws_deq_burst[NIX_RX_OFFLOAD_MAX] = { #define R(name, flags)[flags] = cn9k_sso_hws_deq_burst_##name, NIX_RX_FASTPATH_MODES #undef R }; - const event_dequeue_t sso_hws_deq_tmo[NIX_RX_OFFLOAD_MAX] = { -#define R(name, flags)[flags] = cn9k_sso_hws_deq_tmo_##name, - NIX_RX_FASTPATH_MODES -#undef R - }; - const event_dequeue_burst_t sso_hws_deq_tmo_burst[NIX_RX_OFFLOAD_MAX] = { #define R(name, flags)[flags] = cn9k_sso_hws_deq_tmo_burst_##name, NIX_RX_FASTPATH_MODES #undef R }; - const event_dequeue_t sso_hws_deq_seg[NIX_RX_OFFLOAD_MAX] = { -#define R(name, flags)[flags] = cn9k_sso_hws_deq_seg_##name, - NIX_RX_FASTPATH_MODES -#undef R - }; - const event_dequeue_burst_t sso_hws_deq_seg_burst[NIX_RX_OFFLOAD_MAX] = { #define R(name, flags)[flags] = cn9k_sso_hws_deq_seg_burst_##name, NIX_RX_FASTPATH_MODES #undef R }; - const event_dequeue_t sso_hws_deq_tmo_seg[NIX_RX_OFFLOAD_MAX] = { -#define R(name, flags)[flags] = cn9k_sso_hws_deq_tmo_seg_##name, - NIX_RX_FASTPATH_MODES -#undef R - }; - const event_dequeue_burst_t sso_hws_deq_tmo_seg_burst[NIX_RX_OFFLOAD_MAX] = { #define R(name, flags)[flags] = cn9k_sso_hws_deq_tmo_seg_burst_##name, NIX_RX_FASTPATH_MODES @@ -363,48 +339,24 @@ cn9k_sso_fp_tmplt_fns_set(struct rte_eventdev *event_dev) }; /* Dual WS modes */ - const event_dequeue_t sso_hws_dual_deq[NIX_RX_OFFLOAD_MAX] = { -#define R(name, flags)[flags] = cn9k_sso_hws_dual_deq_##name, - NIX_RX_FASTPATH_MODES -#undef R - }; - const event_dequeue_burst_t sso_hws_dual_deq_burst[NIX_RX_OFFLOAD_MAX] = { #define R(name, flags)[flags] = cn9k_sso_hws_dual_deq_burst_##name, NIX_RX_FASTPATH_MODES #undef R }; - const event_dequeue_t sso_hws_dual_deq_tmo[NIX_RX_OFFLOAD_MAX] = { -#define R(name, flags)[flags] = cn9k_sso_hws_dual_deq_tmo_##name, - NIX_RX_FASTPATH_MODES -#undef R - }; - const event_dequeue_burst_t sso_hws_dual_deq_tmo_burst[NIX_RX_OFFLOAD_MAX] = { #define R(name, flags)[flags] = cn9k_sso_hws_dual_deq_tmo_burst_##name, NIX_RX_FASTPATH_MODES #undef R }; - const event_dequeue_t sso_hws_dual_deq_seg[NIX_RX_OFFLOAD_MAX] = { -#define R(name, flags)[flags] = cn9k_sso_hws_dual_deq_seg_##name, - NIX_RX_FASTPATH_MODES -#undef R - }; - const event_dequeue_burst_t sso_hws_dual_deq_seg_burst[NIX_RX_OFFLOAD_MAX] = { #define R(name, flags)[flags] = cn9k_sso_hws_dual_deq_seg_burst_##name, NIX_RX_FASTPATH_MODES #undef R }; - const event_dequeue_t sso_hws_dual_deq_tmo_seg[NIX_RX_OFFLOAD_MAX] = { -#define R(name, flags)[flags] = cn9k_sso_hws_dual_deq_tmo_seg_##name, - NIX_RX_FASTPATH_MODES -#undef R - }; - const event_dequeue_burst_t sso_hws_dual_deq_tmo_seg_burst[NIX_RX_OFFLOAD_MAX] = { #define R(name, flags)[flags] = cn9k_sso_hws_dual_deq_tmo_seg_burst_##name, NIX_RX_FASTPATH_MODES @@ -436,31 +388,22 @@ cn9k_sso_fp_tmplt_fns_set(struct rte_eventdev *event_dev) #undef T }; - event_dev->enqueue = cn9k_sso_hws_enq; event_dev->enqueue_burst = cn9k_sso_hws_enq_burst; event_dev->enqueue_new_burst = 
cn9k_sso_hws_enq_new_burst; event_dev->enqueue_forward_burst = cn9k_sso_hws_enq_fwd_burst; event_dev->profile_switch = cn9k_sso_hws_profile_switch; if (dev->rx_offloads & NIX_RX_MULTI_SEG_F) { - CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue, sso_hws_deq_seg); CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue_burst, sso_hws_deq_seg_burst); - if (dev->is_timeout_deq) { - CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue, - sso_hws_deq_tmo_seg); + if (dev->is_timeout_deq) CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue_burst, sso_hws_deq_tmo_seg_burst); - } } else { - CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue, sso_hws_deq); CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue_burst, sso_hws_deq_burst); - if (dev->is_timeout_deq) { - CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue, - sso_hws_deq_tmo); + if (dev->is_timeout_deq) CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue_burst, sso_hws_deq_tmo_burst); - } } event_dev->ca_enqueue = cn9k_sso_hws_ca_enq; event_dev->dma_enqueue = cn9k_dma_adapter_enqueue; @@ -473,7 +416,6 @@ cn9k_sso_fp_tmplt_fns_set(struct rte_eventdev *event_dev) sso_hws_tx_adptr_enq); if (dev->dual_ws) { - event_dev->enqueue = cn9k_sso_hws_dual_enq; event_dev->enqueue_burst = cn9k_sso_hws_dual_enq_burst; event_dev->enqueue_new_burst = cn9k_sso_hws_dual_enq_new_burst; event_dev->enqueue_forward_burst = @@ -483,25 +425,17 @@ cn9k_sso_fp_tmplt_fns_set(struct rte_eventdev *event_dev) event_dev->profile_switch = cn9k_sso_hws_dual_profile_switch; if (dev->rx_offloads & NIX_RX_MULTI_SEG_F) { - CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue, - sso_hws_dual_deq_seg); CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue_burst, sso_hws_dual_deq_seg_burst); if (dev->is_timeout_deq) { - CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue, - sso_hws_dual_deq_tmo_seg); CN9K_SET_EVDEV_DEQ_OP( dev, event_dev->dequeue_burst, sso_hws_dual_deq_tmo_seg_burst); } } else { - CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue, - sso_hws_dual_deq); CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue_burst, sso_hws_dual_deq_burst); if (dev->is_timeout_deq) { - CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue, - sso_hws_dual_deq_tmo); CN9K_SET_EVDEV_DEQ_OP( dev, event_dev->dequeue_burst, sso_hws_dual_deq_tmo_burst); @@ -531,12 +465,9 @@ cn9k_sso_fp_blk_fns_set(struct rte_eventdev *event_dev) #if defined(CNXK_DIS_TMPLT_FUNC) struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev); - event_dev->dequeue = cn9k_sso_hws_deq_all_offload; event_dev->dequeue_burst = cn9k_sso_hws_deq_burst_all_offload; - if (dev->rx_offloads & NIX_RX_OFFLOAD_TSTAMP_F) { - event_dev->dequeue = cn9k_sso_hws_deq_all_offload_tst; + if (dev->rx_offloads & NIX_RX_OFFLOAD_TSTAMP_F) event_dev->dequeue_burst = cn9k_sso_hws_deq_burst_all_offload_tst; - } event_dev->txa_enqueue = cn9k_sso_hws_tx_adptr_enq_seg_all_offload; event_dev->txa_enqueue_same_dest = cn9k_sso_hws_tx_adptr_enq_seg_all_offload; if (dev->tx_offloads & NIX_TX_OFFLOAD_TSTAMP_F) { @@ -544,12 +475,9 @@ cn9k_sso_fp_blk_fns_set(struct rte_eventdev *event_dev) event_dev->txa_enqueue_same_dest = cn9k_sso_hws_tx_adptr_enq_seg_all_offload_tst; } if (dev->dual_ws) { - event_dev->dequeue = cn9k_sso_hws_deq_dual_all_offload; event_dev->dequeue_burst = cn9k_sso_hws_deq_dual_burst_all_offload; - if (dev->rx_offloads & NIX_RX_OFFLOAD_TSTAMP_F) { - event_dev->dequeue = cn9k_sso_hws_deq_dual_all_offload_tst; + if (dev->rx_offloads & NIX_RX_OFFLOAD_TSTAMP_F) event_dev->dequeue_burst = cn9k_sso_hws_deq_dual_burst_all_offload_tst; - } event_dev->txa_enqueue = cn9k_sso_hws_tx_adptr_enq_dual_seg_all_offload; 
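/*
 * Pattern used throughout this patch (noted here as a hedged reading of the
 * surrounding hunks): only the *_burst handlers stay registered as fast-path
 * ops. The scalar enqueue functions (cn9k_sso_hws_enq, cn9k_sso_hws_dual_enq)
 * are folded into their *_burst counterparts, and the scalar dequeue templates
 * become static __rte_always_inline helpers reached only through the
 * SSO_CMN_DEQ_BURST wrappers; eventdev itself no longer keeps a non-burst
 * dequeue pointer (see the fp_op->dequeue removal in
 * eventdev_fops_tstamp_update() below).
 */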
event_dev->txa_enqueue_same_dest = cn9k_sso_hws_tx_adptr_enq_dual_seg_all_offload; if (dev->tx_offloads & NIX_TX_OFFLOAD_TSTAMP_F) { @@ -573,7 +501,6 @@ cn9k_sso_fp_fns_set(struct rte_eventdev *event_dev) cn9k_sso_fp_blk_fns_set(event_dev); cn9k_sso_fp_tmplt_fns_set(event_dev); - event_dev->enqueue = cn9k_sso_hws_enq; event_dev->enqueue_burst = cn9k_sso_hws_enq_burst; event_dev->enqueue_new_burst = cn9k_sso_hws_enq_new_burst; event_dev->enqueue_forward_burst = cn9k_sso_hws_enq_fwd_burst; @@ -581,7 +508,6 @@ cn9k_sso_fp_fns_set(struct rte_eventdev *event_dev) event_dev->profile_switch = cn9k_sso_hws_profile_switch; if (dev->dual_ws) { - event_dev->enqueue = cn9k_sso_hws_dual_enq; event_dev->enqueue_burst = cn9k_sso_hws_dual_enq_burst; event_dev->enqueue_new_burst = cn9k_sso_hws_dual_enq_new_burst; event_dev->enqueue_forward_burst = cn9k_sso_hws_dual_enq_fwd_burst; @@ -896,14 +822,14 @@ cn9k_sso_set_priv_mem(const struct rte_eventdev *event_dev, void *lookup_mem) struct cn9k_sso_hws_dual *dws = event_dev->data->ports[i]; dws->xaq_lmt = dev->xaq_lmt; - dws->fc_mem = (uint64_t *)dev->fc_iova; + dws->fc_mem = (uint64_t __rte_atomic *)dev->fc_iova; dws->tstamp = dev->tstamp; if (lookup_mem) dws->lookup_mem = lookup_mem; } else { struct cn9k_sso_hws *ws = event_dev->data->ports[i]; ws->xaq_lmt = dev->xaq_lmt; - ws->fc_mem = (uint64_t *)dev->fc_iova; + ws->fc_mem = (uint64_t __rte_atomic *)dev->fc_iova; ws->tstamp = dev->tstamp; if (lookup_mem) ws->lookup_mem = lookup_mem; @@ -917,7 +843,6 @@ eventdev_fops_tstamp_update(struct rte_eventdev *event_dev) struct rte_event_fp_ops *fp_op = rte_event_fp_ops + event_dev->data->dev_id; - fp_op->dequeue = event_dev->dequeue; fp_op->dequeue_burst = event_dev->dequeue_burst; } diff --git a/drivers/event/cnxk/cn9k_worker.c b/drivers/event/cnxk/cn9k_worker.c index a9ac49a5a7..86aa3f1c30 100644 --- a/drivers/event/cnxk/cn9k_worker.c +++ b/drivers/event/cnxk/cn9k_worker.c @@ -8,10 +8,13 @@ #include "cn9k_cryptodev_ops.h" uint16_t __rte_hot -cn9k_sso_hws_enq(void *port, const struct rte_event *ev) +cn9k_sso_hws_enq_burst(void *port, const struct rte_event ev[], + uint16_t nb_events) { struct cn9k_sso_hws *ws = port; + RTE_SET_USED(nb_events); + switch (ev->op) { case RTE_EVENT_OP_NEW: return cn9k_sso_hws_new_event(ws, ev); @@ -33,14 +36,6 @@ cn9k_sso_hws_enq(void *port, const struct rte_event *ev) return 1; } -uint16_t __rte_hot -cn9k_sso_hws_enq_burst(void *port, const struct rte_event ev[], - uint16_t nb_events) -{ - RTE_SET_USED(nb_events); - return cn9k_sso_hws_enq(port, ev); -} - uint16_t __rte_hot cn9k_sso_hws_enq_new_burst(void *port, const struct rte_event ev[], uint16_t nb_events) @@ -80,11 +75,14 @@ cn9k_sso_hws_profile_switch(void *port, uint8_t profile) /* Dual ws ops. 
*/ uint16_t __rte_hot -cn9k_sso_hws_dual_enq(void *port, const struct rte_event *ev) +cn9k_sso_hws_dual_enq_burst(void *port, const struct rte_event ev[], + uint16_t nb_events) { struct cn9k_sso_hws_dual *dws = port; uint64_t base; + RTE_SET_USED(nb_events); + base = dws->base[!dws->vws]; switch (ev->op) { case RTE_EVENT_OP_NEW: @@ -107,14 +105,6 @@ cn9k_sso_hws_dual_enq(void *port, const struct rte_event *ev) return 1; } -uint16_t __rte_hot -cn9k_sso_hws_dual_enq_burst(void *port, const struct rte_event ev[], - uint16_t nb_events) -{ - RTE_SET_USED(nb_events); - return cn9k_sso_hws_dual_enq(port, ev); -} - uint16_t __rte_hot cn9k_sso_hws_dual_enq_new_burst(void *port, const struct rte_event ev[], uint16_t nb_events) diff --git a/drivers/event/cnxk/cn9k_worker.h b/drivers/event/cnxk/cn9k_worker.h index c92fa72f11..10abbdfbb5 100644 --- a/drivers/event/cnxk/cn9k_worker.h +++ b/drivers/event/cnxk/cn9k_worker.h @@ -28,7 +28,7 @@ cn9k_sso_hws_new_event(struct cn9k_sso_hws *ws, const struct rte_event *ev) const uint64_t event_ptr = ev->u64; const uint16_t grp = ev->queue_id; - rte_atomic_thread_fence(__ATOMIC_ACQ_REL); + rte_atomic_thread_fence(rte_memory_order_acq_rel); if (ws->xaq_lmt <= *ws->fc_mem) return 0; @@ -71,7 +71,7 @@ cn9k_sso_hws_new_event_wait(struct cn9k_sso_hws *ws, const struct rte_event *ev) const uint64_t event_ptr = ev->u64; const uint16_t grp = ev->queue_id; - while (ws->xaq_lmt <= __atomic_load_n(ws->fc_mem, __ATOMIC_RELAXED)) + while (ws->xaq_lmt <= rte_atomic_load_explicit(ws->fc_mem, rte_memory_order_relaxed)) ; cnxk_sso_hws_add_work(event_ptr, tag, new_tt, @@ -93,7 +93,7 @@ cn9k_sso_hws_forward_event(struct cn9k_sso_hws *ws, const struct rte_event *ev) * Use add_work operation to transfer the event to * new group/core */ - rte_atomic_thread_fence(__ATOMIC_RELEASE); + rte_atomic_thread_fence(rte_memory_order_release); roc_sso_hws_head_wait(ws->base); cn9k_sso_hws_new_event_wait(ws, ev); } @@ -110,7 +110,7 @@ cn9k_sso_hws_dual_new_event(struct cn9k_sso_hws_dual *dws, const uint64_t event_ptr = ev->u64; const uint16_t grp = ev->queue_id; - rte_atomic_thread_fence(__ATOMIC_ACQ_REL); + rte_atomic_thread_fence(rte_memory_order_acq_rel); if (dws->xaq_lmt <= *dws->fc_mem) return 0; @@ -128,7 +128,7 @@ cn9k_sso_hws_dual_new_event_wait(struct cn9k_sso_hws_dual *dws, const uint64_t event_ptr = ev->u64; const uint16_t grp = ev->queue_id; - while (dws->xaq_lmt <= __atomic_load_n(dws->fc_mem, __ATOMIC_RELAXED)) + while (dws->xaq_lmt <= rte_atomic_load_explicit(dws->fc_mem, rte_memory_order_relaxed)) ; cnxk_sso_hws_add_work(event_ptr, tag, new_tt, @@ -151,7 +151,7 @@ cn9k_sso_hws_dual_forward_event(struct cn9k_sso_hws_dual *dws, uint64_t base, * Use add_work operation to transfer the event to * new group/core */ - rte_atomic_thread_fence(__ATOMIC_RELEASE); + rte_atomic_thread_fence(rte_memory_order_release); roc_sso_hws_head_wait(base); cn9k_sso_hws_dual_new_event_wait(dws, ev); } @@ -359,7 +359,6 @@ cn9k_sso_hws_get_work_empty(uint64_t base, struct rte_event *ev, } /* CN9K Fastpath functions. 
*/ -uint16_t __rte_hot cn9k_sso_hws_enq(void *port, const struct rte_event *ev); uint16_t __rte_hot cn9k_sso_hws_enq_burst(void *port, const struct rte_event ev[], uint16_t nb_events); @@ -371,8 +370,6 @@ uint16_t __rte_hot cn9k_sso_hws_enq_fwd_burst(void *port, uint16_t nb_events); int __rte_hot cn9k_sso_hws_profile_switch(void *port, uint8_t profile); -uint16_t __rte_hot cn9k_sso_hws_dual_enq(void *port, - const struct rte_event *ev); uint16_t __rte_hot cn9k_sso_hws_dual_enq_burst(void *port, const struct rte_event ev[], uint16_t nb_events); @@ -389,23 +386,15 @@ uint16_t __rte_hot cn9k_sso_hws_dual_ca_enq(void *port, struct rte_event ev[], int __rte_hot cn9k_sso_hws_dual_profile_switch(void *port, uint8_t profile); #define R(name, flags) \ - uint16_t __rte_hot cn9k_sso_hws_deq_##name( \ - void *port, struct rte_event *ev, uint64_t timeout_ticks); \ uint16_t __rte_hot cn9k_sso_hws_deq_burst_##name( \ void *port, struct rte_event ev[], uint16_t nb_events, \ uint64_t timeout_ticks); \ - uint16_t __rte_hot cn9k_sso_hws_deq_tmo_##name( \ - void *port, struct rte_event *ev, uint64_t timeout_ticks); \ uint16_t __rte_hot cn9k_sso_hws_deq_tmo_burst_##name( \ void *port, struct rte_event ev[], uint16_t nb_events, \ uint64_t timeout_ticks); \ - uint16_t __rte_hot cn9k_sso_hws_deq_seg_##name( \ - void *port, struct rte_event *ev, uint64_t timeout_ticks); \ uint16_t __rte_hot cn9k_sso_hws_deq_seg_burst_##name( \ void *port, struct rte_event ev[], uint16_t nb_events, \ uint64_t timeout_ticks); \ - uint16_t __rte_hot cn9k_sso_hws_deq_tmo_seg_##name( \ - void *port, struct rte_event *ev, uint64_t timeout_ticks); \ uint16_t __rte_hot cn9k_sso_hws_deq_tmo_seg_burst_##name( \ void *port, struct rte_event ev[], uint16_t nb_events, \ uint64_t timeout_ticks); @@ -413,119 +402,114 @@ int __rte_hot cn9k_sso_hws_dual_profile_switch(void *port, uint8_t profile); NIX_RX_FASTPATH_MODES #undef R -#define SSO_DEQ(fn, flags) \ - uint16_t __rte_hot fn(void *port, struct rte_event *ev, \ - uint64_t timeout_ticks) \ - { \ - struct cn9k_sso_hws *ws = port; \ - RTE_SET_USED(timeout_ticks); \ - if (ws->swtag_req) { \ - ws->swtag_req = 0; \ - cnxk_sso_hws_swtag_wait(ws->base + SSOW_LF_GWS_TAG); \ - return 1; \ - } \ - return cn9k_sso_hws_get_work(ws, ev, flags, ws->lookup_mem); \ +#define SSO_DEQ(fn, flags) \ + static __rte_always_inline uint16_t fn(void *port, struct rte_event *ev, \ + uint64_t timeout_ticks) \ + { \ + struct cn9k_sso_hws *ws = port; \ + RTE_SET_USED(timeout_ticks); \ + if (ws->swtag_req) { \ + ws->swtag_req = 0; \ + cnxk_sso_hws_swtag_wait(ws->base + SSOW_LF_GWS_TAG); \ + return 1; \ + } \ + return cn9k_sso_hws_get_work(ws, ev, flags, ws->lookup_mem); \ } #define SSO_DEQ_SEG(fn, flags) SSO_DEQ(fn, flags | NIX_RX_MULTI_SEG_F) -#define SSO_DEQ_TMO(fn, flags) \ - uint16_t __rte_hot fn(void *port, struct rte_event *ev, \ - uint64_t timeout_ticks) \ - { \ - struct cn9k_sso_hws *ws = port; \ - uint16_t ret = 1; \ - uint64_t iter; \ - if (ws->swtag_req) { \ - ws->swtag_req = 0; \ - cnxk_sso_hws_swtag_wait(ws->base + SSOW_LF_GWS_TAG); \ - return ret; \ - } \ - ret = cn9k_sso_hws_get_work(ws, ev, flags, ws->lookup_mem); \ - for (iter = 1; iter < timeout_ticks && (ret == 0); iter++) \ - ret = cn9k_sso_hws_get_work(ws, ev, flags, \ - ws->lookup_mem); \ - return ret; \ +#define SSO_DEQ_TMO(fn, flags) \ + static __rte_always_inline uint16_t fn(void *port, struct rte_event *ev, \ + uint64_t timeout_ticks) \ + { \ + struct cn9k_sso_hws *ws = port; \ + uint16_t ret = 1; \ + uint64_t iter; \ + if (ws->swtag_req) { \ + 
ws->swtag_req = 0; \ + cnxk_sso_hws_swtag_wait(ws->base + SSOW_LF_GWS_TAG); \ + return ret; \ + } \ + ret = cn9k_sso_hws_get_work(ws, ev, flags, ws->lookup_mem); \ + for (iter = 1; iter < timeout_ticks && (ret == 0); iter++) \ + ret = cn9k_sso_hws_get_work(ws, ev, flags, ws->lookup_mem); \ + return ret; \ } #define SSO_DEQ_TMO_SEG(fn, flags) SSO_DEQ_TMO(fn, flags | NIX_RX_MULTI_SEG_F) -#define R(name, flags) \ - uint16_t __rte_hot cn9k_sso_hws_dual_deq_##name( \ - void *port, struct rte_event *ev, uint64_t timeout_ticks); \ - uint16_t __rte_hot cn9k_sso_hws_dual_deq_burst_##name( \ - void *port, struct rte_event ev[], uint16_t nb_events, \ - uint64_t timeout_ticks); \ - uint16_t __rte_hot cn9k_sso_hws_dual_deq_tmo_##name( \ - void *port, struct rte_event *ev, uint64_t timeout_ticks); \ - uint16_t __rte_hot cn9k_sso_hws_dual_deq_tmo_burst_##name( \ - void *port, struct rte_event ev[], uint16_t nb_events, \ - uint64_t timeout_ticks); \ - uint16_t __rte_hot cn9k_sso_hws_dual_deq_seg_##name( \ - void *port, struct rte_event *ev, uint64_t timeout_ticks); \ - uint16_t __rte_hot cn9k_sso_hws_dual_deq_seg_burst_##name( \ - void *port, struct rte_event ev[], uint16_t nb_events, \ - uint64_t timeout_ticks); \ - uint16_t __rte_hot cn9k_sso_hws_dual_deq_tmo_seg_##name( \ - void *port, struct rte_event *ev, uint64_t timeout_ticks); \ - uint16_t __rte_hot cn9k_sso_hws_dual_deq_tmo_seg_burst_##name( \ - void *port, struct rte_event ev[], uint16_t nb_events, \ - uint64_t timeout_ticks); +#define R(name, flags) \ + uint16_t __rte_hot cn9k_sso_hws_dual_deq_burst_##name( \ + void *port, struct rte_event ev[], uint16_t nb_events, uint64_t timeout_ticks); \ + uint16_t __rte_hot cn9k_sso_hws_dual_deq_tmo_burst_##name( \ + void *port, struct rte_event ev[], uint16_t nb_events, uint64_t timeout_ticks); \ + uint16_t __rte_hot cn9k_sso_hws_dual_deq_seg_burst_##name( \ + void *port, struct rte_event ev[], uint16_t nb_events, uint64_t timeout_ticks); \ + uint16_t __rte_hot cn9k_sso_hws_dual_deq_tmo_seg_burst_##name( \ + void *port, struct rte_event ev[], uint16_t nb_events, uint64_t timeout_ticks); NIX_RX_FASTPATH_MODES #undef R -#define SSO_DUAL_DEQ(fn, flags) \ - uint16_t __rte_hot fn(void *port, struct rte_event *ev, \ - uint64_t timeout_ticks) \ - { \ - struct cn9k_sso_hws_dual *dws = port; \ - uint16_t gw; \ - RTE_SET_USED(timeout_ticks); \ - if (dws->swtag_req) { \ - dws->swtag_req = 0; \ - cnxk_sso_hws_swtag_wait(dws->base[!dws->vws] + \ - SSOW_LF_GWS_TAG); \ - return 1; \ - } \ - gw = cn9k_sso_hws_dual_get_work(dws->base[dws->vws], \ - dws->base[!dws->vws], ev, \ - flags, dws); \ - dws->vws = !dws->vws; \ - return gw; \ +#define SSO_DUAL_DEQ(fn, flags) \ + static __rte_always_inline uint16_t fn(void *port, struct rte_event *ev, \ + uint64_t timeout_ticks) \ + { \ + struct cn9k_sso_hws_dual *dws = port; \ + uint16_t gw; \ + RTE_SET_USED(timeout_ticks); \ + if (dws->swtag_req) { \ + dws->swtag_req = 0; \ + cnxk_sso_hws_swtag_wait(dws->base[!dws->vws] + SSOW_LF_GWS_TAG); \ + return 1; \ + } \ + gw = cn9k_sso_hws_dual_get_work(dws->base[dws->vws], dws->base[!dws->vws], ev, \ + flags, dws); \ + dws->vws = !dws->vws; \ + return gw; \ } #define SSO_DUAL_DEQ_SEG(fn, flags) SSO_DUAL_DEQ(fn, flags | NIX_RX_MULTI_SEG_F) -#define SSO_DUAL_DEQ_TMO(fn, flags) \ - uint16_t __rte_hot fn(void *port, struct rte_event *ev, \ - uint64_t timeout_ticks) \ - { \ - struct cn9k_sso_hws_dual *dws = port; \ - uint16_t ret = 1; \ - uint64_t iter; \ - if (dws->swtag_req) { \ - dws->swtag_req = 0; \ - 
cnxk_sso_hws_swtag_wait(dws->base[!dws->vws] + \ - SSOW_LF_GWS_TAG); \ - return ret; \ - } \ - ret = cn9k_sso_hws_dual_get_work(dws->base[dws->vws], \ - dws->base[!dws->vws], ev, \ - flags, dws); \ - dws->vws = !dws->vws; \ - for (iter = 1; iter < timeout_ticks && (ret == 0); iter++) { \ - ret = cn9k_sso_hws_dual_get_work(dws->base[dws->vws], \ - dws->base[!dws->vws], \ - ev, flags, dws); \ - dws->vws = !dws->vws; \ - } \ - return ret; \ +#define SSO_DUAL_DEQ_TMO(fn, flags) \ + static __rte_always_inline uint16_t fn(void *port, struct rte_event *ev, \ + uint64_t timeout_ticks) \ + { \ + struct cn9k_sso_hws_dual *dws = port; \ + uint16_t ret = 1; \ + uint64_t iter; \ + if (dws->swtag_req) { \ + dws->swtag_req = 0; \ + cnxk_sso_hws_swtag_wait(dws->base[!dws->vws] + SSOW_LF_GWS_TAG); \ + return ret; \ + } \ + ret = cn9k_sso_hws_dual_get_work(dws->base[dws->vws], dws->base[!dws->vws], ev, \ + flags, dws); \ + dws->vws = !dws->vws; \ + for (iter = 1; iter < timeout_ticks && (ret == 0); iter++) { \ + ret = cn9k_sso_hws_dual_get_work(dws->base[dws->vws], \ + dws->base[!dws->vws], ev, flags, dws); \ + dws->vws = !dws->vws; \ + } \ + return ret; \ } #define SSO_DUAL_DEQ_TMO_SEG(fn, flags) \ SSO_DUAL_DEQ_TMO(fn, flags | NIX_RX_MULTI_SEG_F) +#define R(name, flags) \ + SSO_DEQ(cn9k_sso_hws_deq_##name, flags) \ + SSO_DUAL_DEQ(cn9k_sso_hws_dual_deq_##name, flags) \ + SSO_DEQ_SEG(cn9k_sso_hws_deq_seg_##name, flags) \ + SSO_DUAL_DEQ_SEG(cn9k_sso_hws_dual_deq_seg_##name, flags) \ + SSO_DEQ_TMO(cn9k_sso_hws_deq_tmo_##name, flags) \ + SSO_DUAL_DEQ_TMO(cn9k_sso_hws_dual_deq_tmo_##name, flags) \ + SSO_DEQ_TMO_SEG(cn9k_sso_hws_deq_tmo_seg_##name, flags) \ + SSO_DUAL_DEQ_TMO_SEG(cn9k_sso_hws_dual_deq_tmo_seg_##name, flags) + +NIX_RX_FASTPATH_MODES +#undef R + #define SSO_CMN_DEQ_BURST(fnb, fn, flags) \ uint16_t __rte_hot fnb(void *port, struct rte_event ev[], \ uint16_t nb_events, uint64_t timeout_ticks) \ @@ -542,12 +526,6 @@ NIX_RX_FASTPATH_MODES return fn(port, ev, timeout_ticks); \ } -uint16_t __rte_hot cn9k_sso_hws_deq_all_offload(void *port, struct rte_event *ev, - uint64_t timeout_ticks); - -uint16_t __rte_hot cn9k_sso_hws_deq_dual_all_offload(void *port, struct rte_event *ev, - uint64_t timeout_ticks); - uint16_t __rte_hot cn9k_sso_hws_deq_burst_all_offload(void *port, struct rte_event ev[], uint16_t nb_events, uint64_t timeout_ticks); @@ -555,12 +533,6 @@ uint16_t __rte_hot cn9k_sso_hws_deq_dual_burst_all_offload(void *port, struct rt uint16_t nb_events, uint64_t timeout_ticks); -uint16_t __rte_hot cn9k_sso_hws_deq_all_offload_tst(void *port, struct rte_event *ev, - uint64_t timeout_ticks); - -uint16_t __rte_hot cn9k_sso_hws_deq_dual_all_offload_tst(void *port, struct rte_event *ev, - uint64_t timeout_ticks); - uint16_t __rte_hot cn9k_sso_hws_deq_burst_all_offload_tst(void *port, struct rte_event ev[], uint16_t nb_events, uint64_t timeout_ticks); @@ -599,7 +571,9 @@ cn9k_sso_txq_fc_wait(const struct cn9k_eth_txq *txq) : "memory"); #else do { - avail = txq->nb_sqb_bufs_adj - __atomic_load_n(txq->fc_mem, __ATOMIC_RELAXED); + avail = txq->nb_sqb_bufs_adj - + rte_atomic_load_explicit((uint64_t __rte_atomic *)txq->fc_mem, + rte_memory_order_relaxed); } while (((avail << txq->sqes_per_sqb_log2) - avail) <= 0); #endif } @@ -768,7 +742,8 @@ static __rte_always_inline int32_t cn9k_sso_sq_depth(const struct cn9k_eth_txq *txq) { int32_t avail = (int32_t)txq->nb_sqb_bufs_adj - - (int32_t)__atomic_load_n(txq->fc_mem, __ATOMIC_RELAXED); + (int32_t)rte_atomic_load_explicit((uint64_t __rte_atomic *)txq->fc_mem, + 
rte_memory_order_relaxed); return (avail << txq->sqes_per_sqb_log2) - avail; } diff --git a/drivers/event/cnxk/cnxk_common.h b/drivers/event/cnxk/cnxk_common.h new file mode 100644 index 0000000000..c361d0530d --- /dev/null +++ b/drivers/event/cnxk/cnxk_common.h @@ -0,0 +1,159 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(C) 2024 Marvell. + */ + +#ifndef __CNXK_COMMON_H__ +#define __CNXK_COMMON_H__ + +#include "cnxk_eventdev.h" +#include "cnxk_worker.h" + +struct cnxk_sso_hws_prf { + uint64_t base; + uint32_t gw_wdata; + void *lookup_mem; + uint64_t gw_rdata; + uint8_t swtag_req; + uint8_t hws_id; +}; + +static uint32_t +cnxk_sso_hws_prf_wdata(struct cnxk_sso_evdev *dev) +{ + uint32_t wdata = 1; + + if (dev->deq_tmo_ns) + wdata |= BIT(16); + + switch (dev->gw_mode) { + case CNXK_GW_MODE_NONE: + default: + break; + case CNXK_GW_MODE_PREF: + wdata |= BIT(19); + break; + case CNXK_GW_MODE_PREF_WFE: + wdata |= BIT(20) | BIT(19); + break; + } + + return wdata; +} + +static uint8_t +cnxk_sso_hws_preschedule_get(uint8_t preschedule_type) +{ + uint8_t gw_mode = 0; + + switch (preschedule_type) { + default: + case RTE_EVENT_PRESCHEDULE_NONE: + gw_mode = CNXK_GW_MODE_NONE; + break; + case RTE_EVENT_PRESCHEDULE: + gw_mode = CNXK_GW_MODE_PREF; + break; + case RTE_EVENT_PRESCHEDULE_ADAPTIVE: + gw_mode = CNXK_GW_MODE_PREF_WFE; + break; + } + + return gw_mode; +} + +static void +cnxk_sso_hws_reset(void *arg, void *ws) +{ + struct cnxk_sso_evdev *dev = arg; + struct cnxk_sso_hws_prf *ws_prf; + uint64_t pend_state; + uint8_t swtag_req; + uintptr_t base; + uint8_t hws_id; + union { + __uint128_t wdata; + uint64_t u64[2]; + } gw; + uint8_t pend_tt; + bool is_pend; + + ws_prf = ws; + base = ws_prf->base; + hws_id = ws_prf->hws_id; + swtag_req = ws_prf->swtag_req; + + roc_sso_hws_gwc_invalidate(&dev->sso, &hws_id, 1); + plt_write64(0, base + SSOW_LF_GWS_OP_GWC_INVAL); + /* Wait till getwork/swtp/waitw/desched completes. */ + is_pend = false; + /* Work in WQE0 is always consumed, unless its a SWTAG. */ + pend_state = plt_read64(base + SSOW_LF_GWS_PENDSTATE); + if (pend_state & (BIT_ULL(63) | BIT_ULL(62) | BIT_ULL(54)) || swtag_req) + is_pend = true; + + do { + pend_state = plt_read64(base + SSOW_LF_GWS_PENDSTATE); + } while (pend_state & + (BIT_ULL(63) | BIT_ULL(62) | BIT_ULL(58) | BIT_ULL(56) | BIT_ULL(54))); + pend_tt = CNXK_TT_FROM_TAG(plt_read64(base + SSOW_LF_GWS_WQE0)); + if (is_pend && pend_tt != SSO_TT_EMPTY) { /* Work was pending */ + if (pend_tt == SSO_TT_ATOMIC || pend_tt == SSO_TT_ORDERED) + cnxk_sso_hws_swtag_untag(base + SSOW_LF_GWS_OP_SWTAG_UNTAG); + plt_write64(0, base + SSOW_LF_GWS_OP_DESCHED); + } else if (pend_tt != SSO_TT_EMPTY) { + plt_write64(0, base + SSOW_LF_GWS_OP_SWTAG_FLUSH); + } + + /* Wait for desched to complete. 
*/ + do { + pend_state = plt_read64(base + SSOW_LF_GWS_PENDSTATE); + } while (pend_state & (BIT_ULL(58) | BIT_ULL(56))); + + switch (dev->gw_mode) { + case CNXK_GW_MODE_PREF: + case CNXK_GW_MODE_PREF_WFE: + while (plt_read64(base + SSOW_LF_GWS_PRF_WQE0) & BIT_ULL(63)) + ; + break; + case CNXK_GW_MODE_NONE: + default: + break; + } + + if (CNXK_TT_FROM_TAG(plt_read64(base + SSOW_LF_GWS_PRF_WQE0)) != SSO_TT_EMPTY) { + plt_write64(BIT_ULL(16) | 1, base + SSOW_LF_GWS_OP_GET_WORK0); + do { + roc_load_pair(gw.u64[0], gw.u64[1], base + SSOW_LF_GWS_WQE0); + } while (gw.u64[0] & BIT_ULL(63)); + pend_tt = CNXK_TT_FROM_TAG(plt_read64(base + SSOW_LF_GWS_WQE0)); + if (pend_tt != SSO_TT_EMPTY) { /* Work was pending */ + if (pend_tt == SSO_TT_ATOMIC || pend_tt == SSO_TT_ORDERED) + cnxk_sso_hws_swtag_untag(base + SSOW_LF_GWS_OP_SWTAG_UNTAG); + plt_write64(0, base + SSOW_LF_GWS_OP_DESCHED); + } + } + + plt_write64(0, base + SSOW_LF_GWS_OP_GWC_INVAL); + roc_sso_hws_gwc_invalidate(&dev->sso, &hws_id, 1); + rte_mb(); +} + +static void +cnxk_sso_configure_queue_stash(struct rte_eventdev *event_dev) +{ + struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev); + struct roc_sso_hwgrp_stash stash[dev->stash_cnt]; + int i, rc; + + plt_sso_dbg(); + for (i = 0; i < dev->stash_cnt; i++) { + stash[i].hwgrp = dev->stash_parse_data[i].queue; + stash[i].stash_offset = dev->stash_parse_data[i].stash_offset; + stash[i].stash_count = dev->stash_parse_data[i].stash_length; + } + rc = roc_sso_hwgrp_stash_config(&dev->sso, stash, dev->stash_cnt); + if (rc < 0) + plt_warn("failed to configure HWGRP WQE stashing rc = %d", rc); +} + +#endif /* __CNXK_COMMON_H__ */ diff --git a/drivers/event/cnxk/cnxk_eventdev.c b/drivers/event/cnxk/cnxk_eventdev.c index 84a55511a3..be6a487b59 100644 --- a/drivers/event/cnxk/cnxk_eventdev.c +++ b/drivers/event/cnxk/cnxk_eventdev.c @@ -2,7 +2,7 @@ * Copyright(C) 2021 Marvell. 
*/ -#include "roc_npa.h" +#include "roc_api.h" #include "cnxk_eventdev.h" #include "cnxk_eventdev_dp.h" @@ -47,7 +47,7 @@ cnxk_sso_xaq_allocate(struct cnxk_sso_evdev *dev) if (dev->num_events > 0) xae_cnt = dev->num_events; else - xae_cnt = dev->sso.iue; + xae_cnt = dev->sso.feat.iue; if (dev->xae_cnt) xae_cnt += dev->xae_cnt; @@ -624,8 +624,8 @@ cnxk_sso_parse_devargs(struct cnxk_sso_evdev *dev, struct rte_devargs *devargs) &dev->force_ena_bp); rte_kvargs_process(kvlist, CN9K_SSO_SINGLE_WS, &parse_kvargs_flag, &single_ws); - rte_kvargs_process(kvlist, CN10K_SSO_STASH, - &parse_sso_kvargs_stash_dict, dev); + rte_kvargs_process(kvlist, CNXK_SSO_STASH, &parse_sso_kvargs_stash_dict, + dev); dev->dual_ws = !single_ws; rte_kvargs_free(kvlist); } diff --git a/drivers/event/cnxk/cnxk_eventdev.h b/drivers/event/cnxk/cnxk_eventdev.h index f147ef3c78..33b3538753 100644 --- a/drivers/event/cnxk/cnxk_eventdev.h +++ b/drivers/event/cnxk/cnxk_eventdev.h @@ -21,16 +21,13 @@ #include "cnxk_eventdev_dp.h" -#include "roc_platform.h" -#include "roc_sso.h" - #include "cnxk_tim_evdev.h" #define CNXK_SSO_XAE_CNT "xae_cnt" #define CNXK_SSO_GGRP_QOS "qos" #define CNXK_SSO_FORCE_BP "force_rx_bp" #define CN9K_SSO_SINGLE_WS "single_ws" -#define CN10K_SSO_STASH "stash" +#define CNXK_SSO_STASH "stash" #define CNXK_SSO_MAX_PROFILES 2 @@ -41,9 +38,9 @@ #define CN9K_SSOW_GET_BASE_ADDR(_GW) ((_GW)-SSOW_LF_GWS_OP_GET_WORK0) #define CN9K_DUAL_WS_NB_WS 2 -#define CN10K_GW_MODE_NONE 0 -#define CN10K_GW_MODE_PREF 1 -#define CN10K_GW_MODE_PREF_WFE 2 +#define CNXK_GW_MODE_NONE 0 +#define CNXK_GW_MODE_PREF 1 +#define CNXK_GW_MODE_PREF_WFE 2 #define CNXK_QOS_NORMALIZE(val, min, max, cnt) \ (min + val / ((max + cnt - 1) / cnt)) @@ -136,7 +133,7 @@ struct __rte_cache_aligned cn9k_sso_hws { struct cnxk_timesync_info **tstamp; /* Add Work Fastpath data */ alignas(RTE_CACHE_LINE_SIZE) uint64_t xaq_lmt; - uint64_t *fc_mem; + uint64_t __rte_atomic *fc_mem; uintptr_t grp_base; /* Tx Fastpath data */ alignas(RTE_CACHE_LINE_SIZE) uint64_t lso_tun_fmt; @@ -154,7 +151,7 @@ struct __rte_cache_aligned cn9k_sso_hws_dual { struct cnxk_timesync_info **tstamp; /* Add Work Fastpath data */ alignas(RTE_CACHE_LINE_SIZE) uint64_t xaq_lmt; - uint64_t *fc_mem; + uint64_t __rte_atomic *fc_mem; uintptr_t grp_base; /* Tx Fastpath data */ alignas(RTE_CACHE_LINE_SIZE) uint64_t lso_tun_fmt; @@ -269,6 +266,9 @@ int cnxk_sso_rx_adapter_start(const struct rte_eventdev *event_dev, const struct rte_eth_dev *eth_dev); int cnxk_sso_rx_adapter_stop(const struct rte_eventdev *event_dev, const struct rte_eth_dev *eth_dev); +void cnxk_sso_tstamp_cfg(uint16_t port_id, const struct rte_eth_dev *eth_dev, + struct cnxk_sso_evdev *dev); +int cnxk_sso_rxq_disable(const struct rte_eth_dev *eth_dev, uint16_t rq_id); int cnxk_sso_tx_adapter_queue_add(const struct rte_eventdev *event_dev, const struct rte_eth_dev *eth_dev, int32_t tx_queue_id); diff --git a/drivers/event/cnxk/cnxk_eventdev_adptr.c b/drivers/event/cnxk/cnxk_eventdev_adptr.c index 3cac42111a..4cf48db74c 100644 --- a/drivers/event/cnxk/cnxk_eventdev_adptr.c +++ b/drivers/event/cnxk/cnxk_eventdev_adptr.c @@ -167,9 +167,10 @@ cnxk_sso_rxq_enable(struct cnxk_eth_dev *cnxk_eth_dev, uint16_t rq_id, return rc; } -static int -cnxk_sso_rxq_disable(struct cnxk_eth_dev *cnxk_eth_dev, uint16_t rq_id) +int +cnxk_sso_rxq_disable(const struct rte_eth_dev *eth_dev, uint16_t rq_id) { + struct cnxk_eth_dev *cnxk_eth_dev = eth_dev->data->dev_private; struct roc_nix_rq *rq; rq = &cnxk_eth_dev->rqs[rq_id]; @@ -209,10 +210,11 @@ 
cnxk_sso_rx_adapter_vwqe_enable(struct cnxk_eth_dev *cnxk_eth_dev, return roc_nix_rq_modify(&cnxk_eth_dev->nix, rq, 0); } -static void -cnxk_sso_tstamp_cfg(uint16_t port_id, struct cnxk_eth_dev *cnxk_eth_dev, - struct cnxk_sso_evdev *dev) +void +cnxk_sso_tstamp_cfg(uint16_t port_id, const struct rte_eth_dev *eth_dev, struct cnxk_sso_evdev *dev) { + struct cnxk_eth_dev *cnxk_eth_dev = eth_dev->data->dev_private; + if (cnxk_eth_dev->rx_offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP || cnxk_eth_dev->ptp_en) dev->tstamp[port_id] = &cnxk_eth_dev->tstamp; } @@ -263,7 +265,7 @@ cnxk_sso_rx_adapter_queue_add( /* Propagate force bp devarg */ cnxk_eth_dev->nix.force_rx_aura_bp = dev->force_ena_bp; - cnxk_sso_tstamp_cfg(eth_dev->data->port_id, cnxk_eth_dev, dev); + cnxk_sso_tstamp_cfg(eth_dev->data->port_id, eth_dev, dev); cnxk_eth_dev->nb_rxq_sso++; } @@ -290,7 +292,7 @@ cnxk_sso_rx_adapter_queue_del(const struct rte_eventdev *event_dev, for (i = 0; i < eth_dev->data->nb_rx_queues; i++) cnxk_sso_rx_adapter_queue_del(event_dev, eth_dev, i); } else { - rc = cnxk_sso_rxq_disable(cnxk_eth_dev, (uint16_t)rx_queue_id); + rc = cnxk_sso_rxq_disable(eth_dev, (uint16_t)rx_queue_id); cnxk_eth_dev->nb_rxq_sso--; /* Enable drop_re if it was disabled earlier */ diff --git a/drivers/event/cnxk/cnxk_eventdev_selftest.c b/drivers/event/cnxk/cnxk_eventdev_selftest.c index 95c0f1b1f7..8f3d0982e9 100644 --- a/drivers/event/cnxk/cnxk_eventdev_selftest.c +++ b/drivers/event/cnxk/cnxk_eventdev_selftest.c @@ -18,6 +18,8 @@ #include #include +#include "roc_api.h" + #include "cnxk_eventdev.h" #include "cnxk_eventdev_dp.h" @@ -63,7 +65,7 @@ seqn_list_update(int val) return -1; seqn_list[seqn_list_index++] = val; - rte_atomic_thread_fence(__ATOMIC_RELEASE); + rte_atomic_thread_fence(rte_memory_order_release); return 0; } @@ -82,7 +84,7 @@ seqn_list_check(int limit) } struct test_core_param { - uint32_t *total_events; + uint32_t __rte_atomic *total_events; uint64_t dequeue_tmo_ticks; uint8_t port; uint8_t sched_type; @@ -540,13 +542,13 @@ static int worker_multi_port_fn(void *arg) { struct test_core_param *param = arg; - uint32_t *total_events = param->total_events; + uint32_t __rte_atomic *total_events = param->total_events; uint8_t port = param->port; uint16_t valid_event; struct rte_event ev; int ret; - while (__atomic_load_n(total_events, __ATOMIC_RELAXED) > 0) { + while (rte_atomic_load_explicit(total_events, rte_memory_order_relaxed) > 0) { valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0); if (!valid_event) continue; @@ -554,30 +556,30 @@ worker_multi_port_fn(void *arg) ret = validate_event(&ev); RTE_TEST_ASSERT_SUCCESS(ret, "Failed to validate event"); rte_pktmbuf_free(ev.mbuf); - __atomic_fetch_sub(total_events, 1, __ATOMIC_RELAXED); + rte_atomic_fetch_sub_explicit(total_events, 1, rte_memory_order_relaxed); } return 0; } static inline int -wait_workers_to_join(const uint32_t *count) +wait_workers_to_join(const uint32_t __rte_atomic *count) { uint64_t cycles, print_cycles; cycles = rte_get_timer_cycles(); print_cycles = cycles; - while (__atomic_load_n(count, __ATOMIC_RELAXED)) { + while (rte_atomic_load_explicit(count, rte_memory_order_relaxed)) { uint64_t new_cycles = rte_get_timer_cycles(); if (new_cycles - print_cycles > rte_get_timer_hz()) { plt_info("Events %d", - __atomic_load_n(count, __ATOMIC_RELAXED)); + rte_atomic_load_explicit(count, rte_memory_order_relaxed)); print_cycles = new_cycles; } if (new_cycles - cycles > rte_get_timer_hz() * 10000000000) { plt_err("No schedules for seconds, deadlock (%d)", - 
__atomic_load_n(count, __ATOMIC_RELAXED)); + rte_atomic_load_explicit(count, rte_memory_order_relaxed)); rte_event_dev_dump(evdev, stdout); cycles = new_cycles; return -1; @@ -593,7 +595,7 @@ launch_workers_and_wait(int (*main_thread)(void *), int (*worker_thread)(void *), uint32_t total_events, uint8_t nb_workers, uint8_t sched_type) { - uint32_t atomic_total_events; + uint32_t __rte_atomic atomic_total_events; struct test_core_param *param; uint64_t dequeue_tmo_ticks; uint8_t port = 0; @@ -603,7 +605,7 @@ launch_workers_and_wait(int (*main_thread)(void *), if (!nb_workers) return 0; - __atomic_store_n(&atomic_total_events, total_events, __ATOMIC_RELAXED); + rte_atomic_store_explicit(&atomic_total_events, total_events, rte_memory_order_relaxed); seqn_list_init(); param = malloc(sizeof(struct test_core_param) * nb_workers); @@ -640,7 +642,7 @@ launch_workers_and_wait(int (*main_thread)(void *), param[port].sched_type = sched_type; param[port].port = port; param[port].dequeue_tmo_ticks = dequeue_tmo_ticks; - rte_atomic_thread_fence(__ATOMIC_RELEASE); + rte_atomic_thread_fence(rte_memory_order_release); w_lcore = rte_get_next_lcore(w_lcore, 1, 0); if (w_lcore == RTE_MAX_LCORE) { plt_err("Failed to get next available lcore"); @@ -651,7 +653,7 @@ launch_workers_and_wait(int (*main_thread)(void *), rte_eal_remote_launch(worker_thread, ¶m[port], w_lcore); } - rte_atomic_thread_fence(__ATOMIC_RELEASE); + rte_atomic_thread_fence(rte_memory_order_release); ret = wait_workers_to_join(&atomic_total_events); free(param); @@ -890,13 +892,13 @@ worker_flow_based_pipeline(void *arg) { struct test_core_param *param = arg; uint64_t dequeue_tmo_ticks = param->dequeue_tmo_ticks; - uint32_t *total_events = param->total_events; + uint32_t __rte_atomic *total_events = param->total_events; uint8_t new_sched_type = param->sched_type; uint8_t port = param->port; uint16_t valid_event; struct rte_event ev; - while (__atomic_load_n(total_events, __ATOMIC_RELAXED) > 0) { + while (rte_atomic_load_explicit(total_events, rte_memory_order_relaxed) > 0) { valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, dequeue_tmo_ticks); if (!valid_event) @@ -916,8 +918,8 @@ worker_flow_based_pipeline(void *arg) if (seqn_list_update(seqn) == 0) { rte_pktmbuf_free(ev.mbuf); - __atomic_fetch_sub(total_events, 1, - __ATOMIC_RELAXED); + rte_atomic_fetch_sub_explicit(total_events, 1, + rte_memory_order_relaxed); } else { plt_err("Failed to update seqn_list"); return -1; @@ -1046,13 +1048,13 @@ worker_group_based_pipeline(void *arg) { struct test_core_param *param = arg; uint64_t dequeue_tmo_ticks = param->dequeue_tmo_ticks; - uint32_t *total_events = param->total_events; + uint32_t __rte_atomic *total_events = param->total_events; uint8_t new_sched_type = param->sched_type; uint8_t port = param->port; uint16_t valid_event; struct rte_event ev; - while (__atomic_load_n(total_events, __ATOMIC_RELAXED) > 0) { + while (rte_atomic_load_explicit(total_events, rte_memory_order_relaxed) > 0) { valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, dequeue_tmo_ticks); if (!valid_event) @@ -1072,8 +1074,8 @@ worker_group_based_pipeline(void *arg) if (seqn_list_update(seqn) == 0) { rte_pktmbuf_free(ev.mbuf); - __atomic_fetch_sub(total_events, 1, - __ATOMIC_RELAXED); + rte_atomic_fetch_sub_explicit(total_events, 1, + rte_memory_order_relaxed); } else { plt_err("Failed to update seqn_list"); return -1; @@ -1205,19 +1207,19 @@ static int worker_flow_based_pipeline_max_stages_rand_sched_type(void *arg) { struct test_core_param *param = arg; - 
uint32_t *total_events = param->total_events; + uint32_t __rte_atomic *total_events = param->total_events; uint8_t port = param->port; uint16_t valid_event; struct rte_event ev; - while (__atomic_load_n(total_events, __ATOMIC_RELAXED) > 0) { + while (rte_atomic_load_explicit(total_events, rte_memory_order_relaxed) > 0) { valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0); if (!valid_event) continue; if (ev.sub_event_type == MAX_STAGES) { /* last stage */ rte_pktmbuf_free(ev.mbuf); - __atomic_fetch_sub(total_events, 1, __ATOMIC_RELAXED); + rte_atomic_fetch_sub_explicit(total_events, 1, rte_memory_order_relaxed); } else { ev.event_type = RTE_EVENT_TYPE_CPU; ev.sub_event_type++; @@ -1284,16 +1286,16 @@ worker_queue_based_pipeline_max_stages_rand_sched_type(void *arg) &queue_count), "Queue count get failed"); uint8_t nr_queues = queue_count; - uint32_t *total_events = param->total_events; + uint32_t __rte_atomic *total_events = param->total_events; - while (__atomic_load_n(total_events, __ATOMIC_RELAXED) > 0) { + while (rte_atomic_load_explicit(total_events, rte_memory_order_relaxed) > 0) { valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0); if (!valid_event) continue; if (ev.queue_id == nr_queues - 1) { /* last stage */ rte_pktmbuf_free(ev.mbuf); - __atomic_fetch_sub(total_events, 1, __ATOMIC_RELAXED); + rte_atomic_fetch_sub_explicit(total_events, 1, rte_memory_order_relaxed); } else { ev.event_type = RTE_EVENT_TYPE_CPU; ev.queue_id++; @@ -1329,16 +1331,16 @@ worker_mixed_pipeline_max_stages_rand_sched_type(void *arg) &queue_count), "Queue count get failed"); uint8_t nr_queues = queue_count; - uint32_t *total_events = param->total_events; + uint32_t __rte_atomic *total_events = param->total_events; - while (__atomic_load_n(total_events, __ATOMIC_RELAXED) > 0) { + while (rte_atomic_load_explicit(total_events, rte_memory_order_relaxed) > 0) { valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0); if (!valid_event) continue; if (ev.queue_id == nr_queues - 1) { /* Last stage */ rte_pktmbuf_free(ev.mbuf); - __atomic_fetch_sub(total_events, 1, __ATOMIC_RELAXED); + rte_atomic_fetch_sub_explicit(total_events, 1, rte_memory_order_relaxed); } else { ev.event_type = RTE_EVENT_TYPE_CPU; ev.queue_id++; @@ -1564,17 +1566,17 @@ cnxk_sso_selftest(const char *dev_name) return rc; } - if (roc_model_runtime_is_cn10k()) { - printf("Verifying CN10K workslot getwork mode none\n"); - dev->gw_mode = CN10K_GW_MODE_NONE; + if (roc_model_runtime_is_cn10k() || roc_model_runtime_is_cn20k()) { + printf("Verifying %s workslot getwork mode none\n", dev_name); + dev->gw_mode = CNXK_GW_MODE_NONE; if (cnxk_sso_testsuite_run(dev_name)) return rc; - printf("Verifying CN10K workslot getwork mode prefetch\n"); - dev->gw_mode = CN10K_GW_MODE_PREF; + printf("Verifying %s workslot getwork mode prefetch\n", dev_name); + dev->gw_mode = CNXK_GW_MODE_PREF; if (cnxk_sso_testsuite_run(dev_name)) return rc; - printf("Verifying CN10K workslot getwork mode smart prefetch\n"); - dev->gw_mode = CN10K_GW_MODE_PREF_WFE; + printf("Verifying %s workslot getwork mode smart prefetch\n", dev_name); + dev->gw_mode = CNXK_GW_MODE_PREF_WFE; if (cnxk_sso_testsuite_run(dev_name)) return rc; } diff --git a/drivers/event/cnxk/cnxk_eventdev_stats.c b/drivers/event/cnxk/cnxk_eventdev_stats.c index a8a87a06e4..6dea91aedf 100644 --- a/drivers/event/cnxk/cnxk_eventdev_stats.c +++ b/drivers/event/cnxk/cnxk_eventdev_stats.c @@ -2,6 +2,8 @@ * Copyright(C) 2021 Marvell. 
*/ +#include "roc_api.h" + #include "cnxk_eventdev.h" #include "cnxk_eventdev_dp.h" diff --git a/drivers/event/cnxk/cnxk_tim_evdev.c b/drivers/event/cnxk/cnxk_tim_evdev.c index bba70646fa..994d1d1090 100644 --- a/drivers/event/cnxk/cnxk_tim_evdev.c +++ b/drivers/event/cnxk/cnxk_tim_evdev.c @@ -4,7 +4,7 @@ #include -#include "roc_npa.h" +#include "roc_api.h" #include "cnxk_eventdev.h" #include "cnxk_tim_evdev.h" @@ -78,9 +78,25 @@ cnxk_tim_chnk_pool_create(struct cnxk_tim_ring *tim_ring, return rc; } +static int +cnxk_tim_enable_hwwqe(struct cnxk_tim_evdev *dev, struct cnxk_tim_ring *tim_ring) +{ + struct roc_tim_hwwqe_cfg hwwqe_cfg; + + memset(&hwwqe_cfg, 0, sizeof(hwwqe_cfg)); + hwwqe_cfg.hwwqe_ena = 1; + hwwqe_cfg.grp_ena = 0; + hwwqe_cfg.flw_ctrl_ena = 0; + hwwqe_cfg.result_offset = CNXK_TIM_HWWQE_RES_OFFSET_B; + + tim_ring->lmt_base = dev->tim.roc_sso->lmt_base; + return roc_tim_lf_config_hwwqe(&dev->tim, tim_ring->ring_id, &hwwqe_cfg); +} + static void cnxk_tim_set_fp_ops(struct cnxk_tim_ring *tim_ring) { + struct cnxk_tim_evdev *dev = cnxk_tim_priv_get(); uint8_t prod_flag = !tim_ring->prod_type_sp; /* [STATS] [DFB/FB] [SP][MP]*/ @@ -98,6 +114,16 @@ cnxk_tim_set_fp_ops(struct cnxk_tim_ring *tim_ring) #undef FP }; + if (dev == NULL) + return; + + if (dev->tim.feat.hwwqe) { + cnxk_tim_ops.arm_burst = cnxk_tim_arm_burst_hwwqe; + cnxk_tim_ops.arm_tmo_tick_burst = cnxk_tim_arm_tmo_burst_hwwqe; + cnxk_tim_ops.cancel_burst = cnxk_tim_timer_cancel_burst_hwwqe; + return; + } + cnxk_tim_ops.arm_burst = arm_burst[tim_ring->enable_stats][tim_ring->ena_dfb][prod_flag]; cnxk_tim_ops.arm_tmo_tick_burst = @@ -224,12 +250,13 @@ cnxk_tim_ring_create(struct rte_event_timer_adapter *adptr) } } - if (tim_ring->disable_npa) { + if (!dev->tim.feat.hwwqe && tim_ring->disable_npa) { tim_ring->nb_chunks = tim_ring->nb_timers / CNXK_TIM_NB_CHUNK_SLOTS(tim_ring->chunk_sz); tim_ring->nb_chunks = tim_ring->nb_chunks * tim_ring->nb_bkts; } else { + tim_ring->disable_npa = 0; tim_ring->nb_chunks = tim_ring->nb_timers; } @@ -255,6 +282,14 @@ cnxk_tim_ring_create(struct rte_event_timer_adapter *adptr) goto tim_chnk_free; } + if (dev->tim.feat.hwwqe) { + rc = cnxk_tim_enable_hwwqe(dev, tim_ring); + if (rc < 0) { + plt_err("Failed to enable hwwqe"); + goto tim_chnk_free; + } + } + plt_write64((uint64_t)tim_ring->bkt, tim_ring->base + TIM_LF_RING_BASE); plt_write64(tim_ring->aura, tim_ring->base + TIM_LF_RING_AURA); @@ -358,7 +393,7 @@ cnxk_tim_stats_get(const struct rte_event_timer_adapter *adapter, tim_ring->tick_fn(tim_ring->tbase) - tim_ring->ring_start_cyc; stats->evtim_exp_count = - __atomic_load_n(&tim_ring->arm_cnt, __ATOMIC_RELAXED); + rte_atomic_load_explicit(&tim_ring->arm_cnt, rte_memory_order_relaxed); stats->ev_enq_count = stats->evtim_exp_count; stats->adapter_tick_count = rte_reciprocal_divide_u64(bkt_cyc, &tim_ring->fast_div); @@ -370,7 +405,7 @@ cnxk_tim_stats_reset(const struct rte_event_timer_adapter *adapter) { struct cnxk_tim_ring *tim_ring = adapter->data->adapter_priv; - __atomic_store_n(&tim_ring->arm_cnt, 0, __ATOMIC_RELAXED); + rte_atomic_store_explicit(&tim_ring->arm_cnt, 0, rte_memory_order_relaxed); return 0; } diff --git a/drivers/event/cnxk/cnxk_tim_evdev.h b/drivers/event/cnxk/cnxk_tim_evdev.h index 6cf10dbf4d..114a89ee5a 100644 --- a/drivers/event/cnxk/cnxk_tim_evdev.h +++ b/drivers/event/cnxk/cnxk_tim_evdev.h @@ -15,11 +15,7 @@ #include #include #include - -#include "hw/tim.h" - -#include "roc_model.h" -#include "roc_tim.h" +#include #define NSECPERSEC 1E9 #define USECPERSEC 1E6 @@ 
-34,6 +30,8 @@ #define CNXK_TIM_MIN_CHUNK_SLOTS (0x1) #define CNXK_TIM_MAX_CHUNK_SLOTS (0x1FFE) #define CNXK_TIM_MAX_POOL_CACHE_SZ (16) +#define CNXK_TIM_HWWQE_RES_OFFSET_B (24) +#define CNXK_TIM_ENT_PER_LMT (7) #define CN9K_TIM_MIN_TMO_TKS (256) @@ -108,15 +106,15 @@ struct cnxk_tim_evdev { struct cnxk_tim_bkt { uint64_t first_chunk; union { - uint64_t w1; + uint64_t __rte_atomic w1; struct { - uint32_t nb_entry; + uint32_t __rte_atomic nb_entry; uint8_t sbt : 1; uint8_t hbt : 1; uint8_t bsk : 1; uint8_t rsvd : 5; - uint8_t lock; - int16_t chunk_remainder; + uint8_t __rte_atomic lock; + int16_t __rte_atomic chunk_remainder; }; }; uint64_t current_chunk; @@ -129,12 +127,13 @@ struct __rte_cache_aligned cnxk_tim_ring { uintptr_t tbase; uint64_t (*tick_fn)(uint64_t tbase); uint64_t ring_start_cyc; + uint64_t lmt_base; struct cnxk_tim_bkt *bkt; struct rte_mempool *chunk_pool; struct rte_reciprocal_u64 fast_div; struct rte_reciprocal_u64 fast_bkt; uint64_t tck_int; - uint64_t arm_cnt; + uint64_t __rte_atomic arm_cnt; uintptr_t base; uint8_t prod_type_sp; uint8_t enable_stats; @@ -315,11 +314,21 @@ TIM_ARM_FASTPATH_MODES TIM_ARM_TMO_FASTPATH_MODES #undef FP +uint16_t cnxk_tim_arm_burst_hwwqe(const struct rte_event_timer_adapter *adptr, + struct rte_event_timer **tim, const uint16_t nb_timers); + +uint16_t cnxk_tim_arm_tmo_burst_hwwqe(const struct rte_event_timer_adapter *adptr, + struct rte_event_timer **tim, const uint64_t timeout_tick, + const uint16_t nb_timers); + uint16_t cnxk_tim_timer_cancel_burst(const struct rte_event_timer_adapter *adptr, struct rte_event_timer **tim, const uint16_t nb_timers); +uint16_t cnxk_tim_timer_cancel_burst_hwwqe(const struct rte_event_timer_adapter *adptr, + struct rte_event_timer **tim, const uint16_t nb_timers); + int cnxk_tim_remaining_ticks_get(const struct rte_event_timer_adapter *adapter, const struct rte_event_timer *evtim, uint64_t *ticks_remaining); diff --git a/drivers/event/cnxk/cnxk_tim_worker.c b/drivers/event/cnxk/cnxk_tim_worker.c index 1f2f2fe5d8..42d376d375 100644 --- a/drivers/event/cnxk/cnxk_tim_worker.c +++ b/drivers/event/cnxk/cnxk_tim_worker.c @@ -2,6 +2,8 @@ * Copyright(C) 2021 Marvell. 
*/ +#include "roc_api.h" + #include "cnxk_tim_evdev.h" #include "cnxk_tim_worker.h" @@ -30,15 +32,6 @@ cnxk_tim_arm_checks(const struct cnxk_tim_ring *const tim_ring, return -EINVAL; } -static inline void -cnxk_tim_format_event(const struct rte_event_timer *const tim, - struct cnxk_tim_ent *const entry) -{ - entry->w0 = (tim->ev.event & 0xFFC000000000) >> 6 | - (tim->ev.event & 0xFFFFFFFFF); - entry->wqe = tim->ev.u64; -} - static __rte_always_inline uint16_t cnxk_tim_timer_arm_burst(const struct rte_event_timer_adapter *adptr, struct rte_event_timer **tim, const uint16_t nb_timers, @@ -70,7 +63,25 @@ cnxk_tim_timer_arm_burst(const struct rte_event_timer_adapter *adptr, } if (flags & CNXK_TIM_ENA_STATS) - __atomic_fetch_add(&tim_ring->arm_cnt, index, __ATOMIC_RELAXED); + rte_atomic_fetch_add_explicit(&tim_ring->arm_cnt, index, rte_memory_order_relaxed); + + return index; +} + +uint16_t +cnxk_tim_arm_burst_hwwqe(const struct rte_event_timer_adapter *adptr, struct rte_event_timer **tim, + const uint16_t nb_timers) +{ + struct cnxk_tim_ring *tim_ring = adptr->data->adapter_priv; + uint16_t index; + + for (index = 0; index < nb_timers; index++) { + if (cnxk_tim_arm_checks(tim_ring, tim[index])) + break; + + if (cnxk_tim_add_entry_hwwqe(tim_ring, tim[index])) + break; + } return index; } @@ -124,12 +135,35 @@ cnxk_tim_timer_arm_tmo_brst(const struct rte_event_timer_adapter *adptr, } if (flags & CNXK_TIM_ENA_STATS) - __atomic_fetch_add(&tim_ring->arm_cnt, set_timers, - __ATOMIC_RELAXED); + rte_atomic_fetch_add_explicit(&tim_ring->arm_cnt, set_timers, + rte_memory_order_relaxed); return set_timers; } +uint16_t +cnxk_tim_arm_tmo_burst_hwwqe(const struct rte_event_timer_adapter *adptr, + struct rte_event_timer **tim, const uint64_t timeout_tick, + const uint16_t nb_timers) +{ + struct cnxk_tim_ring *tim_ring = adptr->data->adapter_priv; + uint16_t idx; + + if (unlikely(!timeout_tick || timeout_tick > tim_ring->nb_bkts)) { + const enum rte_event_timer_state state = timeout_tick ? 
+ RTE_EVENT_TIMER_ERROR_TOOLATE : + RTE_EVENT_TIMER_ERROR_TOOEARLY; + for (idx = 0; idx < nb_timers; idx++) + tim[idx]->state = state; + + rte_errno = EINVAL; + return 0; + } + + return cnxk_tim_add_entry_tmo_hwwqe(tim_ring, tim, timeout_tick * tim_ring->tck_int, + nb_timers); +} + #define FP(_name, _f2, _f1, _flags) \ uint16_t __rte_noinline cnxk_tim_arm_tmo_tick_burst_##_name( \ const struct rte_event_timer_adapter *adptr, \ @@ -151,7 +185,7 @@ cnxk_tim_timer_cancel_burst(const struct rte_event_timer_adapter *adptr, int ret; RTE_SET_USED(adptr); - rte_atomic_thread_fence(__ATOMIC_ACQUIRE); + rte_atomic_thread_fence(rte_memory_order_acquire); for (index = 0; index < nb_timers; index++) { if (tim[index]->state == RTE_EVENT_TIMER_CANCELED) { rte_errno = EALREADY; @@ -172,6 +206,38 @@ cnxk_tim_timer_cancel_burst(const struct rte_event_timer_adapter *adptr, return index; } +uint16_t +cnxk_tim_timer_cancel_burst_hwwqe(const struct rte_event_timer_adapter *adptr, + struct rte_event_timer **tim, const uint16_t nb_timers) +{ + uint64_t __rte_atomic *status; + uint16_t i; + + RTE_SET_USED(adptr); + for (i = 0; i < nb_timers; i++) { + if (tim[i]->state == RTE_EVENT_TIMER_CANCELED) { + rte_errno = EALREADY; + break; + } + + if (tim[i]->state != RTE_EVENT_TIMER_ARMED) { + rte_errno = EINVAL; + break; + } + + status = (uint64_t __rte_atomic *)&tim[i]->impl_opaque[1]; + if (!rte_atomic_compare_exchange_strong_explicit(status, (uint64_t *)&tim[i], 0, + rte_memory_order_release, + rte_memory_order_relaxed)) { + rte_errno = ENOENT; + break; + } + tim[i]->state = RTE_EVENT_TIMER_CANCELED; + } + + return i; +} + int cnxk_tim_remaining_ticks_get(const struct rte_event_timer_adapter *adapter, const struct rte_event_timer *evtim, uint64_t *ticks_remaining) @@ -193,7 +259,7 @@ cnxk_tim_remaining_ticks_get(const struct rte_event_timer_adapter *adapter, return -ENOENT; bkt = (struct cnxk_tim_bkt *)evtim->impl_opaque[1]; - sema = __atomic_load_n(&bkt->w1, rte_memory_order_acquire); + sema = rte_atomic_load_explicit(&bkt->w1, rte_memory_order_acquire); if (cnxk_tim_bkt_get_hbt(sema) || !cnxk_tim_bkt_get_nent(sema)) return -ENOENT; diff --git a/drivers/event/cnxk/cnxk_tim_worker.h b/drivers/event/cnxk/cnxk_tim_worker.h index f530d8c5c4..6a9ee2fd52 100644 --- a/drivers/event/cnxk/cnxk_tim_worker.h +++ b/drivers/event/cnxk/cnxk_tim_worker.h @@ -23,19 +23,19 @@ cnxk_tim_bkt_fetch_rem(uint64_t w1) static inline int16_t cnxk_tim_bkt_get_rem(struct cnxk_tim_bkt *bktp) { - return __atomic_load_n(&bktp->chunk_remainder, __ATOMIC_ACQUIRE); + return rte_atomic_load_explicit(&bktp->chunk_remainder, rte_memory_order_acquire); } static inline void cnxk_tim_bkt_set_rem(struct cnxk_tim_bkt *bktp, uint16_t v) { - __atomic_store_n(&bktp->chunk_remainder, v, __ATOMIC_RELAXED); + rte_atomic_store_explicit(&bktp->chunk_remainder, v, rte_memory_order_relaxed); } static inline void cnxk_tim_bkt_sub_rem(struct cnxk_tim_bkt *bktp, uint16_t v) { - __atomic_fetch_sub(&bktp->chunk_remainder, v, __ATOMIC_RELAXED); + rte_atomic_fetch_sub_explicit(&bktp->chunk_remainder, v, rte_memory_order_relaxed); } static inline uint8_t @@ -56,20 +56,20 @@ cnxk_tim_bkt_clr_bsk(struct cnxk_tim_bkt *bktp) /* Clear everything except lock. 
*/ const uint64_t v = TIM_BUCKET_W1_M_LOCK << TIM_BUCKET_W1_S_LOCK; - return __atomic_fetch_and(&bktp->w1, v, __ATOMIC_ACQ_REL); + return rte_atomic_fetch_and_explicit(&bktp->w1, v, rte_memory_order_acq_rel); } static inline uint64_t cnxk_tim_bkt_fetch_sema_lock(struct cnxk_tim_bkt *bktp) { - return __atomic_fetch_add(&bktp->w1, TIM_BUCKET_SEMA_WLOCK, - __ATOMIC_ACQUIRE); + return rte_atomic_fetch_add_explicit(&bktp->w1, TIM_BUCKET_SEMA_WLOCK, + rte_memory_order_acquire); } static inline uint64_t cnxk_tim_bkt_fetch_sema(struct cnxk_tim_bkt *bktp) { - return __atomic_fetch_add(&bktp->w1, TIM_BUCKET_SEMA, __ATOMIC_RELAXED); + return rte_atomic_fetch_add_explicit(&bktp->w1, TIM_BUCKET_SEMA, rte_memory_order_relaxed); } static inline uint64_t @@ -77,19 +77,19 @@ cnxk_tim_bkt_inc_lock(struct cnxk_tim_bkt *bktp) { const uint64_t v = 1ull << TIM_BUCKET_W1_S_LOCK; - return __atomic_fetch_add(&bktp->w1, v, __ATOMIC_ACQUIRE); + return rte_atomic_fetch_add_explicit(&bktp->w1, v, rte_memory_order_acquire); } static inline void cnxk_tim_bkt_dec_lock(struct cnxk_tim_bkt *bktp) { - __atomic_fetch_sub(&bktp->lock, 1, __ATOMIC_RELEASE); + rte_atomic_fetch_sub_explicit(&bktp->lock, 1, rte_memory_order_release); } static inline void cnxk_tim_bkt_dec_lock_relaxed(struct cnxk_tim_bkt *bktp) { - __atomic_fetch_sub(&bktp->lock, 1, __ATOMIC_RELAXED); + rte_atomic_fetch_sub_explicit(&bktp->lock, 1, rte_memory_order_relaxed); } static inline uint32_t @@ -102,19 +102,19 @@ cnxk_tim_bkt_get_nent(uint64_t w1) static inline void cnxk_tim_bkt_inc_nent(struct cnxk_tim_bkt *bktp) { - __atomic_fetch_add(&bktp->nb_entry, 1, __ATOMIC_RELAXED); + rte_atomic_fetch_add_explicit(&bktp->nb_entry, 1, rte_memory_order_relaxed); } static inline void cnxk_tim_bkt_add_nent_relaxed(struct cnxk_tim_bkt *bktp, uint32_t v) { - __atomic_fetch_add(&bktp->nb_entry, v, __ATOMIC_RELAXED); + rte_atomic_fetch_add_explicit(&bktp->nb_entry, v, rte_memory_order_relaxed); } static inline void cnxk_tim_bkt_add_nent(struct cnxk_tim_bkt *bktp, uint32_t v) { - __atomic_fetch_add(&bktp->nb_entry, v, __ATOMIC_RELEASE); + rte_atomic_fetch_add_explicit(&bktp->nb_entry, v, rte_memory_order_release); } static inline uint64_t @@ -123,7 +123,7 @@ cnxk_tim_bkt_clr_nent(struct cnxk_tim_bkt *bktp) const uint64_t v = ~(TIM_BUCKET_W1_M_NUM_ENTRIES << TIM_BUCKET_W1_S_NUM_ENTRIES); - return __atomic_fetch_and(&bktp->w1, v, __ATOMIC_ACQ_REL) & v; + return rte_atomic_fetch_and_explicit(&bktp->w1, v, rte_memory_order_acq_rel) & v; } static inline uint64_t @@ -132,6 +132,13 @@ cnxk_tim_bkt_fast_mod(uint64_t n, uint64_t d, struct rte_reciprocal_u64 R) return (n - (d * rte_reciprocal_divide_u64(n, &R))); } +static inline void +cnxk_tim_format_event(const struct rte_event_timer *const tim, struct cnxk_tim_ent *const entry) +{ + entry->w0 = (tim->ev.event & 0xFFC000000000) >> 6 | (tim->ev.event & 0xFFFFFFFFF); + entry->wqe = tim->ev.u64; +} + static __rte_always_inline void cnxk_tim_get_target_bucket(struct cnxk_tim_ring *const tim_ring, const uint32_t rel_bkt, struct cnxk_tim_bkt **bkt, @@ -273,8 +280,8 @@ cnxk_tim_add_entry_sp(struct cnxk_tim_ring *const tim_ring, : "memory"); #else do { - hbt_state = __atomic_load_n(&bkt->w1, - __ATOMIC_RELAXED); + hbt_state = rte_atomic_load_explicit(&bkt->w1, + rte_memory_order_relaxed); } while (hbt_state & BIT_ULL(33)); #endif @@ -356,8 +363,8 @@ cnxk_tim_add_entry_mp(struct cnxk_tim_ring *const tim_ring, : "memory"); #else do { - hbt_state = __atomic_load_n(&bkt->w1, - __ATOMIC_RELAXED); + hbt_state = 
rte_atomic_load_explicit(&bkt->w1, + rte_memory_order_relaxed); } while (hbt_state & BIT_ULL(33)); #endif @@ -385,8 +392,8 @@ cnxk_tim_add_entry_mp(struct cnxk_tim_ring *const tim_ring, : [crem] "r"(&bkt->w1) : "memory"); #else - while (__atomic_load_n((int64_t *)&bkt->w1, __ATOMIC_RELAXED) < - 0) + while (rte_atomic_load_explicit((int64_t __rte_atomic *)&bkt->w1, + rte_memory_order_relaxed) < 0) ; #endif goto __retry; @@ -408,15 +415,14 @@ cnxk_tim_add_entry_mp(struct cnxk_tim_ring *const tim_ring, *chunk = *pent; if (cnxk_tim_bkt_fetch_lock(lock_sema)) { do { - lock_sema = __atomic_load_n(&bkt->w1, - __ATOMIC_RELAXED); + lock_sema = rte_atomic_load_explicit(&bkt->w1, + rte_memory_order_relaxed); } while (cnxk_tim_bkt_fetch_lock(lock_sema) - 1); } - rte_atomic_thread_fence(__ATOMIC_ACQUIRE); + rte_atomic_thread_fence(rte_memory_order_acquire); mirr_bkt->current_chunk = (uintptr_t)chunk; - __atomic_store_n(&bkt->chunk_remainder, - tim_ring->nb_chunk_slots - 1, - __ATOMIC_RELEASE); + rte_atomic_store_explicit(&bkt->chunk_remainder, tim_ring->nb_chunk_slots - 1, + rte_memory_order_release); } else { chunk = (struct cnxk_tim_ent *)mirr_bkt->current_chunk; chunk += tim_ring->nb_chunk_slots - rem; @@ -489,8 +495,8 @@ cnxk_tim_add_entry_brst(struct cnxk_tim_ring *const tim_ring, : "memory"); #else do { - hbt_state = __atomic_load_n(&bkt->w1, - __ATOMIC_RELAXED); + hbt_state = rte_atomic_load_explicit(&bkt->w1, + rte_memory_order_relaxed); } while (hbt_state & BIT_ULL(33)); #endif @@ -521,7 +527,7 @@ cnxk_tim_add_entry_brst(struct cnxk_tim_ring *const tim_ring, : [lock] "r"(&bkt->lock) : "memory"); #else - while (__atomic_load_n(&bkt->lock, __ATOMIC_RELAXED)) + while (rte_atomic_load_explicit(&bkt->lock, rte_memory_order_relaxed)) ; #endif goto __retry; @@ -574,6 +580,200 @@ cnxk_tim_add_entry_brst(struct cnxk_tim_ring *const tim_ring, return nb_timers; } +static int +cnxk_tim_add_entry_hwwqe(struct cnxk_tim_ring *const tim_ring, struct rte_event_timer *const tim) +{ + uint64_t __rte_atomic *status; + uint64_t wdata, pa; + uintptr_t lmt_addr; + uint16_t lmt_id; + uint64_t *lmt; + uint64_t rsp; + int rc = 0; + + status = (uint64_t __rte_atomic *)&tim->impl_opaque[0]; + status[0] = 0; + status[1] = 0; + + lmt_addr = tim_ring->lmt_base; + ROC_LMT_BASE_ID_GET(lmt_addr, lmt_id); + lmt = (uint64_t *)lmt_addr; + + lmt[0] = tim->timeout_ticks * tim_ring->tck_int; + lmt[1] = 0x1; + lmt[2] = (tim->ev.event & 0xFFC000000000) >> 6 | (tim->ev.event & 0xFFFFFFFFF); + lmt[3] = (uint64_t)tim; + + /* One LMT line is used, CNTM1 is 0 and SIZE_VEC is not included. 
*/ + wdata = lmt_id; + /* SIZEM1 is 0 */ + pa = (tim_ring->tbase & ~0xFF) + TIM_LF_SCHED_TIMER0; + pa |= (1UL << 4); + roc_lmt_submit_steorl(wdata, pa); + + do { + rsp = rte_atomic_load_explicit(status, rte_memory_order_relaxed); + rsp &= 0xF0UL; + } while (!rsp); + + rsp >>= 4; + switch (rsp) { + case 0x3: + tim->state = RTE_EVENT_TIMER_ERROR_TOOEARLY; + rc = !rc; + break; + case 0x4: + tim->state = RTE_EVENT_TIMER_ERROR_TOOLATE; + rc = !rc; + break; + case 0x1: + tim->state = RTE_EVENT_TIMER_ARMED; + break; + default: + tim->state = RTE_EVENT_TIMER_ERROR; + rc = !rc; + break; + } + + return rc; +} + +static int +cnxk_tim_add_entry_tmo_hwwqe(struct cnxk_tim_ring *const tim_ring, + struct rte_event_timer **const tim, uint64_t intvl, uint16_t nb_timers) +{ + uint64_t __rte_atomic *status; + uint16_t cnt, i, j, done; + uint64_t wdata, pa; + uintptr_t lmt_addr; + uint16_t lmt_id; + uint64_t *lmt; + uint64_t rsp; + + /* We have 32 LMTLINES per core, but use only 1 line as we need to check status */ + lmt_addr = tim_ring->lmt_base; + ROC_LMT_BASE_ID_GET(lmt_addr, lmt_id); + + done = 0; + lmt = (uint64_t *)lmt_addr; + /* We can do up to 7 timers per LMTLINE */ + cnt = nb_timers / CNXK_TIM_ENT_PER_LMT; + + lmt[0] = intvl; + lmt[1] = 0x1; /* Always relative */ + /* One LMT line is used, CNTM1 is 0 and SIZE_VEC is not included. */ + wdata = lmt_id; + /* SIZEM1 is 0 */ + pa = (tim_ring->tbase & ~0xFF) + TIM_LF_SCHED_TIMER0; + pa |= (uint64_t)(CNXK_TIM_ENT_PER_LMT << 4); + for (i = 0; i < cnt; i++) { + status = (uint64_t __rte_atomic *)&tim[i * CNXK_TIM_ENT_PER_LMT]->impl_opaque[0]; + + for (j = 0; j < CNXK_TIM_ENT_PER_LMT; j++) { + cnxk_tim_format_event(tim[(i * CNXK_TIM_ENT_PER_LMT) + j], + (struct cnxk_tim_ent *)&lmt[(j << 1) + 2]); + tim[(i * CNXK_TIM_ENT_PER_LMT) + j]->impl_opaque[0] = 0; + tim[(i * CNXK_TIM_ENT_PER_LMT) + j]->impl_opaque[1] = 0; + tim[(i * CNXK_TIM_ENT_PER_LMT) + j]->state = RTE_EVENT_TIMER_ARMED; + } + + roc_lmt_submit_steorl(wdata, pa); + do { + rsp = rte_atomic_load_explicit(status, rte_memory_order_relaxed); + rsp &= 0xFUL; + } while (!rsp); + + done += CNXK_TIM_ENT_PER_LMT; + rsp &= 0xF; + if (rsp != 0x1) { + switch (rsp) { + case 0x3: + for (j = 0; j < CNXK_TIM_ENT_PER_LMT; j++) + tim[(i * CNXK_TIM_ENT_PER_LMT) + j]->state = + RTE_EVENT_TIMER_ERROR_TOOEARLY; + done -= CNXK_TIM_ENT_PER_LMT; + break; + case 0x4: + for (j = 0; j < CNXK_TIM_ENT_PER_LMT; j++) + tim[(i * CNXK_TIM_ENT_PER_LMT) + j]->state = + RTE_EVENT_TIMER_ERROR_TOOLATE; + done -= CNXK_TIM_ENT_PER_LMT; + break; + case 0x2: + default: + for (j = 0; j < CNXK_TIM_ENT_PER_LMT; j++) { + if ((rte_atomic_load_explicit( + (uint64_t __rte_atomic + *)&tim[(i * CNXK_TIM_ENT_PER_LMT) + j] + ->impl_opaque[0], + rte_memory_order_relaxed) & + 0xF0) != 0x10) { + tim[(i * CNXK_TIM_ENT_PER_LMT) + j]->state = + RTE_EVENT_TIMER_ERROR; + done--; + } + } + break; + } + goto done; + } + } + + /* SIZEM1 is 0 */ + pa = (tim_ring->tbase & ~0xFF) + TIM_LF_SCHED_TIMER0; + pa |= (uint64_t)((nb_timers - cnt) << 4); + if (nb_timers - cnt) { + status = (uint64_t __rte_atomic *)&tim[cnt]->impl_opaque[0]; + + for (i = 0; i < nb_timers - cnt; i++) { + cnxk_tim_format_event(tim[cnt + i], + (struct cnxk_tim_ent *)&lmt[(i << 1) + 2]); + tim[cnt + i]->impl_opaque[0] = 0; + tim[cnt + i]->impl_opaque[1] = 0; + tim[cnt + i]->state = RTE_EVENT_TIMER_ARMED; + } + + roc_lmt_submit_steorl(wdata, pa); + do { + rsp = rte_atomic_load_explicit(status, rte_memory_order_relaxed); + rsp &= 0xFUL; + } while (!rsp); + + done += (nb_timers - cnt); + rsp &= 0xF; + 
if (rsp != 0x1) { + switch (rsp) { + case 0x3: + for (j = 0; j < nb_timers - cnt; j++) + tim[cnt + j]->state = RTE_EVENT_TIMER_ERROR_TOOEARLY; + done -= (nb_timers - cnt); + break; + case 0x4: + for (j = 0; j < nb_timers - cnt; j++) + tim[cnt + j]->state = RTE_EVENT_TIMER_ERROR_TOOLATE; + done -= (nb_timers - cnt); + break; + case 0x2: + default: + for (j = 0; j < nb_timers - cnt; j++) { + if ((rte_atomic_load_explicit( + (uint64_t __rte_atomic *)&tim[cnt + j] + ->impl_opaque[0], + rte_memory_order_relaxed) & + 0xF0) != 0x10) { + tim[cnt + j]->state = RTE_EVENT_TIMER_ERROR; + done--; + } + } + break; + } + } + } + +done: + return done; +} + static int cnxk_tim_rm_entry(struct rte_event_timer *tim) { diff --git a/drivers/event/cnxk/cnxk_worker.c b/drivers/event/cnxk/cnxk_worker.c index 60876abcff..a07c9185d9 100644 --- a/drivers/event/cnxk/cnxk_worker.c +++ b/drivers/event/cnxk/cnxk_worker.c @@ -6,9 +6,7 @@ #include #include -#include "roc_platform.h" -#include "roc_sso.h" -#include "roc_sso_dp.h" +#include "roc_api.h" struct pwords { uint64_t u[5]; diff --git a/drivers/event/cnxk/cnxk_worker.h b/drivers/event/cnxk/cnxk_worker.h index 0e0d728ba4..3592344e04 100644 --- a/drivers/event/cnxk/cnxk_worker.h +++ b/drivers/event/cnxk/cnxk_worker.h @@ -33,7 +33,8 @@ cnxk_sso_hws_swtag_desched(uint32_t tag, uint8_t new_tt, uint16_t grp, uint64_t val; val = tag | ((uint64_t)(new_tt & 0x3) << 32) | ((uint64_t)grp << 34); - __atomic_store_n((uint64_t *)swtag_desched_op, val, __ATOMIC_RELEASE); + rte_atomic_store_explicit((uint64_t __rte_atomic *)swtag_desched_op, val, + rte_memory_order_release); } static __rte_always_inline void diff --git a/drivers/event/cnxk/deq/cn10k/deq_0_15.c b/drivers/event/cnxk/deq/cn10k/deq_0_15.c deleted file mode 100644 index e1ea267394..0000000000 --- a/drivers/event/cnxk/deq/cn10k/deq_0_15.c +++ /dev/null @@ -1,20 +0,0 @@ -/* SPDX-License-Identifier: BSD-3-Clause - * Copyright(C) 2022 Marvell. - */ - -#include "cn10k_worker.h" - -#ifdef _ROC_API_H_ -#error "roc_api.h is included" -#endif - -#if !defined(CNXK_DIS_TMPLT_FUNC) - -#define R(name, flags) \ - SSO_DEQ(cn10k_sso_hws_deq_##name, flags) \ - SSO_DEQ(cn10k_sso_hws_reas_deq_##name, flags | NIX_RX_REAS_F) - -NIX_RX_FASTPATH_MODES_0_15 -#undef R - -#endif diff --git a/drivers/event/cnxk/deq/cn10k/deq_112_127_tmo_seg.c b/drivers/event/cnxk/deq/cn10k/deq_112_127_tmo_seg.c deleted file mode 100644 index 7bbe2086ee..0000000000 --- a/drivers/event/cnxk/deq/cn10k/deq_112_127_tmo_seg.c +++ /dev/null @@ -1,17 +0,0 @@ -/* SPDX-License-Identifier: BSD-3-Clause - * Copyright(C) 2022 Marvell. - */ - -#include "cn10k_worker.h" - -#ifdef _ROC_API_H_ -#error "roc_api.h is included" -#endif - - -#define R(name, flags) \ - SSO_DEQ_TMO_SEG(cn10k_sso_hws_deq_tmo_seg_##name, flags) \ - SSO_DEQ_TMO_SEG(cn10k_sso_hws_reas_deq_tmo_seg_##name, flags | NIX_RX_REAS_F) - -NIX_RX_FASTPATH_MODES_112_127 -#undef R diff --git a/drivers/event/cnxk/deq/cn10k/deq_32_47_tmo.c b/drivers/event/cnxk/deq/cn10k/deq_32_47_tmo.c deleted file mode 100644 index d1dabb0bc3..0000000000 --- a/drivers/event/cnxk/deq/cn10k/deq_32_47_tmo.c +++ /dev/null @@ -1,20 +0,0 @@ -/* SPDX-License-Identifier: BSD-3-Clause - * Copyright(C) 2022 Marvell. 
- */ - -#include "cn10k_worker.h" - -#ifdef _ROC_API_H_ -#error "roc_api.h is included" -#endif - -#if !defined(CNXK_DIS_TMPLT_FUNC) - -#define R(name, flags) \ - SSO_DEQ_TMO(cn10k_sso_hws_deq_tmo_##name, flags) \ - SSO_DEQ_TMO(cn10k_sso_hws_reas_deq_tmo_##name, flags | NIX_RX_REAS_F) - -NIX_RX_FASTPATH_MODES_32_47 -#undef R - -#endif diff --git a/drivers/event/cnxk/deq/cn10k/deq_all_offload.c b/drivers/event/cnxk/deq/cn10k/deq_all_offload.c index 1d0ad548a6..c556f0358e 100644 --- a/drivers/event/cnxk/deq/cn10k/deq_all_offload.c +++ b/drivers/event/cnxk/deq/cn10k/deq_all_offload.c @@ -11,7 +11,8 @@ #if defined(CNXK_DIS_TMPLT_FUNC) uint16_t __rte_hot -cn10k_sso_hws_deq_all_offload(void *port, struct rte_event *ev, uint64_t timeout_ticks) +cn10k_sso_hws_deq_burst_all_offload(void *port, struct rte_event ev[], uint16_t nb_events, + uint64_t timeout_ticks) { const uint32_t flags = (NIX_RX_OFFLOAD_RSS_F | NIX_RX_OFFLOAD_PTYPE_F | NIX_RX_OFFLOAD_CHECKSUM_F | NIX_RX_OFFLOAD_MARK_UPDATE_F | @@ -21,6 +22,7 @@ cn10k_sso_hws_deq_all_offload(void *port, struct rte_event *ev, uint64_t timeout uint16_t ret = 1; uint64_t iter; + RTE_SET_USED(nb_events); if (ws->swtag_req) { ws->swtag_req = 0; ws->gw_rdata = cnxk_sso_hws_swtag_wait(ws->base + SSOW_LF_GWS_WQE0); @@ -35,7 +37,8 @@ cn10k_sso_hws_deq_all_offload(void *port, struct rte_event *ev, uint64_t timeout } uint16_t __rte_hot -cn10k_sso_hws_deq_all_offload_tst(void *port, struct rte_event *ev, uint64_t timeout_ticks) +cn10k_sso_hws_deq_burst_all_offload_tst(void *port, struct rte_event ev[], uint16_t nb_events, + uint64_t timeout_ticks) { const uint32_t flags = (NIX_RX_OFFLOAD_RSS_F | NIX_RX_OFFLOAD_PTYPE_F | NIX_RX_OFFLOAD_CHECKSUM_F | NIX_RX_OFFLOAD_MARK_UPDATE_F | @@ -45,6 +48,7 @@ cn10k_sso_hws_deq_all_offload_tst(void *port, struct rte_event *ev, uint64_t tim uint16_t ret = 1; uint64_t iter; + RTE_SET_USED(nb_events); if (ws->swtag_req) { ws->swtag_req = 0; ws->gw_rdata = cnxk_sso_hws_swtag_wait(ws->base + SSOW_LF_GWS_WQE0); @@ -58,20 +62,4 @@ cn10k_sso_hws_deq_all_offload_tst(void *port, struct rte_event *ev, uint64_t tim return ret; } -uint16_t __rte_hot -cn10k_sso_hws_deq_burst_all_offload(void *port, struct rte_event ev[], uint16_t nb_events, - uint64_t timeout_ticks) -{ - RTE_SET_USED(nb_events); - return cn10k_sso_hws_deq_all_offload(port, ev, timeout_ticks); -} - -uint16_t __rte_hot -cn10k_sso_hws_deq_burst_all_offload_tst(void *port, struct rte_event ev[], uint16_t nb_events, - uint64_t timeout_ticks) -{ - RTE_SET_USED(nb_events); - return cn10k_sso_hws_deq_all_offload_tst(port, ev, timeout_ticks); -} - #endif diff --git a/drivers/event/cnxk/deq/cn10k/deq_0_15_tmo.c b/drivers/event/cnxk/deq/cn20k/deq_0_15_burst.c similarity index 51% rename from drivers/event/cnxk/deq/cn10k/deq_0_15_tmo.c rename to drivers/event/cnxk/deq/cn20k/deq_0_15_burst.c index 4110b24312..f7e0e8fe71 100644 --- a/drivers/event/cnxk/deq/cn10k/deq_0_15_tmo.c +++ b/drivers/event/cnxk/deq/cn20k/deq_0_15_burst.c @@ -2,7 +2,7 @@ * Copyright(C) 2022 Marvell. 
*/ -#include "cn10k_worker.h" +#include "cn20k_worker.h" #ifdef _ROC_API_H_ #error "roc_api.h is included" @@ -11,8 +11,10 @@ #if !defined(CNXK_DIS_TMPLT_FUNC) #define R(name, flags) \ - SSO_DEQ_TMO(cn10k_sso_hws_deq_tmo_##name, flags) \ - SSO_DEQ_TMO(cn10k_sso_hws_reas_deq_tmo_##name, flags | NIX_RX_REAS_F) + SSO_CMN_DEQ_BURST(cn20k_sso_hws_deq_burst_##name, \ + cn20k_sso_hws_deq_##name, flags) \ + SSO_CMN_DEQ_BURST(cn20k_sso_hws_reas_deq_burst_##name, \ + cn20k_sso_hws_reas_deq_##name, flags | NIX_RX_REAS_F) NIX_RX_FASTPATH_MODES_0_15 #undef R diff --git a/drivers/event/cnxk/deq/cn10k/deq_0_15_seg.c b/drivers/event/cnxk/deq/cn20k/deq_0_15_seg_burst.c similarity index 51% rename from drivers/event/cnxk/deq/cn10k/deq_0_15_seg.c rename to drivers/event/cnxk/deq/cn20k/deq_0_15_seg_burst.c index 9e48bd2170..7d5d4823c3 100644 --- a/drivers/event/cnxk/deq/cn10k/deq_0_15_seg.c +++ b/drivers/event/cnxk/deq/cn20k/deq_0_15_seg_burst.c @@ -2,7 +2,7 @@ * Copyright(C) 2022 Marvell. */ -#include "cn10k_worker.h" +#include "cn20k_worker.h" #ifdef _ROC_API_H_ #error "roc_api.h is included" @@ -11,8 +11,10 @@ #if !defined(CNXK_DIS_TMPLT_FUNC) #define R(name, flags) \ - SSO_DEQ_SEG(cn10k_sso_hws_deq_seg_##name, flags) \ - SSO_DEQ_SEG(cn10k_sso_hws_reas_deq_seg_##name, flags | NIX_RX_REAS_F) + SSO_CMN_DEQ_SEG_BURST(cn20k_sso_hws_deq_seg_burst_##name, \ + cn20k_sso_hws_deq_seg_##name, flags) \ + SSO_CMN_DEQ_SEG_BURST(cn20k_sso_hws_reas_deq_seg_burst_##name, \ + cn20k_sso_hws_reas_deq_seg_##name, flags | NIX_RX_REAS_F) NIX_RX_FASTPATH_MODES_0_15 #undef R diff --git a/drivers/event/cnxk/deq/cn10k/deq_0_15_tmo_seg.c b/drivers/event/cnxk/deq/cn20k/deq_0_15_tmo_burst.c similarity index 51% rename from drivers/event/cnxk/deq/cn10k/deq_0_15_tmo_seg.c rename to drivers/event/cnxk/deq/cn20k/deq_0_15_tmo_burst.c index 27fba138de..1bdc4bc82d 100644 --- a/drivers/event/cnxk/deq/cn10k/deq_0_15_tmo_seg.c +++ b/drivers/event/cnxk/deq/cn20k/deq_0_15_tmo_burst.c @@ -2,7 +2,7 @@ * Copyright(C) 2022 Marvell. */ -#include "cn10k_worker.h" +#include "cn20k_worker.h" #ifdef _ROC_API_H_ #error "roc_api.h is included" @@ -11,8 +11,10 @@ #if !defined(CNXK_DIS_TMPLT_FUNC) #define R(name, flags) \ - SSO_DEQ_TMO_SEG(cn10k_sso_hws_deq_tmo_seg_##name, flags) \ - SSO_DEQ_TMO_SEG(cn10k_sso_hws_reas_deq_tmo_seg_##name, flags | NIX_RX_REAS_F) + SSO_CMN_DEQ_BURST(cn20k_sso_hws_deq_tmo_burst_##name, \ + cn20k_sso_hws_deq_tmo_##name, flags) \ + SSO_CMN_DEQ_BURST(cn20k_sso_hws_reas_deq_tmo_burst_##name, \ + cn20k_sso_hws_reas_deq_tmo_##name, flags | NIX_RX_REAS_F) NIX_RX_FASTPATH_MODES_0_15 #undef R diff --git a/drivers/event/cnxk/deq/cn20k/deq_0_15_tmo_seg_burst.c b/drivers/event/cnxk/deq/cn20k/deq_0_15_tmo_seg_burst.c new file mode 100644 index 0000000000..d3ed5fcac0 --- /dev/null +++ b/drivers/event/cnxk/deq/cn20k/deq_0_15_tmo_seg_burst.c @@ -0,0 +1,22 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(C) 2022 Marvell. 
+ */ + +#include "cn20k_worker.h" + +#ifdef _ROC_API_H_ +#error "roc_api.h is included" +#endif + +#if !defined(CNXK_DIS_TMPLT_FUNC) + +#define R(name, flags) \ + SSO_CMN_DEQ_BURST(cn20k_sso_hws_deq_tmo_seg_burst_##name, \ + cn20k_sso_hws_deq_tmo_seg_##name, flags) \ + SSO_CMN_DEQ_BURST(cn20k_sso_hws_reas_deq_tmo_seg_burst_##name, \ + cn20k_sso_hws_reas_deq_tmo_seg_##name, flags | NIX_RX_REAS_F) + +NIX_RX_FASTPATH_MODES_0_15 +#undef R + +#endif diff --git a/drivers/event/cnxk/deq/cn10k/deq_112_127_seg.c b/drivers/event/cnxk/deq/cn20k/deq_112_127_burst.c similarity index 52% rename from drivers/event/cnxk/deq/cn10k/deq_112_127_seg.c rename to drivers/event/cnxk/deq/cn20k/deq_112_127_burst.c index 47c1ecdf2f..29c21441cf 100644 --- a/drivers/event/cnxk/deq/cn10k/deq_112_127_seg.c +++ b/drivers/event/cnxk/deq/cn20k/deq_112_127_burst.c @@ -2,7 +2,7 @@ * Copyright(C) 2022 Marvell. */ -#include "cn10k_worker.h" +#include "cn20k_worker.h" #ifdef _ROC_API_H_ #error "roc_api.h is included" @@ -11,8 +11,10 @@ #if !defined(CNXK_DIS_TMPLT_FUNC) #define R(name, flags) \ - SSO_DEQ_SEG(cn10k_sso_hws_deq_seg_##name, flags) \ - SSO_DEQ_SEG(cn10k_sso_hws_reas_deq_seg_##name, flags | NIX_RX_REAS_F) + SSO_CMN_DEQ_BURST(cn20k_sso_hws_deq_burst_##name, \ + cn20k_sso_hws_deq_##name, flags) \ + SSO_CMN_DEQ_BURST(cn20k_sso_hws_reas_deq_burst_##name, \ + cn20k_sso_hws_reas_deq_##name, flags | NIX_RX_REAS_F) NIX_RX_FASTPATH_MODES_112_127 #undef R diff --git a/drivers/event/cnxk/deq/cn10k/deq_112_127.c b/drivers/event/cnxk/deq/cn20k/deq_112_127_seg_burst.c similarity index 51% rename from drivers/event/cnxk/deq/cn10k/deq_112_127.c rename to drivers/event/cnxk/deq/cn20k/deq_112_127_seg_burst.c index d7c0c21770..004b5ecb95 100644 --- a/drivers/event/cnxk/deq/cn10k/deq_112_127.c +++ b/drivers/event/cnxk/deq/cn20k/deq_112_127_seg_burst.c @@ -2,7 +2,7 @@ * Copyright(C) 2022 Marvell. */ -#include "cn10k_worker.h" +#include "cn20k_worker.h" #ifdef _ROC_API_H_ #error "roc_api.h is included" @@ -11,8 +11,10 @@ #if !defined(CNXK_DIS_TMPLT_FUNC) #define R(name, flags) \ - SSO_DEQ(cn10k_sso_hws_deq_##name, flags) \ - SSO_DEQ(cn10k_sso_hws_reas_deq_##name, flags | NIX_RX_REAS_F) + SSO_CMN_DEQ_SEG_BURST(cn20k_sso_hws_deq_seg_burst_##name, \ + cn20k_sso_hws_deq_seg_##name, flags) \ + SSO_CMN_DEQ_SEG_BURST(cn20k_sso_hws_reas_deq_seg_burst_##name, \ + cn20k_sso_hws_reas_deq_seg_##name, flags | NIX_RX_REAS_F) NIX_RX_FASTPATH_MODES_112_127 #undef R diff --git a/drivers/event/cnxk/deq/cn10k/deq_112_127_tmo.c b/drivers/event/cnxk/deq/cn20k/deq_112_127_tmo_burst.c similarity index 51% rename from drivers/event/cnxk/deq/cn10k/deq_112_127_tmo.c rename to drivers/event/cnxk/deq/cn20k/deq_112_127_tmo_burst.c index 9b4f028678..d544b39e9e 100644 --- a/drivers/event/cnxk/deq/cn10k/deq_112_127_tmo.c +++ b/drivers/event/cnxk/deq/cn20k/deq_112_127_tmo_burst.c @@ -2,7 +2,7 @@ * Copyright(C) 2022 Marvell. 
*/ -#include "cn10k_worker.h" +#include "cn20k_worker.h" #ifdef _ROC_API_H_ #error "roc_api.h is included" @@ -11,8 +11,10 @@ #if !defined(CNXK_DIS_TMPLT_FUNC) #define R(name, flags) \ - SSO_DEQ_TMO(cn10k_sso_hws_deq_tmo_##name, flags) \ - SSO_DEQ_TMO(cn10k_sso_hws_reas_deq_tmo_##name, flags | NIX_RX_REAS_F) + SSO_CMN_DEQ_BURST(cn20k_sso_hws_deq_tmo_burst_##name, \ + cn20k_sso_hws_deq_tmo_##name, flags) \ + SSO_CMN_DEQ_BURST(cn20k_sso_hws_reas_deq_tmo_burst_##name, \ + cn20k_sso_hws_reas_deq_tmo_##name, flags | NIX_RX_REAS_F) NIX_RX_FASTPATH_MODES_112_127 #undef R diff --git a/drivers/event/cnxk/deq/cn20k/deq_112_127_tmo_seg_burst.c b/drivers/event/cnxk/deq/cn20k/deq_112_127_tmo_seg_burst.c new file mode 100644 index 0000000000..ba7a1207ad --- /dev/null +++ b/drivers/event/cnxk/deq/cn20k/deq_112_127_tmo_seg_burst.c @@ -0,0 +1,22 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(C) 2022 Marvell. + */ + +#include "cn20k_worker.h" + +#ifdef _ROC_API_H_ +#error "roc_api.h is included" +#endif + +#if !defined(CNXK_DIS_TMPLT_FUNC) + +#define R(name, flags) \ + SSO_CMN_DEQ_BURST(cn20k_sso_hws_deq_tmo_seg_burst_##name, \ + cn20k_sso_hws_deq_tmo_seg_##name, flags) \ + SSO_CMN_DEQ_BURST(cn20k_sso_hws_reas_deq_tmo_seg_burst_##name, \ + cn20k_sso_hws_reas_deq_tmo_seg_##name, flags | NIX_RX_REAS_F) + +NIX_RX_FASTPATH_MODES_112_127 +#undef R + +#endif diff --git a/drivers/event/cnxk/deq/cn10k/deq_16_31_tmo_seg.c b/drivers/event/cnxk/deq/cn20k/deq_16_31_burst.c similarity index 53% rename from drivers/event/cnxk/deq/cn10k/deq_16_31_tmo_seg.c rename to drivers/event/cnxk/deq/cn20k/deq_16_31_burst.c index bf00a04f34..eb7382e9d9 100644 --- a/drivers/event/cnxk/deq/cn10k/deq_16_31_tmo_seg.c +++ b/drivers/event/cnxk/deq/cn20k/deq_16_31_burst.c @@ -2,7 +2,7 @@ * Copyright(C) 2022 Marvell. */ -#include "cn10k_worker.h" +#include "cn20k_worker.h" #ifdef _ROC_API_H_ #error "roc_api.h is included" @@ -11,9 +11,10 @@ #if !defined(CNXK_DIS_TMPLT_FUNC) #define R(name, flags) \ - SSO_DEQ_TMO_SEG(cn10k_sso_hws_deq_tmo_seg_##name, flags) \ - SSO_DEQ_TMO_SEG(cn10k_sso_hws_reas_deq_tmo_seg_##name, \ - flags | NIX_RX_REAS_F) + SSO_CMN_DEQ_BURST(cn20k_sso_hws_deq_burst_##name, \ + cn20k_sso_hws_deq_##name, flags) \ + SSO_CMN_DEQ_BURST(cn20k_sso_hws_reas_deq_burst_##name, \ + cn20k_sso_hws_reas_deq_##name, flags | NIX_RX_REAS_F_) NIX_RX_FASTPATH_MODES_16_31 #undef R diff --git a/drivers/event/cnxk/deq/cn10k/deq_16_31.c b/drivers/event/cnxk/deq/cn20k/deq_16_31_seg_burst.c similarity index 51% rename from drivers/event/cnxk/deq/cn10k/deq_16_31.c rename to drivers/event/cnxk/deq/cn20k/deq_16_31_seg_burst.c index e5776a8b64..770b7221e6 100644 --- a/drivers/event/cnxk/deq/cn10k/deq_16_31.c +++ b/drivers/event/cnxk/deq/cn20k/deq_16_31_seg_burst.c @@ -2,7 +2,7 @@ * Copyright(C) 2022 Marvell. 
*/ -#include "cn10k_worker.h" +#include "cn20k_worker.h" #ifdef _ROC_API_H_ #error "roc_api.h is included" @@ -11,8 +11,10 @@ #if !defined(CNXK_DIS_TMPLT_FUNC) #define R(name, flags) \ - SSO_DEQ(cn10k_sso_hws_deq_##name, flags) \ - SSO_DEQ(cn10k_sso_hws_reas_deq_##name, flags | NIX_RX_REAS_F) + SSO_CMN_DEQ_SEG_BURST(cn20k_sso_hws_deq_seg_burst_##name, \ + cn20k_sso_hws_deq_seg_##name, flags) \ + SSO_CMN_DEQ_SEG_BURST(cn20k_sso_hws_reas_deq_seg_burst_##name, \ + cn20k_sso_hws_reas_deq_seg_##name, flags | NIX_RX_REAS_F) NIX_RX_FASTPATH_MODES_16_31 #undef R diff --git a/drivers/event/cnxk/deq/cn10k/deq_16_31_seg.c b/drivers/event/cnxk/deq/cn20k/deq_16_31_tmo_burst.c similarity index 51% rename from drivers/event/cnxk/deq/cn10k/deq_16_31_seg.c rename to drivers/event/cnxk/deq/cn20k/deq_16_31_tmo_burst.c index 83e1ace903..1e71d22fc3 100644 --- a/drivers/event/cnxk/deq/cn10k/deq_16_31_seg.c +++ b/drivers/event/cnxk/deq/cn20k/deq_16_31_tmo_burst.c @@ -2,7 +2,7 @@ * Copyright(C) 2022 Marvell. */ -#include "cn10k_worker.h" +#include "cn20k_worker.h" #ifdef _ROC_API_H_ #error "roc_api.h is included" @@ -11,8 +11,10 @@ #if !defined(CNXK_DIS_TMPLT_FUNC) #define R(name, flags) \ - SSO_DEQ_SEG(cn10k_sso_hws_deq_seg_##name, flags) \ - SSO_DEQ_SEG(cn10k_sso_hws_reas_deq_seg_##name, flags | NIX_RX_REAS_F) + SSO_CMN_DEQ_BURST(cn20k_sso_hws_deq_tmo_burst_##name, \ + cn20k_sso_hws_deq_tmo_##name, flags) \ + SSO_CMN_DEQ_BURST(cn20k_sso_hws_reas_deq_tmo_burst_##name, \ + cn20k_sso_hws_reas_deq_tmo_##name, flags | NIX_RX_REAS_F) NIX_RX_FASTPATH_MODES_16_31 #undef R diff --git a/drivers/event/cnxk/deq/cn10k/deq_16_31_tmo.c b/drivers/event/cnxk/deq/cn20k/deq_16_31_tmo_seg_burst.c similarity index 51% rename from drivers/event/cnxk/deq/cn10k/deq_16_31_tmo.c rename to drivers/event/cnxk/deq/cn20k/deq_16_31_tmo_seg_burst.c index e17e71db2b..1a9e7efa0a 100644 --- a/drivers/event/cnxk/deq/cn10k/deq_16_31_tmo.c +++ b/drivers/event/cnxk/deq/cn20k/deq_16_31_tmo_seg_burst.c @@ -2,7 +2,7 @@ * Copyright(C) 2022 Marvell. */ -#include "cn10k_worker.h" +#include "cn20k_worker.h" #ifdef _ROC_API_H_ #error "roc_api.h is included" @@ -11,8 +11,10 @@ #if !defined(CNXK_DIS_TMPLT_FUNC) #define R(name, flags) \ - SSO_DEQ_TMO(cn10k_sso_hws_deq_tmo_##name, flags) \ - SSO_DEQ_TMO(cn10k_sso_hws_reas_deq_tmo_##name, flags | NIX_RX_REAS_F) + SSO_CMN_DEQ_BURST(cn20k_sso_hws_deq_tmo_seg_burst_##name, \ + cn20k_sso_hws_deq_tmo_seg_##name, flags) \ + SSO_CMN_DEQ_BURST(cn20k_sso_hws_reas_deq_tmo_seg_burst_##name, \ + cn20k_sso_hws_reas_deq_tmo_seg_##name, flags | NIX_RX_REAS_F) NIX_RX_FASTPATH_MODES_16_31 #undef R diff --git a/drivers/event/cnxk/deq/cn10k/deq_32_47_tmo_seg.c b/drivers/event/cnxk/deq/cn20k/deq_32_47_burst.c similarity index 53% rename from drivers/event/cnxk/deq/cn10k/deq_32_47_tmo_seg.c rename to drivers/event/cnxk/deq/cn20k/deq_32_47_burst.c index a8d0240871..3d51bd6659 100644 --- a/drivers/event/cnxk/deq/cn10k/deq_32_47_tmo_seg.c +++ b/drivers/event/cnxk/deq/cn20k/deq_32_47_burst.c @@ -2,7 +2,7 @@ * Copyright(C) 2022 Marvell. 
*/ -#include "cn10k_worker.h" +#include "cn20k_worker.h" #ifdef _ROC_API_H_ #error "roc_api.h is included" @@ -11,9 +11,10 @@ #if !defined(CNXK_DIS_TMPLT_FUNC) #define R(name, flags) \ - SSO_DEQ_TMO_SEG(cn10k_sso_hws_deq_tmo_seg_##name, flags) \ - SSO_DEQ_TMO_SEG(cn10k_sso_hws_reas_deq_tmo_seg_##name, \ - flags | NIX_RX_REAS_F) + SSO_CMN_DEQ_BURST(cn20k_sso_hws_deq_burst_##name, \ + cn20k_sso_hws_deq_##name, flags) \ + SSO_CMN_DEQ_BURST(cn20k_sso_hws_reas_deq_burst_##name, \ + cn20k_sso_hws_reas_deq_##name, flags | NIX_RX_REAS_F_) NIX_RX_FASTPATH_MODES_32_47 #undef R diff --git a/drivers/event/cnxk/deq/cn10k/deq_32_47.c b/drivers/event/cnxk/deq/cn20k/deq_32_47_seg_burst.c similarity index 51% rename from drivers/event/cnxk/deq/cn10k/deq_32_47.c rename to drivers/event/cnxk/deq/cn20k/deq_32_47_seg_burst.c index 9fc32fc8f1..851b5b7d31 100644 --- a/drivers/event/cnxk/deq/cn10k/deq_32_47.c +++ b/drivers/event/cnxk/deq/cn20k/deq_32_47_seg_burst.c @@ -2,7 +2,7 @@ * Copyright(C) 2022 Marvell. */ -#include "cn10k_worker.h" +#include "cn20k_worker.h" #ifdef _ROC_API_H_ #error "roc_api.h is included" @@ -11,8 +11,10 @@ #if !defined(CNXK_DIS_TMPLT_FUNC) #define R(name, flags) \ - SSO_DEQ(cn10k_sso_hws_deq_##name, flags) \ - SSO_DEQ(cn10k_sso_hws_reas_deq_##name, flags | NIX_RX_REAS_F) + SSO_CMN_DEQ_SEG_BURST(cn20k_sso_hws_deq_seg_burst_##name, \ + cn20k_sso_hws_deq_seg_##name, flags) \ + SSO_CMN_DEQ_SEG_BURST(cn20k_sso_hws_reas_deq_seg_burst_##name, \ + cn20k_sso_hws_reas_deq_seg_##name, flags | NIX_RX_REAS_F) NIX_RX_FASTPATH_MODES_32_47 #undef R diff --git a/drivers/event/cnxk/deq/cn20k/deq_32_47_tmo_burst.c b/drivers/event/cnxk/deq/cn20k/deq_32_47_tmo_burst.c new file mode 100644 index 0000000000..038ba726a0 --- /dev/null +++ b/drivers/event/cnxk/deq/cn20k/deq_32_47_tmo_burst.c @@ -0,0 +1,23 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(C) 2022 Marvell. + */ + +#include "cn20k_worker.h" + +#ifdef _ROC_API_H_ +#error "roc_api.h is included" +#endif + +#if !defined(CNXK_DIS_TMPLT_FUNC) + +#define R(name, flags) \ + SSO_CMN_DEQ_BURST(cn20k_sso_hws_deq_tmo_burst_##name, \ + cn20k_sso_hws_deq_tmo_##name, flags) \ + SSO_CMN_DEQ_BURST(cn20k_sso_hws_reas_deq_tmo_burst_##name, \ + cn20k_sso_hws_reas_deq_tmo_##name, \ + flags | NIX_RX_REAS_F) + +NIX_RX_FASTPATH_MODES_32_47 +#undef R + +#endif diff --git a/drivers/event/cnxk/deq/cn10k/deq_32_47_seg.c b/drivers/event/cnxk/deq/cn20k/deq_32_47_tmo_seg_burst.c similarity index 51% rename from drivers/event/cnxk/deq/cn10k/deq_32_47_seg.c rename to drivers/event/cnxk/deq/cn20k/deq_32_47_tmo_seg_burst.c index 8ce1875e98..68fb3ff53d 100644 --- a/drivers/event/cnxk/deq/cn10k/deq_32_47_seg.c +++ b/drivers/event/cnxk/deq/cn20k/deq_32_47_tmo_seg_burst.c @@ -2,7 +2,7 @@ * Copyright(C) 2022 Marvell. 
*/ -#include "cn10k_worker.h" +#include "cn20k_worker.h" #ifdef _ROC_API_H_ #error "roc_api.h is included" @@ -11,8 +11,10 @@ #if !defined(CNXK_DIS_TMPLT_FUNC) #define R(name, flags) \ - SSO_DEQ_SEG(cn10k_sso_hws_deq_seg_##name, flags) \ - SSO_DEQ_SEG(cn10k_sso_hws_reas_deq_seg_##name, flags | NIX_RX_REAS_F) + SSO_CMN_DEQ_BURST(cn20k_sso_hws_deq_tmo_seg_burst_##name, \ + cn20k_sso_hws_deq_tmo_seg_##name, flags) \ + SSO_CMN_DEQ_BURST(cn20k_sso_hws_reas_deq_tmo_seg_burst_##name, \ + cn20k_sso_hws_reas_deq_tmo_seg_##name, flags | NIX_RX_REAS_F) NIX_RX_FASTPATH_MODES_32_47 #undef R diff --git a/drivers/event/cnxk/deq/cn10k/deq_48_63_tmo_seg.c b/drivers/event/cnxk/deq/cn20k/deq_48_63_burst.c similarity index 53% rename from drivers/event/cnxk/deq/cn10k/deq_48_63_tmo_seg.c rename to drivers/event/cnxk/deq/cn20k/deq_48_63_burst.c index d8e03e2b9c..84f3ccd39c 100644 --- a/drivers/event/cnxk/deq/cn10k/deq_48_63_tmo_seg.c +++ b/drivers/event/cnxk/deq/cn20k/deq_48_63_burst.c @@ -2,7 +2,7 @@ * Copyright(C) 2022 Marvell. */ -#include "cn10k_worker.h" +#include "cn20k_worker.h" #ifdef _ROC_API_H_ #error "roc_api.h is included" @@ -11,9 +11,10 @@ #if !defined(CNXK_DIS_TMPLT_FUNC) #define R(name, flags) \ - SSO_DEQ_TMO_SEG(cn10k_sso_hws_deq_tmo_seg_##name, flags) \ - SSO_DEQ_TMO_SEG(cn10k_sso_hws_reas_deq_tmo_seg_##name, \ - flags | NIX_RX_REAS_F) + SSO_CMN_DEQ_BURST(cn20k_sso_hws_deq_burst_##name, \ + cn20k_sso_hws_deq_##name, flags) \ + SSO_CMN_DEQ_BURST(cn20k_sso_hws_reas_deq_burst_##name, \ + cn20k_sso_hws_reas_deq_##name, flags | NIX_RX_REAS_F) NIX_RX_FASTPATH_MODES_48_63 #undef R diff --git a/drivers/event/cnxk/deq/cn10k/deq_48_63_tmo.c b/drivers/event/cnxk/deq/cn20k/deq_48_63_seg_burst.c similarity index 51% rename from drivers/event/cnxk/deq/cn10k/deq_48_63_tmo.c rename to drivers/event/cnxk/deq/cn20k/deq_48_63_seg_burst.c index 12d045d429..417f622412 100644 --- a/drivers/event/cnxk/deq/cn10k/deq_48_63_tmo.c +++ b/drivers/event/cnxk/deq/cn20k/deq_48_63_seg_burst.c @@ -2,7 +2,7 @@ * Copyright(C) 2022 Marvell. */ -#include "cn10k_worker.h" +#include "cn20k_worker.h" #ifdef _ROC_API_H_ #error "roc_api.h is included" @@ -11,8 +11,10 @@ #if !defined(CNXK_DIS_TMPLT_FUNC) #define R(name, flags) \ - SSO_DEQ_TMO(cn10k_sso_hws_deq_tmo_##name, flags) \ - SSO_DEQ_TMO(cn10k_sso_hws_reas_deq_tmo_##name, flags | NIX_RX_REAS_F) + SSO_CMN_DEQ_SEG_BURST(cn20k_sso_hws_deq_seg_burst_##name, \ + cn20k_sso_hws_deq_seg_##name, flags) \ + SSO_CMN_DEQ_SEG_BURST(cn20k_sso_hws_reas_deq_seg_burst_##name, \ + cn20k_sso_hws_reas_deq_seg_##name, flags | NIX_RX_REAS_F) NIX_RX_FASTPATH_MODES_48_63 #undef R diff --git a/drivers/event/cnxk/deq/cn10k/deq_48_63.c b/drivers/event/cnxk/deq/cn20k/deq_48_63_tmo_burst.c similarity index 51% rename from drivers/event/cnxk/deq/cn10k/deq_48_63.c rename to drivers/event/cnxk/deq/cn20k/deq_48_63_tmo_burst.c index 7a8c548e60..7fbea69134 100644 --- a/drivers/event/cnxk/deq/cn10k/deq_48_63.c +++ b/drivers/event/cnxk/deq/cn20k/deq_48_63_tmo_burst.c @@ -2,7 +2,7 @@ * Copyright(C) 2022 Marvell. 
*/ -#include "cn10k_worker.h" +#include "cn20k_worker.h" #ifdef _ROC_API_H_ #error "roc_api.h is included" @@ -11,8 +11,10 @@ #if !defined(CNXK_DIS_TMPLT_FUNC) #define R(name, flags) \ - SSO_DEQ(cn10k_sso_hws_deq_##name, flags) \ - SSO_DEQ(cn10k_sso_hws_reas_deq_##name, flags | NIX_RX_REAS_F) + SSO_CMN_DEQ_BURST(cn20k_sso_hws_deq_tmo_burst_##name, \ + cn20k_sso_hws_deq_tmo_##name, flags) \ + SSO_CMN_DEQ_BURST(cn20k_sso_hws_reas_deq_tmo_burst_##name, \ + cn20k_sso_hws_reas_deq_tmo_##name, flags | NIX_RX_REAS_F) NIX_RX_FASTPATH_MODES_48_63 #undef R diff --git a/drivers/event/cnxk/deq/cn10k/deq_48_63_seg.c b/drivers/event/cnxk/deq/cn20k/deq_48_63_tmo_seg_burst.c similarity index 51% rename from drivers/event/cnxk/deq/cn10k/deq_48_63_seg.c rename to drivers/event/cnxk/deq/cn20k/deq_48_63_tmo_seg_burst.c index 7a85c7a7ba..3bee216768 100644 --- a/drivers/event/cnxk/deq/cn10k/deq_48_63_seg.c +++ b/drivers/event/cnxk/deq/cn20k/deq_48_63_tmo_seg_burst.c @@ -2,7 +2,7 @@ * Copyright(C) 2022 Marvell. */ -#include "cn10k_worker.h" +#include "cn20k_worker.h" #ifdef _ROC_API_H_ #error "roc_api.h is included" @@ -11,8 +11,10 @@ #if !defined(CNXK_DIS_TMPLT_FUNC) #define R(name, flags) \ - SSO_DEQ_SEG(cn10k_sso_hws_deq_seg_##name, flags) \ - SSO_DEQ_SEG(cn10k_sso_hws_reas_deq_seg_##name, flags | NIX_RX_REAS_F) + SSO_CMN_DEQ_BURST(cn20k_sso_hws_deq_tmo_seg_burst_##name, \ + cn20k_sso_hws_deq_tmo_seg_##name, flags) \ + SSO_CMN_DEQ_BURST(cn20k_sso_hws_reas_deq_tmo_seg_burst_##name, \ + cn20k_sso_hws_reas_deq_tmo_seg_##name, flags | NIX_RX_REAS_F) NIX_RX_FASTPATH_MODES_48_63 #undef R diff --git a/drivers/event/cnxk/deq/cn10k/deq_64_79_seg.c b/drivers/event/cnxk/deq/cn20k/deq_64_79_burst.c similarity index 52% rename from drivers/event/cnxk/deq/cn10k/deq_64_79_seg.c rename to drivers/event/cnxk/deq/cn20k/deq_64_79_burst.c index 6c01a9cce1..9b341a0df5 100644 --- a/drivers/event/cnxk/deq/cn10k/deq_64_79_seg.c +++ b/drivers/event/cnxk/deq/cn20k/deq_64_79_burst.c @@ -2,7 +2,7 @@ * Copyright(C) 2022 Marvell. */ -#include "cn10k_worker.h" +#include "cn20k_worker.h" #ifdef _ROC_API_H_ #error "roc_api.h is included" @@ -11,8 +11,10 @@ #if !defined(CNXK_DIS_TMPLT_FUNC) #define R(name, flags) \ - SSO_DEQ_SEG(cn10k_sso_hws_deq_seg_##name, flags) \ - SSO_DEQ_SEG(cn10k_sso_hws_reas_deq_seg_##name, flags | NIX_RX_REAS_F) + SSO_CMN_DEQ_BURST(cn20k_sso_hws_deq_burst_##name, \ + cn20k_sso_hws_deq_##name, flags) \ + SSO_CMN_DEQ_BURST(cn20k_sso_hws_reas_deq_burst_##name, \ + cn20k_sso_hws_reas_deq_##name, flags | NIX_RX_REAS_F) NIX_RX_FASTPATH_MODES_64_79 #undef R diff --git a/drivers/event/cnxk/deq/cn10k/deq_64_79_tmo_seg.c b/drivers/event/cnxk/deq/cn20k/deq_64_79_seg_burst.c similarity index 51% rename from drivers/event/cnxk/deq/cn10k/deq_64_79_tmo_seg.c rename to drivers/event/cnxk/deq/cn20k/deq_64_79_seg_burst.c index d3dad21179..1f051f74a9 100644 --- a/drivers/event/cnxk/deq/cn10k/deq_64_79_tmo_seg.c +++ b/drivers/event/cnxk/deq/cn20k/deq_64_79_seg_burst.c @@ -2,7 +2,7 @@ * Copyright(C) 2022 Marvell. 
*/ -#include "cn10k_worker.h" +#include "cn20k_worker.h" #ifdef _ROC_API_H_ #error "roc_api.h is included" @@ -11,8 +11,10 @@ #if !defined(CNXK_DIS_TMPLT_FUNC) #define R(name, flags) \ - SSO_DEQ_TMO_SEG(cn10k_sso_hws_deq_tmo_seg_##name, flags) \ - SSO_DEQ_TMO_SEG(cn10k_sso_hws_reas_deq_tmo_seg_##name, flags | NIX_RX_REAS_F) + SSO_CMN_DEQ_SEG_BURST(cn20k_sso_hws_deq_seg_burst_##name, \ + cn20k_sso_hws_deq_seg_##name, flags) \ + SSO_CMN_DEQ_SEG_BURST(cn20k_sso_hws_reas_deq_seg_burst_##name, \ + cn20k_sso_hws_reas_deq_seg_##name, flags | NIX_RX_REAS_F) NIX_RX_FASTPATH_MODES_64_79 #undef R diff --git a/drivers/event/cnxk/deq/cn10k/deq_64_79_tmo.c b/drivers/event/cnxk/deq/cn20k/deq_64_79_tmo_burst.c similarity index 51% rename from drivers/event/cnxk/deq/cn10k/deq_64_79_tmo.c rename to drivers/event/cnxk/deq/cn20k/deq_64_79_tmo_burst.c index 3b08f65652..c134e27f25 100644 --- a/drivers/event/cnxk/deq/cn10k/deq_64_79_tmo.c +++ b/drivers/event/cnxk/deq/cn20k/deq_64_79_tmo_burst.c @@ -2,7 +2,7 @@ * Copyright(C) 2022 Marvell. */ -#include "cn10k_worker.h" +#include "cn20k_worker.h" #ifdef _ROC_API_H_ #error "roc_api.h is included" @@ -11,8 +11,10 @@ #if !defined(CNXK_DIS_TMPLT_FUNC) #define R(name, flags) \ - SSO_DEQ_TMO(cn10k_sso_hws_deq_tmo_##name, flags) \ - SSO_DEQ_TMO(cn10k_sso_hws_reas_deq_tmo_##name, flags | NIX_RX_REAS_F) + SSO_CMN_DEQ_BURST(cn20k_sso_hws_deq_tmo_burst_##name, \ + cn20k_sso_hws_deq_tmo_##name, flags) \ + SSO_CMN_DEQ_BURST(cn20k_sso_hws_reas_deq_tmo_burst_##name, \ + cn20k_sso_hws_reas_deq_tmo_##name, flags | NIX_RX_REAS_F) NIX_RX_FASTPATH_MODES_64_79 #undef R diff --git a/drivers/event/cnxk/deq/cn10k/deq_64_79.c b/drivers/event/cnxk/deq/cn20k/deq_64_79_tmo_seg_burst.c similarity index 51% rename from drivers/event/cnxk/deq/cn10k/deq_64_79.c rename to drivers/event/cnxk/deq/cn20k/deq_64_79_tmo_seg_burst.c index b429886634..849e8e12fc 100644 --- a/drivers/event/cnxk/deq/cn10k/deq_64_79.c +++ b/drivers/event/cnxk/deq/cn20k/deq_64_79_tmo_seg_burst.c @@ -2,7 +2,7 @@ * Copyright(C) 2022 Marvell. */ -#include "cn10k_worker.h" +#include "cn20k_worker.h" #ifdef _ROC_API_H_ #error "roc_api.h is included" @@ -11,8 +11,10 @@ #if !defined(CNXK_DIS_TMPLT_FUNC) #define R(name, flags) \ - SSO_DEQ(cn10k_sso_hws_deq_##name, flags) \ - SSO_DEQ(cn10k_sso_hws_reas_deq_##name, flags | NIX_RX_REAS_F) + SSO_CMN_DEQ_BURST(cn20k_sso_hws_deq_tmo_seg_burst_##name, \ + cn20k_sso_hws_deq_tmo_seg_##name, flags) \ + SSO_CMN_DEQ_BURST(cn20k_sso_hws_reas_deq_tmo_seg_burst_##name, \ + cn20k_sso_hws_reas_deq_tmo_seg_##name, flags | NIX_RX_REAS_F) NIX_RX_FASTPATH_MODES_64_79 #undef R diff --git a/drivers/event/cnxk/deq/cn10k/deq_80_95_seg.c b/drivers/event/cnxk/deq/cn20k/deq_80_95_burst.c similarity index 52% rename from drivers/event/cnxk/deq/cn10k/deq_80_95_seg.c rename to drivers/event/cnxk/deq/cn20k/deq_80_95_burst.c index 9c0cdce7b8..9724caf5d6 100644 --- a/drivers/event/cnxk/deq/cn10k/deq_80_95_seg.c +++ b/drivers/event/cnxk/deq/cn20k/deq_80_95_burst.c @@ -2,7 +2,7 @@ * Copyright(C) 2022 Marvell. 
*/ -#include "cn10k_worker.h" +#include "cn20k_worker.h" #ifdef _ROC_API_H_ #error "roc_api.h is included" @@ -11,8 +11,10 @@ #if !defined(CNXK_DIS_TMPLT_FUNC) #define R(name, flags) \ - SSO_DEQ_SEG(cn10k_sso_hws_deq_seg_##name, flags) \ - SSO_DEQ_SEG(cn10k_sso_hws_reas_deq_seg_##name, flags | NIX_RX_REAS_F) + SSO_CMN_DEQ_BURST(cn20k_sso_hws_deq_burst_##name, \ + cn20k_sso_hws_deq_##name, flags) \ + SSO_CMN_DEQ_BURST(cn20k_sso_hws_reas_deq_burst_##name, \ + cn20k_sso_hws_reas_deq_##name, flags | NIX_RX_REAS_F) NIX_RX_FASTPATH_MODES_80_95 #undef R diff --git a/drivers/event/cnxk/deq/cn10k/deq_80_95_tmo_seg.c b/drivers/event/cnxk/deq/cn20k/deq_80_95_seg_burst.c similarity index 51% rename from drivers/event/cnxk/deq/cn10k/deq_80_95_tmo_seg.c rename to drivers/event/cnxk/deq/cn20k/deq_80_95_seg_burst.c index 3a7e93ff4c..997c208511 100644 --- a/drivers/event/cnxk/deq/cn10k/deq_80_95_tmo_seg.c +++ b/drivers/event/cnxk/deq/cn20k/deq_80_95_seg_burst.c @@ -2,7 +2,7 @@ * Copyright(C) 2022 Marvell. */ -#include "cn10k_worker.h" +#include "cn20k_worker.h" #ifdef _ROC_API_H_ #error "roc_api.h is included" @@ -11,8 +11,10 @@ #if !defined(CNXK_DIS_TMPLT_FUNC) #define R(name, flags) \ - SSO_DEQ_TMO_SEG(cn10k_sso_hws_deq_tmo_seg_##name, flags) \ - SSO_DEQ_TMO_SEG(cn10k_sso_hws_reas_deq_tmo_seg_##name, flags | NIX_RX_REAS_F) + SSO_CMN_DEQ_SEG_BURST(cn20k_sso_hws_deq_seg_burst_##name, \ + cn20k_sso_hws_deq_seg_##name, flags) \ + SSO_CMN_DEQ_SEG_BURST(cn20k_sso_hws_reas_deq_seg_burst_##name, \ + cn20k_sso_hws_reas_deq_seg_##name, flags | NIX_RX_REAS_F) NIX_RX_FASTPATH_MODES_80_95 #undef R diff --git a/drivers/event/cnxk/deq/cn10k/deq_80_95_tmo.c b/drivers/event/cnxk/deq/cn20k/deq_80_95_tmo_burst.c similarity index 51% rename from drivers/event/cnxk/deq/cn10k/deq_80_95_tmo.c rename to drivers/event/cnxk/deq/cn20k/deq_80_95_tmo_burst.c index 31c18af76a..bcf32e646b 100644 --- a/drivers/event/cnxk/deq/cn10k/deq_80_95_tmo.c +++ b/drivers/event/cnxk/deq/cn20k/deq_80_95_tmo_burst.c @@ -2,7 +2,7 @@ * Copyright(C) 2022 Marvell. */ -#include "cn10k_worker.h" +#include "cn20k_worker.h" #ifdef _ROC_API_H_ #error "roc_api.h is included" @@ -11,8 +11,10 @@ #if !defined(CNXK_DIS_TMPLT_FUNC) #define R(name, flags) \ - SSO_DEQ_TMO(cn10k_sso_hws_deq_tmo_##name, flags) \ - SSO_DEQ_TMO(cn10k_sso_hws_reas_deq_tmo_##name, flags | NIX_RX_REAS_F) + SSO_CMN_DEQ_BURST(cn20k_sso_hws_deq_tmo_burst_##name, \ + cn20k_sso_hws_deq_tmo_##name, flags) \ + SSO_CMN_DEQ_BURST(cn20k_sso_hws_reas_deq_tmo_burst_##name, \ + cn20k_sso_hws_reas_deq_tmo_##name, flags | NIX_RX_REAS_F) NIX_RX_FASTPATH_MODES_80_95 #undef R diff --git a/drivers/event/cnxk/deq/cn10k/deq_80_95.c b/drivers/event/cnxk/deq/cn20k/deq_80_95_tmo_seg_burst.c similarity index 51% rename from drivers/event/cnxk/deq/cn10k/deq_80_95.c rename to drivers/event/cnxk/deq/cn20k/deq_80_95_tmo_seg_burst.c index 22333b5928..b24e73439a 100644 --- a/drivers/event/cnxk/deq/cn10k/deq_80_95.c +++ b/drivers/event/cnxk/deq/cn20k/deq_80_95_tmo_seg_burst.c @@ -2,7 +2,7 @@ * Copyright(C) 2022 Marvell. 
*/ -#include "cn10k_worker.h" +#include "cn20k_worker.h" #ifdef _ROC_API_H_ #error "roc_api.h is included" @@ -11,8 +11,10 @@ #if !defined(CNXK_DIS_TMPLT_FUNC) #define R(name, flags) \ - SSO_DEQ(cn10k_sso_hws_deq_##name, flags) \ - SSO_DEQ(cn10k_sso_hws_reas_deq_##name, flags | NIX_RX_REAS_F) + SSO_CMN_DEQ_BURST(cn20k_sso_hws_deq_tmo_seg_burst_##name, \ + cn20k_sso_hws_deq_tmo_seg_##name, flags) \ + SSO_CMN_DEQ_BURST(cn20k_sso_hws_reas_deq_tmo_seg_burst_##name, \ + cn20k_sso_hws_reas_deq_tmo_seg_##name, flags | NIX_RX_REAS_F) NIX_RX_FASTPATH_MODES_80_95 #undef R diff --git a/drivers/event/cnxk/deq/cn10k/deq_96_111_seg.c b/drivers/event/cnxk/deq/cn20k/deq_96_111_burst.c similarity index 52% rename from drivers/event/cnxk/deq/cn10k/deq_96_111_seg.c rename to drivers/event/cnxk/deq/cn20k/deq_96_111_burst.c index be97fbcf60..c03d034b66 100644 --- a/drivers/event/cnxk/deq/cn10k/deq_96_111_seg.c +++ b/drivers/event/cnxk/deq/cn20k/deq_96_111_burst.c @@ -2,7 +2,7 @@ * Copyright(C) 2022 Marvell. */ -#include "cn10k_worker.h" +#include "cn20k_worker.h" #ifdef _ROC_API_H_ #error "roc_api.h is included" @@ -11,8 +11,10 @@ #if !defined(CNXK_DIS_TMPLT_FUNC) #define R(name, flags) \ - SSO_DEQ_SEG(cn10k_sso_hws_deq_seg_##name, flags) \ - SSO_DEQ_SEG(cn10k_sso_hws_reas_deq_seg_##name, flags | NIX_RX_REAS_F) + SSO_CMN_DEQ_BURST(cn20k_sso_hws_deq_burst_##name, \ + cn20k_sso_hws_deq_##name, flags) \ + SSO_CMN_DEQ_BURST(cn20k_sso_hws_reas_deq_burst_##name, \ + cn20k_sso_hws_reas_deq_##name, flags | NIX_RX_REAS_F) NIX_RX_FASTPATH_MODES_96_111 #undef R diff --git a/drivers/event/cnxk/deq/cn10k/deq_96_111_tmo_seg.c b/drivers/event/cnxk/deq/cn20k/deq_96_111_seg_burst.c similarity index 51% rename from drivers/event/cnxk/deq/cn10k/deq_96_111_tmo_seg.c rename to drivers/event/cnxk/deq/cn20k/deq_96_111_seg_burst.c index 9a3d09fae7..b37ef7a998 100644 --- a/drivers/event/cnxk/deq/cn10k/deq_96_111_tmo_seg.c +++ b/drivers/event/cnxk/deq/cn20k/deq_96_111_seg_burst.c @@ -2,7 +2,7 @@ * Copyright(C) 2022 Marvell. */ -#include "cn10k_worker.h" +#include "cn20k_worker.h" #ifdef _ROC_API_H_ #error "roc_api.h is included" @@ -11,8 +11,10 @@ #if !defined(CNXK_DIS_TMPLT_FUNC) #define R(name, flags) \ - SSO_DEQ_TMO_SEG(cn10k_sso_hws_deq_tmo_seg_##name, flags) \ - SSO_DEQ_TMO_SEG(cn10k_sso_hws_reas_deq_tmo_seg_##name, flags | NIX_RX_REAS_F) + SSO_CMN_DEQ_SEG_BURST(cn20k_sso_hws_deq_seg_burst_##name, \ + cn20k_sso_hws_deq_seg_##name, flags) \ + SSO_CMN_DEQ_SEG_BURST(cn20k_sso_hws_reas_deq_seg_burst_##name, \ + cn20k_sso_hws_reas_deq_seg_##name, flags | NIX_RX_REAS_F) NIX_RX_FASTPATH_MODES_96_111 #undef R diff --git a/drivers/event/cnxk/deq/cn10k/deq_96_111_tmo.c b/drivers/event/cnxk/deq/cn20k/deq_96_111_tmo_burst.c similarity index 51% rename from drivers/event/cnxk/deq/cn10k/deq_96_111_tmo.c rename to drivers/event/cnxk/deq/cn20k/deq_96_111_tmo_burst.c index cb90b5c31a..da76b589a0 100644 --- a/drivers/event/cnxk/deq/cn10k/deq_96_111_tmo.c +++ b/drivers/event/cnxk/deq/cn20k/deq_96_111_tmo_burst.c @@ -2,7 +2,7 @@ * Copyright(C) 2022 Marvell. 
*/ -#include "cn10k_worker.h" +#include "cn20k_worker.h" #ifdef _ROC_API_H_ #error "roc_api.h is included" @@ -11,8 +11,10 @@ #if !defined(CNXK_DIS_TMPLT_FUNC) #define R(name, flags) \ - SSO_DEQ_TMO(cn10k_sso_hws_deq_tmo_##name, flags) \ - SSO_DEQ_TMO(cn10k_sso_hws_reas_deq_tmo_##name, flags | NIX_RX_REAS_F) + SSO_CMN_DEQ_BURST(cn20k_sso_hws_deq_tmo_burst_##name, \ + cn20k_sso_hws_deq_tmo_##name, flags) \ + SSO_CMN_DEQ_BURST(cn20k_sso_hws_reas_deq_tmo_burst_##name, \ + cn20k_sso_hws_reas_deq_tmo_##name, flags | NIX_RX_REAS_F) NIX_RX_FASTPATH_MODES_96_111 #undef R diff --git a/drivers/event/cnxk/deq/cn10k/deq_96_111.c b/drivers/event/cnxk/deq/cn20k/deq_96_111_tmo_seg_burst.c similarity index 51% rename from drivers/event/cnxk/deq/cn10k/deq_96_111.c rename to drivers/event/cnxk/deq/cn20k/deq_96_111_tmo_seg_burst.c index 4478d0475d..3a8c02e4d2 100644 --- a/drivers/event/cnxk/deq/cn10k/deq_96_111.c +++ b/drivers/event/cnxk/deq/cn20k/deq_96_111_tmo_seg_burst.c @@ -2,7 +2,7 @@ * Copyright(C) 2022 Marvell. */ -#include "cn10k_worker.h" +#include "cn20k_worker.h" #ifdef _ROC_API_H_ #error "roc_api.h is included" @@ -11,8 +11,10 @@ #if !defined(CNXK_DIS_TMPLT_FUNC) #define R(name, flags) \ - SSO_DEQ(cn10k_sso_hws_deq_##name, flags) \ - SSO_DEQ(cn10k_sso_hws_reas_deq_##name, flags | NIX_RX_REAS_F) + SSO_CMN_DEQ_BURST(cn20k_sso_hws_deq_tmo_seg_burst_##name, \ + cn20k_sso_hws_deq_tmo_seg_##name, flags) \ + SSO_CMN_DEQ_BURST(cn20k_sso_hws_reas_deq_tmo_seg_burst_##name, \ + cn20k_sso_hws_reas_deq_tmo_seg_##name, flags | NIX_RX_REAS_F) NIX_RX_FASTPATH_MODES_96_111 #undef R diff --git a/drivers/event/cnxk/deq/cn20k/deq_all_offload.c b/drivers/event/cnxk/deq/cn20k/deq_all_offload.c new file mode 100644 index 0000000000..3983736b7e --- /dev/null +++ b/drivers/event/cnxk/deq/cn20k/deq_all_offload.c @@ -0,0 +1,65 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(C) 2022 Marvell. 
+ */ + +#include "cn20k_worker.h" + +#ifdef _ROC_API_H_ +#error "roc_api.h is included" +#endif + +#if defined(CNXK_DIS_TMPLT_FUNC) + +uint16_t __rte_hot +cn20k_sso_hws_deq_burst_all_offload(void *port, struct rte_event ev[], uint16_t nb_events, + uint64_t timeout_ticks) +{ + const uint32_t flags = (NIX_RX_OFFLOAD_RSS_F | NIX_RX_OFFLOAD_PTYPE_F | + NIX_RX_OFFLOAD_CHECKSUM_F | NIX_RX_OFFLOAD_MARK_UPDATE_F | + NIX_RX_OFFLOAD_VLAN_STRIP_F | + NIX_RX_OFFLOAD_SECURITY_F | NIX_RX_MULTI_SEG_F | NIX_RX_REAS_F); + struct cn20k_sso_hws *ws = port; + uint16_t ret = 1; + uint64_t iter; + + RTE_SET_USED(nb_events); + if (ws->swtag_req) { + ws->swtag_req = 0; + ws->gw_rdata = cnxk_sso_hws_swtag_wait(ws->base + SSOW_LF_GWS_WQE0); + return ret; + } + + ret = cn20k_sso_hws_get_work(ws, ev, flags); + for (iter = 1; iter < timeout_ticks && (ret == 0); iter++) + ret = cn20k_sso_hws_get_work(ws, ev, flags); + + return ret; +} + +uint16_t __rte_hot +cn20k_sso_hws_deq_burst_all_offload_tst(void *port, struct rte_event ev[], uint16_t nb_events, + uint64_t timeout_ticks) +{ + const uint32_t flags = (NIX_RX_OFFLOAD_RSS_F | NIX_RX_OFFLOAD_PTYPE_F | + NIX_RX_OFFLOAD_CHECKSUM_F | NIX_RX_OFFLOAD_MARK_UPDATE_F | + NIX_RX_OFFLOAD_TSTAMP_F | NIX_RX_OFFLOAD_VLAN_STRIP_F | + NIX_RX_OFFLOAD_SECURITY_F | NIX_RX_MULTI_SEG_F | NIX_RX_REAS_F); + struct cn20k_sso_hws *ws = port; + uint16_t ret = 1; + uint64_t iter; + + RTE_SET_USED(nb_events); + if (ws->swtag_req) { + ws->swtag_req = 0; + ws->gw_rdata = cnxk_sso_hws_swtag_wait(ws->base + SSOW_LF_GWS_WQE0); + return ret; + } + + ret = cn20k_sso_hws_get_work(ws, ev, flags); + for (iter = 1; iter < timeout_ticks && (ret == 0); iter++) + ret = cn20k_sso_hws_get_work(ws, ev, flags); + + return ret; +} + +#endif diff --git a/drivers/event/cnxk/deq/cn9k/deq_0_15.c b/drivers/event/cnxk/deq/cn9k/deq_0_15.c deleted file mode 100644 index 8254fad3a2..0000000000 --- a/drivers/event/cnxk/deq/cn9k/deq_0_15.c +++ /dev/null @@ -1,16 +0,0 @@ -/* SPDX-License-Identifier: BSD-3-Clause - * Copyright(C) 2022 Marvell. - */ - -#include "cn9k_worker.h" -#include "cnxk_eventdev.h" -#include "cnxk_worker.h" - -#if !defined(CNXK_DIS_TMPLT_FUNC) - -#define R(name, flags) SSO_DEQ(cn9k_sso_hws_deq_##name, flags) - -NIX_RX_FASTPATH_MODES_0_15 -#undef R - -#endif diff --git a/drivers/event/cnxk/deq/cn9k/deq_0_15_dual.c b/drivers/event/cnxk/deq/cn9k/deq_0_15_dual.c deleted file mode 100644 index 91a69ea0d7..0000000000 --- a/drivers/event/cnxk/deq/cn9k/deq_0_15_dual.c +++ /dev/null @@ -1,16 +0,0 @@ -/* SPDX-License-Identifier: BSD-3-Clause - * Copyright(C) 2022 Marvell. - */ - -#include "cn9k_worker.h" -#include "cnxk_eventdev.h" -#include "cnxk_worker.h" - -#if !defined(CNXK_DIS_TMPLT_FUNC) - -#define R(name, flags) SSO_DUAL_DEQ(cn9k_sso_hws_dual_deq_##name, flags) - -NIX_RX_FASTPATH_MODES_0_15 -#undef R - -#endif diff --git a/drivers/event/cnxk/deq/cn9k/deq_0_15_dual_seg.c b/drivers/event/cnxk/deq/cn9k/deq_0_15_dual_seg.c deleted file mode 100644 index 8a1dfceb14..0000000000 --- a/drivers/event/cnxk/deq/cn9k/deq_0_15_dual_seg.c +++ /dev/null @@ -1,16 +0,0 @@ -/* SPDX-License-Identifier: BSD-3-Clause - * Copyright(C) 2022 Marvell. 
- */ - -#include "cn9k_worker.h" -#include "cnxk_eventdev.h" -#include "cnxk_worker.h" - -#if !defined(CNXK_DIS_TMPLT_FUNC) - -#define R(name, flags) SSO_DUAL_DEQ_SEG(cn9k_sso_hws_dual_deq_seg_##name, flags) - -NIX_RX_FASTPATH_MODES_0_15 -#undef R - -#endif diff --git a/drivers/event/cnxk/deq/cn9k/deq_0_15_dual_tmo.c b/drivers/event/cnxk/deq/cn9k/deq_0_15_dual_tmo.c deleted file mode 100644 index 6028dd679a..0000000000 --- a/drivers/event/cnxk/deq/cn9k/deq_0_15_dual_tmo.c +++ /dev/null @@ -1,16 +0,0 @@ -/* SPDX-License-Identifier: BSD-3-Clause - * Copyright(C) 2022 Marvell. - */ - -#include "cn9k_worker.h" -#include "cnxk_eventdev.h" -#include "cnxk_worker.h" - -#if !defined(CNXK_DIS_TMPLT_FUNC) - -#define R(name, flags) SSO_DUAL_DEQ_TMO(cn9k_sso_hws_dual_deq_tmo_##name, flags) - -NIX_RX_FASTPATH_MODES_0_15 -#undef R - -#endif diff --git a/drivers/event/cnxk/deq/cn9k/deq_0_15_dual_tmo_seg.c b/drivers/event/cnxk/deq/cn9k/deq_0_15_dual_tmo_seg.c deleted file mode 100644 index 55405b0f7b..0000000000 --- a/drivers/event/cnxk/deq/cn9k/deq_0_15_dual_tmo_seg.c +++ /dev/null @@ -1,17 +0,0 @@ -/* SPDX-License-Identifier: BSD-3-Clause - * Copyright(C) 2022 Marvell. - */ - -#include "cn9k_worker.h" -#include "cnxk_eventdev.h" -#include "cnxk_worker.h" - -#if !defined(CNXK_DIS_TMPLT_FUNC) - -#define R(name, flags) \ - SSO_DUAL_DEQ_TMO_SEG(cn9k_sso_hws_dual_deq_tmo_seg_##name, flags) - -NIX_RX_FASTPATH_MODES_0_15 -#undef R - -#endif diff --git a/drivers/event/cnxk/deq/cn9k/deq_0_15_seg.c b/drivers/event/cnxk/deq/cn9k/deq_0_15_seg.c deleted file mode 100644 index 5d01377d77..0000000000 --- a/drivers/event/cnxk/deq/cn9k/deq_0_15_seg.c +++ /dev/null @@ -1,16 +0,0 @@ -/* SPDX-License-Identifier: BSD-3-Clause - * Copyright(C) 2022 Marvell. - */ - -#include "cn9k_worker.h" -#include "cnxk_eventdev.h" -#include "cnxk_worker.h" - -#if !defined(CNXK_DIS_TMPLT_FUNC) - -#define R(name, flags) SSO_DEQ_SEG(cn9k_sso_hws_deq_seg_##name, flags) - -NIX_RX_FASTPATH_MODES_0_15 -#undef R - -#endif diff --git a/drivers/event/cnxk/deq/cn9k/deq_0_15_tmo.c b/drivers/event/cnxk/deq/cn9k/deq_0_15_tmo.c deleted file mode 100644 index b7d566f2f1..0000000000 --- a/drivers/event/cnxk/deq/cn9k/deq_0_15_tmo.c +++ /dev/null @@ -1,16 +0,0 @@ -/* SPDX-License-Identifier: BSD-3-Clause - * Copyright(C) 2022 Marvell. - */ - -#include "cn9k_worker.h" -#include "cnxk_eventdev.h" -#include "cnxk_worker.h" - -#if !defined(CNXK_DIS_TMPLT_FUNC) - -#define R(name, flags) SSO_DEQ_TMO(cn9k_sso_hws_deq_tmo_##name, flags) - -NIX_RX_FASTPATH_MODES_0_15 -#undef R - -#endif diff --git a/drivers/event/cnxk/deq/cn9k/deq_0_15_tmo_seg.c b/drivers/event/cnxk/deq/cn9k/deq_0_15_tmo_seg.c deleted file mode 100644 index ea83fe83d4..0000000000 --- a/drivers/event/cnxk/deq/cn9k/deq_0_15_tmo_seg.c +++ /dev/null @@ -1,16 +0,0 @@ -/* SPDX-License-Identifier: BSD-3-Clause - * Copyright(C) 2022 Marvell. - */ - -#include "cn9k_worker.h" -#include "cnxk_eventdev.h" -#include "cnxk_worker.h" - -#if !defined(CNXK_DIS_TMPLT_FUNC) - -#define R(name, flags) SSO_DEQ_TMO_SEG(cn9k_sso_hws_deq_tmo_seg_##name, flags) - -NIX_RX_FASTPATH_MODES_0_15 -#undef R - -#endif diff --git a/drivers/event/cnxk/deq/cn9k/deq_112_127.c b/drivers/event/cnxk/deq/cn9k/deq_112_127.c deleted file mode 100644 index 63d06f41cb..0000000000 --- a/drivers/event/cnxk/deq/cn9k/deq_112_127.c +++ /dev/null @@ -1,16 +0,0 @@ -/* SPDX-License-Identifier: BSD-3-Clause - * Copyright(C) 2022 Marvell. 
- */ - -#include "cn9k_worker.h" -#include "cnxk_eventdev.h" -#include "cnxk_worker.h" - -#if !defined(CNXK_DIS_TMPLT_FUNC) - -#define R(name, flags) SSO_DEQ(cn9k_sso_hws_deq_##name, flags) - -NIX_RX_FASTPATH_MODES_112_127 -#undef R - -#endif diff --git a/drivers/event/cnxk/deq/cn9k/deq_112_127_dual.c b/drivers/event/cnxk/deq/cn9k/deq_112_127_dual.c deleted file mode 100644 index 766c02df2a..0000000000 --- a/drivers/event/cnxk/deq/cn9k/deq_112_127_dual.c +++ /dev/null @@ -1,16 +0,0 @@ -/* SPDX-License-Identifier: BSD-3-Clause - * Copyright(C) 2022 Marvell. - */ - -#include "cn9k_worker.h" -#include "cnxk_eventdev.h" -#include "cnxk_worker.h" - -#if !defined(CNXK_DIS_TMPLT_FUNC) - -#define R(name, flags) SSO_DUAL_DEQ(cn9k_sso_hws_dual_deq_##name, flags) - -NIX_RX_FASTPATH_MODES_112_127 -#undef R - -#endif diff --git a/drivers/event/cnxk/deq/cn9k/deq_112_127_dual_seg.c b/drivers/event/cnxk/deq/cn9k/deq_112_127_dual_seg.c deleted file mode 100644 index f452db50c1..0000000000 --- a/drivers/event/cnxk/deq/cn9k/deq_112_127_dual_seg.c +++ /dev/null @@ -1,16 +0,0 @@ -/* SPDX-License-Identifier: BSD-3-Clause - * Copyright(C) 2022 Marvell. - */ - -#include "cn9k_worker.h" -#include "cnxk_eventdev.h" -#include "cnxk_worker.h" - -#if !defined(CNXK_DIS_TMPLT_FUNC) - -#define R(name, flags) SSO_DUAL_DEQ_SEG(cn9k_sso_hws_dual_deq_seg_##name, flags) - -NIX_RX_FASTPATH_MODES_112_127 -#undef R - -#endif diff --git a/drivers/event/cnxk/deq/cn9k/deq_112_127_dual_tmo.c b/drivers/event/cnxk/deq/cn9k/deq_112_127_dual_tmo.c deleted file mode 100644 index 0b549f5214..0000000000 --- a/drivers/event/cnxk/deq/cn9k/deq_112_127_dual_tmo.c +++ /dev/null @@ -1,16 +0,0 @@ -/* SPDX-License-Identifier: BSD-3-Clause - * Copyright(C) 2022 Marvell. - */ - -#include "cn9k_worker.h" -#include "cnxk_eventdev.h" -#include "cnxk_worker.h" - -#if !defined(CNXK_DIS_TMPLT_FUNC) - -#define R(name, flags) SSO_DUAL_DEQ_TMO(cn9k_sso_hws_dual_deq_tmo_##name, flags) - -NIX_RX_FASTPATH_MODES_112_127 -#undef R - -#endif diff --git a/drivers/event/cnxk/deq/cn9k/deq_112_127_dual_tmo_seg.c b/drivers/event/cnxk/deq/cn9k/deq_112_127_dual_tmo_seg.c deleted file mode 100644 index 9dd678843f..0000000000 --- a/drivers/event/cnxk/deq/cn9k/deq_112_127_dual_tmo_seg.c +++ /dev/null @@ -1,17 +0,0 @@ -/* SPDX-License-Identifier: BSD-3-Clause - * Copyright(C) 2022 Marvell. - */ - -#include "cn9k_worker.h" -#include "cnxk_eventdev.h" -#include "cnxk_worker.h" - -#if !defined(CNXK_DIS_TMPLT_FUNC) - -#define R(name, flags) \ - SSO_DUAL_DEQ_TMO_SEG(cn9k_sso_hws_dual_deq_tmo_seg_##name, flags) - -NIX_RX_FASTPATH_MODES_112_127 -#undef R - -#endif diff --git a/drivers/event/cnxk/deq/cn9k/deq_112_127_seg.c b/drivers/event/cnxk/deq/cn9k/deq_112_127_seg.c deleted file mode 100644 index 090730be2e..0000000000 --- a/drivers/event/cnxk/deq/cn9k/deq_112_127_seg.c +++ /dev/null @@ -1,16 +0,0 @@ -/* SPDX-License-Identifier: BSD-3-Clause - * Copyright(C) 2022 Marvell. - */ - -#include "cn9k_worker.h" -#include "cnxk_eventdev.h" -#include "cnxk_worker.h" - -#if !defined(CNXK_DIS_TMPLT_FUNC) - -#define R(name, flags) SSO_DEQ_SEG(cn9k_sso_hws_deq_seg_##name, flags) - -NIX_RX_FASTPATH_MODES_112_127 -#undef R - -#endif diff --git a/drivers/event/cnxk/deq/cn9k/deq_112_127_tmo.c b/drivers/event/cnxk/deq/cn9k/deq_112_127_tmo.c deleted file mode 100644 index 7796446dc5..0000000000 --- a/drivers/event/cnxk/deq/cn9k/deq_112_127_tmo.c +++ /dev/null @@ -1,16 +0,0 @@ -/* SPDX-License-Identifier: BSD-3-Clause - * Copyright(C) 2022 Marvell. 
- */ - -#include "cn9k_worker.h" -#include "cnxk_eventdev.h" -#include "cnxk_worker.h" - -#if !defined(CNXK_DIS_TMPLT_FUNC) - -#define R(name, flags) SSO_DEQ_TMO(cn9k_sso_hws_deq_tmo_##name, flags) - -NIX_RX_FASTPATH_MODES_112_127 -#undef R - -#endif diff --git a/drivers/event/cnxk/deq/cn9k/deq_112_127_tmo_seg.c b/drivers/event/cnxk/deq/cn9k/deq_112_127_tmo_seg.c deleted file mode 100644 index 125d27f098..0000000000 --- a/drivers/event/cnxk/deq/cn9k/deq_112_127_tmo_seg.c +++ /dev/null @@ -1,16 +0,0 @@ -/* SPDX-License-Identifier: BSD-3-Clause - * Copyright(C) 2022 Marvell. - */ - -#include "cn9k_worker.h" -#include "cnxk_eventdev.h" -#include "cnxk_worker.h" - -#if !defined(CNXK_DIS_TMPLT_FUNC) - -#define R(name, flags) SSO_DEQ_TMO_SEG(cn9k_sso_hws_deq_tmo_seg_##name, flags) - -NIX_RX_FASTPATH_MODES_112_127 -#undef R - -#endif diff --git a/drivers/event/cnxk/deq/cn9k/deq_16_31.c b/drivers/event/cnxk/deq/cn9k/deq_16_31.c deleted file mode 100644 index be209d9e09..0000000000 --- a/drivers/event/cnxk/deq/cn9k/deq_16_31.c +++ /dev/null @@ -1,16 +0,0 @@ -/* SPDX-License-Identifier: BSD-3-Clause - * Copyright(C) 2022 Marvell. - */ - -#include "cn9k_worker.h" -#include "cnxk_eventdev.h" -#include "cnxk_worker.h" - -#if !defined(CNXK_DIS_TMPLT_FUNC) - -#define R(name, flags) SSO_DEQ(cn9k_sso_hws_deq_##name, flags) - -NIX_RX_FASTPATH_MODES_16_31 -#undef R - -#endif diff --git a/drivers/event/cnxk/deq/cn9k/deq_16_31_dual.c b/drivers/event/cnxk/deq/cn9k/deq_16_31_dual.c deleted file mode 100644 index c214893f2d..0000000000 --- a/drivers/event/cnxk/deq/cn9k/deq_16_31_dual.c +++ /dev/null @@ -1,16 +0,0 @@ -/* SPDX-License-Identifier: BSD-3-Clause - * Copyright(C) 2022 Marvell. - */ - -#include "cn9k_worker.h" -#include "cnxk_eventdev.h" -#include "cnxk_worker.h" - -#if !defined(CNXK_DIS_TMPLT_FUNC) - -#define R(name, flags) SSO_DUAL_DEQ(cn9k_sso_hws_dual_deq_##name, flags) - -NIX_RX_FASTPATH_MODES_16_31 -#undef R - -#endif diff --git a/drivers/event/cnxk/deq/cn9k/deq_16_31_dual_seg.c b/drivers/event/cnxk/deq/cn9k/deq_16_31_dual_seg.c deleted file mode 100644 index bd1ce1a7fa..0000000000 --- a/drivers/event/cnxk/deq/cn9k/deq_16_31_dual_seg.c +++ /dev/null @@ -1,16 +0,0 @@ -/* SPDX-License-Identifier: BSD-3-Clause - * Copyright(C) 2022 Marvell. - */ - -#include "cn9k_worker.h" -#include "cnxk_eventdev.h" -#include "cnxk_worker.h" - -#if !defined(CNXK_DIS_TMPLT_FUNC) - -#define R(name, flags) SSO_DUAL_DEQ_SEG(cn9k_sso_hws_dual_deq_seg_##name, flags) - -NIX_RX_FASTPATH_MODES_16_31 -#undef R - -#endif diff --git a/drivers/event/cnxk/deq/cn9k/deq_16_31_dual_tmo.c b/drivers/event/cnxk/deq/cn9k/deq_16_31_dual_tmo.c deleted file mode 100644 index 0c163186da..0000000000 --- a/drivers/event/cnxk/deq/cn9k/deq_16_31_dual_tmo.c +++ /dev/null @@ -1,16 +0,0 @@ -/* SPDX-License-Identifier: BSD-3-Clause - * Copyright(C) 2022 Marvell. - */ - -#include "cn9k_worker.h" -#include "cnxk_eventdev.h" -#include "cnxk_worker.h" - -#if !defined(CNXK_DIS_TMPLT_FUNC) - -#define R(name, flags) SSO_DUAL_DEQ_TMO(cn9k_sso_hws_dual_deq_tmo_##name, flags) - -NIX_RX_FASTPATH_MODES_16_31 -#undef R - -#endif diff --git a/drivers/event/cnxk/deq/cn9k/deq_16_31_dual_tmo_seg.c b/drivers/event/cnxk/deq/cn9k/deq_16_31_dual_tmo_seg.c deleted file mode 100644 index 542dc51e11..0000000000 --- a/drivers/event/cnxk/deq/cn9k/deq_16_31_dual_tmo_seg.c +++ /dev/null @@ -1,17 +0,0 @@ -/* SPDX-License-Identifier: BSD-3-Clause - * Copyright(C) 2022 Marvell. 
- */ - -#include "cn9k_worker.h" -#include "cnxk_eventdev.h" -#include "cnxk_worker.h" - -#if !defined(CNXK_DIS_TMPLT_FUNC) - -#define R(name, flags) \ - SSO_DUAL_DEQ_TMO_SEG(cn9k_sso_hws_dual_deq_tmo_seg_##name, flags) - -NIX_RX_FASTPATH_MODES_16_31 -#undef R - -#endif diff --git a/drivers/event/cnxk/deq/cn9k/deq_16_31_seg.c b/drivers/event/cnxk/deq/cn9k/deq_16_31_seg.c deleted file mode 100644 index 86302a6a12..0000000000 --- a/drivers/event/cnxk/deq/cn9k/deq_16_31_seg.c +++ /dev/null @@ -1,16 +0,0 @@ -/* SPDX-License-Identifier: BSD-3-Clause - * Copyright(C) 2022 Marvell. - */ - -#include "cn9k_worker.h" -#include "cnxk_eventdev.h" -#include "cnxk_worker.h" - -#if !defined(CNXK_DIS_TMPLT_FUNC) - -#define R(name, flags) SSO_DEQ_SEG(cn9k_sso_hws_deq_seg_##name, flags) - -NIX_RX_FASTPATH_MODES_16_31 -#undef R - -#endif diff --git a/drivers/event/cnxk/deq/cn9k/deq_16_31_tmo.c b/drivers/event/cnxk/deq/cn9k/deq_16_31_tmo.c deleted file mode 100644 index c3018902fd..0000000000 --- a/drivers/event/cnxk/deq/cn9k/deq_16_31_tmo.c +++ /dev/null @@ -1,16 +0,0 @@ -/* SPDX-License-Identifier: BSD-3-Clause - * Copyright(C) 2022 Marvell. - */ - -#include "cn9k_worker.h" -#include "cnxk_eventdev.h" -#include "cnxk_worker.h" - -#if !defined(CNXK_DIS_TMPLT_FUNC) - -#define R(name, flags) SSO_DEQ_TMO(cn9k_sso_hws_deq_tmo_##name, flags) - -NIX_RX_FASTPATH_MODES_16_31 -#undef R - -#endif diff --git a/drivers/event/cnxk/deq/cn9k/deq_16_31_tmo_seg.c b/drivers/event/cnxk/deq/cn9k/deq_16_31_tmo_seg.c deleted file mode 100644 index 2518dffe50..0000000000 --- a/drivers/event/cnxk/deq/cn9k/deq_16_31_tmo_seg.c +++ /dev/null @@ -1,16 +0,0 @@ -/* SPDX-License-Identifier: BSD-3-Clause - * Copyright(C) 2022 Marvell. - */ - -#include "cn9k_worker.h" -#include "cnxk_eventdev.h" -#include "cnxk_worker.h" - -#if !defined(CNXK_DIS_TMPLT_FUNC) - -#define R(name, flags) SSO_DEQ_TMO_SEG(cn9k_sso_hws_deq_tmo_seg_##name, flags) - -NIX_RX_FASTPATH_MODES_16_31 -#undef R - -#endif diff --git a/drivers/event/cnxk/deq/cn9k/deq_32_47.c b/drivers/event/cnxk/deq/cn9k/deq_32_47.c deleted file mode 100644 index 1167097c6c..0000000000 --- a/drivers/event/cnxk/deq/cn9k/deq_32_47.c +++ /dev/null @@ -1,16 +0,0 @@ -/* SPDX-License-Identifier: BSD-3-Clause - * Copyright(C) 2022 Marvell. - */ - -#include "cn9k_worker.h" -#include "cnxk_eventdev.h" -#include "cnxk_worker.h" - -#if !defined(CNXK_DIS_TMPLT_FUNC) - -#define R(name, flags) SSO_DEQ(cn9k_sso_hws_deq_##name, flags) - -NIX_RX_FASTPATH_MODES_32_47 -#undef R - -#endif diff --git a/drivers/event/cnxk/deq/cn9k/deq_32_47_dual.c b/drivers/event/cnxk/deq/cn9k/deq_32_47_dual.c deleted file mode 100644 index d51fdd88b5..0000000000 --- a/drivers/event/cnxk/deq/cn9k/deq_32_47_dual.c +++ /dev/null @@ -1,16 +0,0 @@ -/* SPDX-License-Identifier: BSD-3-Clause - * Copyright(C) 2022 Marvell. - */ - -#include "cn9k_worker.h" -#include "cnxk_eventdev.h" -#include "cnxk_worker.h" - -#if !defined(CNXK_DIS_TMPLT_FUNC) - -#define R(name, flags) SSO_DUAL_DEQ(cn9k_sso_hws_dual_deq_##name, flags) - -NIX_RX_FASTPATH_MODES_32_47 -#undef R - -#endif diff --git a/drivers/event/cnxk/deq/cn9k/deq_32_47_dual_seg.c b/drivers/event/cnxk/deq/cn9k/deq_32_47_dual_seg.c deleted file mode 100644 index f465582540..0000000000 --- a/drivers/event/cnxk/deq/cn9k/deq_32_47_dual_seg.c +++ /dev/null @@ -1,16 +0,0 @@ -/* SPDX-License-Identifier: BSD-3-Clause - * Copyright(C) 2022 Marvell. 
- */ - -#include "cn9k_worker.h" -#include "cnxk_eventdev.h" -#include "cnxk_worker.h" - -#if !defined(CNXK_DIS_TMPLT_FUNC) - -#define R(name, flags) SSO_DUAL_DEQ_SEG(cn9k_sso_hws_dual_deq_seg_##name, flags) - -NIX_RX_FASTPATH_MODES_32_47 -#undef R - -#endif diff --git a/drivers/event/cnxk/deq/cn9k/deq_32_47_dual_tmo.c b/drivers/event/cnxk/deq/cn9k/deq_32_47_dual_tmo.c deleted file mode 100644 index 6afdcc26a0..0000000000 --- a/drivers/event/cnxk/deq/cn9k/deq_32_47_dual_tmo.c +++ /dev/null @@ -1,16 +0,0 @@ -/* SPDX-License-Identifier: BSD-3-Clause - * Copyright(C) 2022 Marvell. - */ - -#include "cn9k_worker.h" -#include "cnxk_eventdev.h" -#include "cnxk_worker.h" - -#if !defined(CNXK_DIS_TMPLT_FUNC) - -#define R(name, flags) SSO_DUAL_DEQ_TMO(cn9k_sso_hws_dual_deq_tmo_##name, flags) - -NIX_RX_FASTPATH_MODES_32_47 -#undef R - -#endif diff --git a/drivers/event/cnxk/deq/cn9k/deq_32_47_dual_tmo_seg.c b/drivers/event/cnxk/deq/cn9k/deq_32_47_dual_tmo_seg.c deleted file mode 100644 index 016f410e48..0000000000 --- a/drivers/event/cnxk/deq/cn9k/deq_32_47_dual_tmo_seg.c +++ /dev/null @@ -1,17 +0,0 @@ -/* SPDX-License-Identifier: BSD-3-Clause - * Copyright(C) 2022 Marvell. - */ - -#include "cn9k_worker.h" -#include "cnxk_eventdev.h" -#include "cnxk_worker.h" - -#if !defined(CNXK_DIS_TMPLT_FUNC) - -#define R(name, flags) \ - SSO_DUAL_DEQ_TMO_SEG(cn9k_sso_hws_dual_deq_tmo_seg_##name, flags) - -NIX_RX_FASTPATH_MODES_32_47 -#undef R - -#endif diff --git a/drivers/event/cnxk/deq/cn9k/deq_32_47_seg.c b/drivers/event/cnxk/deq/cn9k/deq_32_47_seg.c deleted file mode 100644 index c00a5749cc..0000000000 --- a/drivers/event/cnxk/deq/cn9k/deq_32_47_seg.c +++ /dev/null @@ -1,16 +0,0 @@ -/* SPDX-License-Identifier: BSD-3-Clause - * Copyright(C) 2022 Marvell. - */ - -#include "cn9k_worker.h" -#include "cnxk_eventdev.h" -#include "cnxk_worker.h" - -#if !defined(CNXK_DIS_TMPLT_FUNC) - -#define R(name, flags) SSO_DEQ_SEG(cn9k_sso_hws_deq_seg_##name, flags) - -NIX_RX_FASTPATH_MODES_32_47 -#undef R - -#endif diff --git a/drivers/event/cnxk/deq/cn9k/deq_32_47_tmo.c b/drivers/event/cnxk/deq/cn9k/deq_32_47_tmo.c deleted file mode 100644 index 5345264883..0000000000 --- a/drivers/event/cnxk/deq/cn9k/deq_32_47_tmo.c +++ /dev/null @@ -1,16 +0,0 @@ -/* SPDX-License-Identifier: BSD-3-Clause - * Copyright(C) 2022 Marvell. - */ - -#include "cn9k_worker.h" -#include "cnxk_eventdev.h" -#include "cnxk_worker.h" - -#if !defined(CNXK_DIS_TMPLT_FUNC) - -#define R(name, flags) SSO_DEQ_TMO(cn9k_sso_hws_deq_tmo_##name, flags) - -NIX_RX_FASTPATH_MODES_32_47 -#undef R - -#endif diff --git a/drivers/event/cnxk/deq/cn9k/deq_32_47_tmo_seg.c b/drivers/event/cnxk/deq/cn9k/deq_32_47_tmo_seg.c deleted file mode 100644 index 94c7b08638..0000000000 --- a/drivers/event/cnxk/deq/cn9k/deq_32_47_tmo_seg.c +++ /dev/null @@ -1,16 +0,0 @@ -/* SPDX-License-Identifier: BSD-3-Clause - * Copyright(C) 2022 Marvell. - */ - -#include "cn9k_worker.h" -#include "cnxk_eventdev.h" -#include "cnxk_worker.h" - -#if !defined(CNXK_DIS_TMPLT_FUNC) - -#define R(name, flags) SSO_DEQ_TMO_SEG(cn9k_sso_hws_deq_tmo_seg_##name, flags) - -NIX_RX_FASTPATH_MODES_32_47 -#undef R - -#endif diff --git a/drivers/event/cnxk/deq/cn9k/deq_48_63.c b/drivers/event/cnxk/deq/cn9k/deq_48_63.c deleted file mode 100644 index 6e2e6e49cc..0000000000 --- a/drivers/event/cnxk/deq/cn9k/deq_48_63.c +++ /dev/null @@ -1,16 +0,0 @@ -/* SPDX-License-Identifier: BSD-3-Clause - * Copyright(C) 2022 Marvell. 
- */ - -#include "cn9k_worker.h" -#include "cnxk_eventdev.h" -#include "cnxk_worker.h" - -#if !defined(CNXK_DIS_TMPLT_FUNC) - -#define R(name, flags) SSO_DEQ(cn9k_sso_hws_deq_##name, flags) - -NIX_RX_FASTPATH_MODES_48_63 -#undef R - -#endif diff --git a/drivers/event/cnxk/deq/cn9k/deq_48_63_dual.c b/drivers/event/cnxk/deq/cn9k/deq_48_63_dual.c deleted file mode 100644 index cdc1defebf..0000000000 --- a/drivers/event/cnxk/deq/cn9k/deq_48_63_dual.c +++ /dev/null @@ -1,16 +0,0 @@ -/* SPDX-License-Identifier: BSD-3-Clause - * Copyright(C) 2022 Marvell. - */ - -#include "cn9k_worker.h" -#include "cnxk_eventdev.h" -#include "cnxk_worker.h" - -#if !defined(CNXK_DIS_TMPLT_FUNC) - -#define R(name, flags) SSO_DUAL_DEQ(cn9k_sso_hws_dual_deq_##name, flags) - -NIX_RX_FASTPATH_MODES_48_63 -#undef R - -#endif diff --git a/drivers/event/cnxk/deq/cn9k/deq_48_63_dual_seg.c b/drivers/event/cnxk/deq/cn9k/deq_48_63_dual_seg.c deleted file mode 100644 index 49efa68b16..0000000000 --- a/drivers/event/cnxk/deq/cn9k/deq_48_63_dual_seg.c +++ /dev/null @@ -1,16 +0,0 @@ -/* SPDX-License-Identifier: BSD-3-Clause - * Copyright(C) 2022 Marvell. - */ - -#include "cn9k_worker.h" -#include "cnxk_eventdev.h" -#include "cnxk_worker.h" - -#if !defined(CNXK_DIS_TMPLT_FUNC) - -#define R(name, flags) SSO_DUAL_DEQ_SEG(cn9k_sso_hws_dual_deq_seg_##name, flags) - -NIX_RX_FASTPATH_MODES_48_63 -#undef R - -#endif diff --git a/drivers/event/cnxk/deq/cn9k/deq_48_63_dual_tmo.c b/drivers/event/cnxk/deq/cn9k/deq_48_63_dual_tmo.c deleted file mode 100644 index bd31462d96..0000000000 --- a/drivers/event/cnxk/deq/cn9k/deq_48_63_dual_tmo.c +++ /dev/null @@ -1,16 +0,0 @@ -/* SPDX-License-Identifier: BSD-3-Clause - * Copyright(C) 2022 Marvell. - */ - -#include "cn9k_worker.h" -#include "cnxk_eventdev.h" -#include "cnxk_worker.h" - -#if !defined(CNXK_DIS_TMPLT_FUNC) - -#define R(name, flags) SSO_DUAL_DEQ_TMO(cn9k_sso_hws_dual_deq_tmo_##name, flags) - -NIX_RX_FASTPATH_MODES_48_63 -#undef R - -#endif diff --git a/drivers/event/cnxk/deq/cn9k/deq_48_63_dual_tmo_seg.c b/drivers/event/cnxk/deq/cn9k/deq_48_63_dual_tmo_seg.c deleted file mode 100644 index cd047f48e5..0000000000 --- a/drivers/event/cnxk/deq/cn9k/deq_48_63_dual_tmo_seg.c +++ /dev/null @@ -1,17 +0,0 @@ -/* SPDX-License-Identifier: BSD-3-Clause - * Copyright(C) 2022 Marvell. - */ - -#include "cn9k_worker.h" -#include "cnxk_eventdev.h" -#include "cnxk_worker.h" - -#if !defined(CNXK_DIS_TMPLT_FUNC) - -#define R(name, flags) \ - SSO_DUAL_DEQ_TMO_SEG(cn9k_sso_hws_dual_deq_tmo_seg_##name, flags) - -NIX_RX_FASTPATH_MODES_48_63 -#undef R - -#endif diff --git a/drivers/event/cnxk/deq/cn9k/deq_48_63_seg.c b/drivers/event/cnxk/deq/cn9k/deq_48_63_seg.c deleted file mode 100644 index b485d2163e..0000000000 --- a/drivers/event/cnxk/deq/cn9k/deq_48_63_seg.c +++ /dev/null @@ -1,16 +0,0 @@ -/* SPDX-License-Identifier: BSD-3-Clause - * Copyright(C) 2022 Marvell. - */ - -#include "cn9k_worker.h" -#include "cnxk_eventdev.h" -#include "cnxk_worker.h" - -#if !defined(CNXK_DIS_TMPLT_FUNC) - -#define R(name, flags) SSO_DEQ_SEG(cn9k_sso_hws_deq_seg_##name, flags) - -NIX_RX_FASTPATH_MODES_48_63 -#undef R - -#endif diff --git a/drivers/event/cnxk/deq/cn9k/deq_48_63_tmo.c b/drivers/event/cnxk/deq/cn9k/deq_48_63_tmo.c deleted file mode 100644 index 10de29e85d..0000000000 --- a/drivers/event/cnxk/deq/cn9k/deq_48_63_tmo.c +++ /dev/null @@ -1,16 +0,0 @@ -/* SPDX-License-Identifier: BSD-3-Clause - * Copyright(C) 2022 Marvell. 
- */ - -#include "cn9k_worker.h" -#include "cnxk_eventdev.h" -#include "cnxk_worker.h" - -#if !defined(CNXK_DIS_TMPLT_FUNC) - -#define R(name, flags) SSO_DEQ_TMO(cn9k_sso_hws_deq_tmo_##name, flags) - -NIX_RX_FASTPATH_MODES_48_63 -#undef R - -#endif diff --git a/drivers/event/cnxk/deq/cn9k/deq_48_63_tmo_seg.c b/drivers/event/cnxk/deq/cn9k/deq_48_63_tmo_seg.c deleted file mode 100644 index b553d2dc53..0000000000 --- a/drivers/event/cnxk/deq/cn9k/deq_48_63_tmo_seg.c +++ /dev/null @@ -1,16 +0,0 @@ -/* SPDX-License-Identifier: BSD-3-Clause - * Copyright(C) 2022 Marvell. - */ - -#include "cn9k_worker.h" -#include "cnxk_eventdev.h" -#include "cnxk_worker.h" - -#if !defined(CNXK_DIS_TMPLT_FUNC) - -#define R(name, flags) SSO_DEQ_TMO_SEG(cn9k_sso_hws_deq_tmo_seg_##name, flags) - -NIX_RX_FASTPATH_MODES_48_63 -#undef R - -#endif diff --git a/drivers/event/cnxk/deq/cn9k/deq_64_79.c b/drivers/event/cnxk/deq/cn9k/deq_64_79.c deleted file mode 100644 index 169c4244d9..0000000000 --- a/drivers/event/cnxk/deq/cn9k/deq_64_79.c +++ /dev/null @@ -1,16 +0,0 @@ -/* SPDX-License-Identifier: BSD-3-Clause - * Copyright(C) 2022 Marvell. - */ - -#include "cn9k_worker.h" -#include "cnxk_eventdev.h" -#include "cnxk_worker.h" - -#if !defined(CNXK_DIS_TMPLT_FUNC) - -#define R(name, flags) SSO_DEQ(cn9k_sso_hws_deq_##name, flags) - -NIX_RX_FASTPATH_MODES_64_79 -#undef R - -#endif diff --git a/drivers/event/cnxk/deq/cn9k/deq_64_79_dual.c b/drivers/event/cnxk/deq/cn9k/deq_64_79_dual.c deleted file mode 100644 index 4f1c6624ed..0000000000 --- a/drivers/event/cnxk/deq/cn9k/deq_64_79_dual.c +++ /dev/null @@ -1,16 +0,0 @@ -/* SPDX-License-Identifier: BSD-3-Clause - * Copyright(C) 2022 Marvell. - */ - -#include "cn9k_worker.h" -#include "cnxk_eventdev.h" -#include "cnxk_worker.h" - -#if !defined(CNXK_DIS_TMPLT_FUNC) - -#define R(name, flags) SSO_DUAL_DEQ(cn9k_sso_hws_dual_deq_##name, flags) - -NIX_RX_FASTPATH_MODES_64_79 -#undef R - -#endif diff --git a/drivers/event/cnxk/deq/cn9k/deq_64_79_dual_seg.c b/drivers/event/cnxk/deq/cn9k/deq_64_79_dual_seg.c deleted file mode 100644 index b5382fc908..0000000000 --- a/drivers/event/cnxk/deq/cn9k/deq_64_79_dual_seg.c +++ /dev/null @@ -1,16 +0,0 @@ -/* SPDX-License-Identifier: BSD-3-Clause - * Copyright(C) 2022 Marvell. - */ - -#include "cn9k_worker.h" -#include "cnxk_eventdev.h" -#include "cnxk_worker.h" - -#if !defined(CNXK_DIS_TMPLT_FUNC) - -#define R(name, flags) SSO_DUAL_DEQ_SEG(cn9k_sso_hws_dual_deq_seg_##name, flags) - -NIX_RX_FASTPATH_MODES_64_79 -#undef R - -#endif diff --git a/drivers/event/cnxk/deq/cn9k/deq_64_79_dual_tmo.c b/drivers/event/cnxk/deq/cn9k/deq_64_79_dual_tmo.c deleted file mode 100644 index 68aeb1b0d5..0000000000 --- a/drivers/event/cnxk/deq/cn9k/deq_64_79_dual_tmo.c +++ /dev/null @@ -1,16 +0,0 @@ -/* SPDX-License-Identifier: BSD-3-Clause - * Copyright(C) 2022 Marvell. - */ - -#include "cn9k_worker.h" -#include "cnxk_eventdev.h" -#include "cnxk_worker.h" - -#if !defined(CNXK_DIS_TMPLT_FUNC) - -#define R(name, flags) SSO_DUAL_DEQ_TMO(cn9k_sso_hws_dual_deq_tmo_##name, flags) - -NIX_RX_FASTPATH_MODES_64_79 -#undef R - -#endif diff --git a/drivers/event/cnxk/deq/cn9k/deq_64_79_dual_tmo_seg.c b/drivers/event/cnxk/deq/cn9k/deq_64_79_dual_tmo_seg.c deleted file mode 100644 index 763568e25a..0000000000 --- a/drivers/event/cnxk/deq/cn9k/deq_64_79_dual_tmo_seg.c +++ /dev/null @@ -1,17 +0,0 @@ -/* SPDX-License-Identifier: BSD-3-Clause - * Copyright(C) 2022 Marvell. 
- */ - -#include "cn9k_worker.h" -#include "cnxk_eventdev.h" -#include "cnxk_worker.h" - -#if !defined(CNXK_DIS_TMPLT_FUNC) - -#define R(name, flags) \ - SSO_DUAL_DEQ_TMO_SEG(cn9k_sso_hws_dual_deq_tmo_seg_##name, flags) - -NIX_RX_FASTPATH_MODES_64_79 -#undef R - -#endif diff --git a/drivers/event/cnxk/deq/cn9k/deq_64_79_seg.c b/drivers/event/cnxk/deq/cn9k/deq_64_79_seg.c deleted file mode 100644 index 98d7a31a4a..0000000000 --- a/drivers/event/cnxk/deq/cn9k/deq_64_79_seg.c +++ /dev/null @@ -1,16 +0,0 @@ -/* SPDX-License-Identifier: BSD-3-Clause - * Copyright(C) 2022 Marvell. - */ - -#include "cn9k_worker.h" -#include "cnxk_eventdev.h" -#include "cnxk_worker.h" - -#if !defined(CNXK_DIS_TMPLT_FUNC) - -#define R(name, flags) SSO_DEQ_SEG(cn9k_sso_hws_deq_seg_##name, flags) - -NIX_RX_FASTPATH_MODES_64_79 -#undef R - -#endif diff --git a/drivers/event/cnxk/deq/cn9k/deq_64_79_tmo.c b/drivers/event/cnxk/deq/cn9k/deq_64_79_tmo.c deleted file mode 100644 index b79dc6a7c1..0000000000 --- a/drivers/event/cnxk/deq/cn9k/deq_64_79_tmo.c +++ /dev/null @@ -1,16 +0,0 @@ -/* SPDX-License-Identifier: BSD-3-Clause - * Copyright(C) 2022 Marvell. - */ - -#include "cn9k_worker.h" -#include "cnxk_eventdev.h" -#include "cnxk_worker.h" - -#if !defined(CNXK_DIS_TMPLT_FUNC) - -#define R(name, flags) SSO_DEQ_TMO(cn9k_sso_hws_deq_tmo_##name, flags) - -NIX_RX_FASTPATH_MODES_64_79 -#undef R - -#endif diff --git a/drivers/event/cnxk/deq/cn9k/deq_64_79_tmo_seg.c b/drivers/event/cnxk/deq/cn9k/deq_64_79_tmo_seg.c deleted file mode 100644 index 92c9400d91..0000000000 --- a/drivers/event/cnxk/deq/cn9k/deq_64_79_tmo_seg.c +++ /dev/null @@ -1,16 +0,0 @@ -/* SPDX-License-Identifier: BSD-3-Clause - * Copyright(C) 2022 Marvell. - */ - -#include "cn9k_worker.h" -#include "cnxk_eventdev.h" -#include "cnxk_worker.h" - -#if !defined(CNXK_DIS_TMPLT_FUNC) - -#define R(name, flags) SSO_DEQ_TMO_SEG(cn9k_sso_hws_deq_tmo_seg_##name, flags) - -NIX_RX_FASTPATH_MODES_64_79 -#undef R - -#endif diff --git a/drivers/event/cnxk/deq/cn9k/deq_80_95.c b/drivers/event/cnxk/deq/cn9k/deq_80_95.c deleted file mode 100644 index 64000836c2..0000000000 --- a/drivers/event/cnxk/deq/cn9k/deq_80_95.c +++ /dev/null @@ -1,16 +0,0 @@ -/* SPDX-License-Identifier: BSD-3-Clause - * Copyright(C) 2022 Marvell. - */ - -#include "cn9k_worker.h" -#include "cnxk_eventdev.h" -#include "cnxk_worker.h" - -#if !defined(CNXK_DIS_TMPLT_FUNC) - -#define R(name, flags) SSO_DEQ(cn9k_sso_hws_deq_##name, flags) - -NIX_RX_FASTPATH_MODES_80_95 -#undef R - -#endif diff --git a/drivers/event/cnxk/deq/cn9k/deq_80_95_dual.c b/drivers/event/cnxk/deq/cn9k/deq_80_95_dual.c deleted file mode 100644 index c69d2f4100..0000000000 --- a/drivers/event/cnxk/deq/cn9k/deq_80_95_dual.c +++ /dev/null @@ -1,16 +0,0 @@ -/* SPDX-License-Identifier: BSD-3-Clause - * Copyright(C) 2022 Marvell. - */ - -#include "cn9k_worker.h" -#include "cnxk_eventdev.h" -#include "cnxk_worker.h" - -#if !defined(CNXK_DIS_TMPLT_FUNC) - -#define R(name, flags) SSO_DUAL_DEQ(cn9k_sso_hws_dual_deq_##name, flags) - -NIX_RX_FASTPATH_MODES_80_95 -#undef R - -#endif diff --git a/drivers/event/cnxk/deq/cn9k/deq_80_95_dual_seg.c b/drivers/event/cnxk/deq/cn9k/deq_80_95_dual_seg.c deleted file mode 100644 index f232d36778..0000000000 --- a/drivers/event/cnxk/deq/cn9k/deq_80_95_dual_seg.c +++ /dev/null @@ -1,16 +0,0 @@ -/* SPDX-License-Identifier: BSD-3-Clause - * Copyright(C) 2022 Marvell. 
- */ - -#include "cn9k_worker.h" -#include "cnxk_eventdev.h" -#include "cnxk_worker.h" - -#if !defined(CNXK_DIS_TMPLT_FUNC) - -#define R(name, flags) SSO_DUAL_DEQ_SEG(cn9k_sso_hws_dual_deq_seg_##name, flags) - -NIX_RX_FASTPATH_MODES_80_95 -#undef R - -#endif diff --git a/drivers/event/cnxk/deq/cn9k/deq_80_95_dual_tmo.c b/drivers/event/cnxk/deq/cn9k/deq_80_95_dual_tmo.c deleted file mode 100644 index 948695fcf8..0000000000 --- a/drivers/event/cnxk/deq/cn9k/deq_80_95_dual_tmo.c +++ /dev/null @@ -1,16 +0,0 @@ -/* SPDX-License-Identifier: BSD-3-Clause - * Copyright(C) 2022 Marvell. - */ - -#include "cn9k_worker.h" -#include "cnxk_eventdev.h" -#include "cnxk_worker.h" - -#if !defined(CNXK_DIS_TMPLT_FUNC) - -#define R(name, flags) SSO_DUAL_DEQ_TMO(cn9k_sso_hws_dual_deq_tmo_##name, flags) - -NIX_RX_FASTPATH_MODES_80_95 -#undef R - -#endif diff --git a/drivers/event/cnxk/deq/cn9k/deq_80_95_dual_tmo_seg.c b/drivers/event/cnxk/deq/cn9k/deq_80_95_dual_tmo_seg.c deleted file mode 100644 index d3055e348b..0000000000 --- a/drivers/event/cnxk/deq/cn9k/deq_80_95_dual_tmo_seg.c +++ /dev/null @@ -1,17 +0,0 @@ -/* SPDX-License-Identifier: BSD-3-Clause - * Copyright(C) 2022 Marvell. - */ - -#include "cn9k_worker.h" -#include "cnxk_eventdev.h" -#include "cnxk_worker.h" - -#if !defined(CNXK_DIS_TMPLT_FUNC) - -#define R(name, flags) \ - SSO_DUAL_DEQ_TMO_SEG(cn9k_sso_hws_dual_deq_tmo_seg_##name, flags) - -NIX_RX_FASTPATH_MODES_80_95 -#undef R - -#endif diff --git a/drivers/event/cnxk/deq/cn9k/deq_80_95_seg.c b/drivers/event/cnxk/deq/cn9k/deq_80_95_seg.c deleted file mode 100644 index 76bef5e3a8..0000000000 --- a/drivers/event/cnxk/deq/cn9k/deq_80_95_seg.c +++ /dev/null @@ -1,16 +0,0 @@ -/* SPDX-License-Identifier: BSD-3-Clause - * Copyright(C) 2022 Marvell. - */ - -#include "cn9k_worker.h" -#include "cnxk_eventdev.h" -#include "cnxk_worker.h" - -#if !defined(CNXK_DIS_TMPLT_FUNC) - -#define R(name, flags) SSO_DEQ_SEG(cn9k_sso_hws_deq_seg_##name, flags) - -NIX_RX_FASTPATH_MODES_80_95 -#undef R - -#endif diff --git a/drivers/event/cnxk/deq/cn9k/deq_80_95_tmo.c b/drivers/event/cnxk/deq/cn9k/deq_80_95_tmo.c deleted file mode 100644 index 0c1bc7bf42..0000000000 --- a/drivers/event/cnxk/deq/cn9k/deq_80_95_tmo.c +++ /dev/null @@ -1,16 +0,0 @@ -/* SPDX-License-Identifier: BSD-3-Clause - * Copyright(C) 2022 Marvell. - */ - -#include "cn9k_worker.h" -#include "cnxk_eventdev.h" -#include "cnxk_worker.h" - -#if !defined(CNXK_DIS_TMPLT_FUNC) - -#define R(name, flags) SSO_DEQ_TMO(cn9k_sso_hws_deq_tmo_##name, flags) - -NIX_RX_FASTPATH_MODES_80_95 -#undef R - -#endif diff --git a/drivers/event/cnxk/deq/cn9k/deq_80_95_tmo_seg.c b/drivers/event/cnxk/deq/cn9k/deq_80_95_tmo_seg.c deleted file mode 100644 index 2a8f42af79..0000000000 --- a/drivers/event/cnxk/deq/cn9k/deq_80_95_tmo_seg.c +++ /dev/null @@ -1,16 +0,0 @@ -/* SPDX-License-Identifier: BSD-3-Clause - * Copyright(C) 2022 Marvell. - */ - -#include "cn9k_worker.h" -#include "cnxk_eventdev.h" -#include "cnxk_worker.h" - -#if !defined(CNXK_DIS_TMPLT_FUNC) - -#define R(name, flags) SSO_DEQ_TMO_SEG(cn9k_sso_hws_deq_tmo_seg_##name, flags) - -NIX_RX_FASTPATH_MODES_80_95 -#undef R - -#endif diff --git a/drivers/event/cnxk/deq/cn9k/deq_96_111.c b/drivers/event/cnxk/deq/cn9k/deq_96_111.c deleted file mode 100644 index a2eed2643b..0000000000 --- a/drivers/event/cnxk/deq/cn9k/deq_96_111.c +++ /dev/null @@ -1,16 +0,0 @@ -/* SPDX-License-Identifier: BSD-3-Clause - * Copyright(C) 2022 Marvell. 
- */ - -#include "cn9k_worker.h" -#include "cnxk_eventdev.h" -#include "cnxk_worker.h" - -#if !defined(CNXK_DIS_TMPLT_FUNC) - -#define R(name, flags) SSO_DEQ(cn9k_sso_hws_deq_##name, flags) - -NIX_RX_FASTPATH_MODES_96_111 -#undef R - -#endif diff --git a/drivers/event/cnxk/deq/cn9k/deq_96_111_dual.c b/drivers/event/cnxk/deq/cn9k/deq_96_111_dual.c deleted file mode 100644 index 706ecee7e8..0000000000 --- a/drivers/event/cnxk/deq/cn9k/deq_96_111_dual.c +++ /dev/null @@ -1,16 +0,0 @@ -/* SPDX-License-Identifier: BSD-3-Clause - * Copyright(C) 2022 Marvell. - */ - -#include "cn9k_worker.h" -#include "cnxk_eventdev.h" -#include "cnxk_worker.h" - -#if !defined(CNXK_DIS_TMPLT_FUNC) - -#define R(name, flags) SSO_DUAL_DEQ(cn9k_sso_hws_dual_deq_##name, flags) - -NIX_RX_FASTPATH_MODES_96_111 -#undef R - -#endif diff --git a/drivers/event/cnxk/deq/cn9k/deq_96_111_dual_seg.c b/drivers/event/cnxk/deq/cn9k/deq_96_111_dual_seg.c deleted file mode 100644 index f9c71d52b0..0000000000 --- a/drivers/event/cnxk/deq/cn9k/deq_96_111_dual_seg.c +++ /dev/null @@ -1,16 +0,0 @@ -/* SPDX-License-Identifier: BSD-3-Clause - * Copyright(C) 2022 Marvell. - */ - -#include "cn9k_worker.h" -#include "cnxk_eventdev.h" -#include "cnxk_worker.h" - -#if !defined(CNXK_DIS_TMPLT_FUNC) - -#define R(name, flags) SSO_DUAL_DEQ_SEG(cn9k_sso_hws_dual_deq_seg_##name, flags) - -NIX_RX_FASTPATH_MODES_96_111 -#undef R - -#endif diff --git a/drivers/event/cnxk/deq/cn9k/deq_96_111_dual_tmo.c b/drivers/event/cnxk/deq/cn9k/deq_96_111_dual_tmo.c deleted file mode 100644 index 0b5eb0fbf8..0000000000 --- a/drivers/event/cnxk/deq/cn9k/deq_96_111_dual_tmo.c +++ /dev/null @@ -1,16 +0,0 @@ -/* SPDX-License-Identifier: BSD-3-Clause - * Copyright(C) 2022 Marvell. - */ - -#include "cn9k_worker.h" -#include "cnxk_eventdev.h" -#include "cnxk_worker.h" - -#if !defined(CNXK_DIS_TMPLT_FUNC) - -#define R(name, flags) SSO_DUAL_DEQ_TMO(cn9k_sso_hws_dual_deq_tmo_##name, flags) - -NIX_RX_FASTPATH_MODES_96_111 -#undef R - -#endif diff --git a/drivers/event/cnxk/deq/cn9k/deq_96_111_dual_tmo_seg.c b/drivers/event/cnxk/deq/cn9k/deq_96_111_dual_tmo_seg.c deleted file mode 100644 index 8bb6ff5e93..0000000000 --- a/drivers/event/cnxk/deq/cn9k/deq_96_111_dual_tmo_seg.c +++ /dev/null @@ -1,17 +0,0 @@ -/* SPDX-License-Identifier: BSD-3-Clause - * Copyright(C) 2022 Marvell. - */ - -#include "cn9k_worker.h" -#include "cnxk_eventdev.h" -#include "cnxk_worker.h" - -#if !defined(CNXK_DIS_TMPLT_FUNC) - -#define R(name, flags) \ - SSO_DUAL_DEQ_TMO_SEG(cn9k_sso_hws_dual_deq_tmo_seg_##name, flags) - -NIX_RX_FASTPATH_MODES_96_111 -#undef R - -#endif diff --git a/drivers/event/cnxk/deq/cn9k/deq_96_111_seg.c b/drivers/event/cnxk/deq/cn9k/deq_96_111_seg.c deleted file mode 100644 index ac293a5f5d..0000000000 --- a/drivers/event/cnxk/deq/cn9k/deq_96_111_seg.c +++ /dev/null @@ -1,16 +0,0 @@ -/* SPDX-License-Identifier: BSD-3-Clause - * Copyright(C) 2022 Marvell. - */ - -#include "cn9k_worker.h" -#include "cnxk_eventdev.h" -#include "cnxk_worker.h" - -#if !defined(CNXK_DIS_TMPLT_FUNC) - -#define R(name, flags) SSO_DEQ_SEG(cn9k_sso_hws_deq_seg_##name, flags) - -NIX_RX_FASTPATH_MODES_96_111 -#undef R - -#endif diff --git a/drivers/event/cnxk/deq/cn9k/deq_96_111_tmo.c b/drivers/event/cnxk/deq/cn9k/deq_96_111_tmo.c deleted file mode 100644 index 20965f3d3f..0000000000 --- a/drivers/event/cnxk/deq/cn9k/deq_96_111_tmo.c +++ /dev/null @@ -1,16 +0,0 @@ -/* SPDX-License-Identifier: BSD-3-Clause - * Copyright(C) 2022 Marvell. 
- */ - -#include "cn9k_worker.h" -#include "cnxk_eventdev.h" -#include "cnxk_worker.h" - -#if !defined(CNXK_DIS_TMPLT_FUNC) - -#define R(name, flags) SSO_DEQ_TMO(cn9k_sso_hws_deq_tmo_##name, flags) - -NIX_RX_FASTPATH_MODES_96_111 -#undef R - -#endif diff --git a/drivers/event/cnxk/deq/cn9k/deq_96_111_tmo_seg.c b/drivers/event/cnxk/deq/cn9k/deq_96_111_tmo_seg.c deleted file mode 100644 index 4fe1e582ff..0000000000 --- a/drivers/event/cnxk/deq/cn9k/deq_96_111_tmo_seg.c +++ /dev/null @@ -1,16 +0,0 @@ -/* SPDX-License-Identifier: BSD-3-Clause - * Copyright(C) 2022 Marvell. - */ - -#include "cn9k_worker.h" -#include "cnxk_eventdev.h" -#include "cnxk_worker.h" - -#if !defined(CNXK_DIS_TMPLT_FUNC) - -#define R(name, flags) SSO_DEQ_TMO_SEG(cn9k_sso_hws_deq_tmo_seg_##name, flags) - -NIX_RX_FASTPATH_MODES_96_111 -#undef R - -#endif diff --git a/drivers/event/cnxk/deq/cn9k/deq_all_offload.c b/drivers/event/cnxk/deq/cn9k/deq_all_offload.c index f8288fbb4e..f163a05fce 100644 --- a/drivers/event/cnxk/deq/cn9k/deq_all_offload.c +++ b/drivers/event/cnxk/deq/cn9k/deq_all_offload.c @@ -9,16 +9,18 @@ #if defined(CNXK_DIS_TMPLT_FUNC) uint16_t __rte_hot -cn9k_sso_hws_deq_all_offload(void *port, struct rte_event *ev, uint64_t timeout_ticks) +cn9k_sso_hws_deq_burst_all_offload(void *port, struct rte_event ev[], uint16_t nb_events, + uint64_t timeout_ticks) { const uint32_t flags = (NIX_RX_OFFLOAD_RSS_F | NIX_RX_OFFLOAD_PTYPE_F | NIX_RX_OFFLOAD_CHECKSUM_F | NIX_RX_OFFLOAD_MARK_UPDATE_F | NIX_RX_OFFLOAD_VLAN_STRIP_F | NIX_RX_OFFLOAD_SECURITY_F | NIX_RX_MULTI_SEG_F); - struct cn9k_sso_hws *ws = port; uint16_t ret = 1; uint64_t iter; + + RTE_SET_USED(nb_events); if (ws->swtag_req) { ws->swtag_req = 0; cnxk_sso_hws_swtag_wait(ws->base + SSOW_LF_GWS_TAG); @@ -31,7 +33,8 @@ cn9k_sso_hws_deq_all_offload(void *port, struct rte_event *ev, uint64_t timeout_ } uint16_t __rte_hot -cn9k_sso_hws_deq_dual_all_offload(void *port, struct rte_event *ev, uint64_t timeout_ticks) +cn9k_sso_hws_deq_dual_burst_all_offload(void *port, struct rte_event ev[], uint16_t nb_events, + uint64_t timeout_ticks) { const uint32_t flags = (NIX_RX_OFFLOAD_RSS_F | NIX_RX_OFFLOAD_PTYPE_F | NIX_RX_OFFLOAD_CHECKSUM_F | @@ -41,6 +44,7 @@ cn9k_sso_hws_deq_dual_all_offload(void *port, struct rte_event *ev, uint64_t tim uint16_t ret = 1; uint64_t iter; + RTE_SET_USED(nb_events); if (dws->swtag_req) { dws->swtag_req = 0; cnxk_sso_hws_swtag_wait(dws->base[!dws->vws] + SSOW_LF_GWS_TAG); @@ -57,7 +61,8 @@ cn9k_sso_hws_deq_dual_all_offload(void *port, struct rte_event *ev, uint64_t tim } uint16_t __rte_hot -cn9k_sso_hws_deq_all_offload_tst(void *port, struct rte_event *ev, uint64_t timeout_ticks) +cn9k_sso_hws_deq_burst_all_offload_tst(void *port, struct rte_event ev[], uint16_t nb_events, + uint64_t timeout_ticks) { const uint32_t flags = (NIX_RX_OFFLOAD_RSS_F | NIX_RX_OFFLOAD_PTYPE_F | NIX_RX_OFFLOAD_CHECKSUM_F | @@ -67,6 +72,8 @@ cn9k_sso_hws_deq_all_offload_tst(void *port, struct rte_event *ev, uint64_t time struct cn9k_sso_hws *ws = port; uint16_t ret = 1; uint64_t iter; + + RTE_SET_USED(nb_events); if (ws->swtag_req) { ws->swtag_req = 0; cnxk_sso_hws_swtag_wait(ws->base + SSOW_LF_GWS_TAG); @@ -79,7 +86,8 @@ cn9k_sso_hws_deq_all_offload_tst(void *port, struct rte_event *ev, uint64_t time } uint16_t __rte_hot -cn9k_sso_hws_deq_dual_all_offload_tst(void *port, struct rte_event *ev, uint64_t timeout_ticks) +cn9k_sso_hws_deq_dual_burst_all_offload_tst(void *port, struct rte_event ev[], uint16_t nb_events, + uint64_t timeout_ticks) { const uint32_t flags = 
(NIX_RX_OFFLOAD_RSS_F | NIX_RX_OFFLOAD_PTYPE_F | NIX_RX_OFFLOAD_CHECKSUM_F | @@ -89,6 +97,7 @@ cn9k_sso_hws_deq_dual_all_offload_tst(void *port, struct rte_event *ev, uint64_t uint16_t ret = 1; uint64_t iter; + RTE_SET_USED(nb_events); if (dws->swtag_req) { dws->swtag_req = 0; cnxk_sso_hws_swtag_wait(dws->base[!dws->vws] + SSOW_LF_GWS_TAG); @@ -104,36 +113,4 @@ cn9k_sso_hws_deq_dual_all_offload_tst(void *port, struct rte_event *ev, uint64_t return ret; } -uint16_t __rte_hot -cn9k_sso_hws_deq_burst_all_offload(void *port, struct rte_event ev[], uint16_t nb_events, - uint64_t timeout_ticks) -{ - RTE_SET_USED(nb_events); - return cn9k_sso_hws_deq_all_offload(port, ev, timeout_ticks); -} - -uint16_t __rte_hot -cn9k_sso_hws_deq_dual_burst_all_offload(void *port, struct rte_event ev[], uint16_t nb_events, - uint64_t timeout_ticks) -{ - RTE_SET_USED(nb_events); - return cn9k_sso_hws_deq_dual_all_offload(port, ev, timeout_ticks); -} - -uint16_t __rte_hot -cn9k_sso_hws_deq_burst_all_offload_tst(void *port, struct rte_event ev[], uint16_t nb_events, - uint64_t timeout_ticks) -{ - RTE_SET_USED(nb_events); - return cn9k_sso_hws_deq_all_offload_tst(port, ev, timeout_ticks); -} - -uint16_t __rte_hot -cn9k_sso_hws_deq_dual_burst_all_offload_tst(void *port, struct rte_event ev[], uint16_t nb_events, - uint64_t timeout_ticks) -{ - RTE_SET_USED(nb_events); - return cn9k_sso_hws_deq_dual_all_offload_tst(port, ev, timeout_ticks); -} - #endif diff --git a/drivers/event/cnxk/meson.build b/drivers/event/cnxk/meson.build index 2c1060ad87..8aaf8116f7 100644 --- a/drivers/event/cnxk/meson.build +++ b/drivers/event/cnxk/meson.build @@ -14,7 +14,7 @@ else soc_type = platform endif -if soc_type != 'cn9k' and soc_type != 'cn10k' +if soc_type != 'cn9k' and soc_type != 'cn10k' and soc_type != 'cn20k' soc_type = 'all' endif @@ -59,30 +59,6 @@ sources += files( 'deq/cn9k/deq_80_95_seg_burst.c', 'deq/cn9k/deq_96_111_seg_burst.c', 'deq/cn9k/deq_112_127_seg_burst.c', - 'deq/cn9k/deq_0_15.c', - 'deq/cn9k/deq_16_31.c', - 'deq/cn9k/deq_32_47.c', - 'deq/cn9k/deq_48_63.c', - 'deq/cn9k/deq_64_79.c', - 'deq/cn9k/deq_80_95.c', - 'deq/cn9k/deq_96_111.c', - 'deq/cn9k/deq_112_127.c', - 'deq/cn9k/deq_0_15_seg.c', - 'deq/cn9k/deq_16_31_seg.c', - 'deq/cn9k/deq_32_47_seg.c', - 'deq/cn9k/deq_48_63_seg.c', - 'deq/cn9k/deq_64_79_seg.c', - 'deq/cn9k/deq_80_95_seg.c', - 'deq/cn9k/deq_96_111_seg.c', - 'deq/cn9k/deq_112_127_seg.c', - 'deq/cn9k/deq_0_15_tmo.c', - 'deq/cn9k/deq_16_31_tmo.c', - 'deq/cn9k/deq_32_47_tmo.c', - 'deq/cn9k/deq_48_63_tmo.c', - 'deq/cn9k/deq_64_79_tmo.c', - 'deq/cn9k/deq_80_95_tmo.c', - 'deq/cn9k/deq_96_111_tmo.c', - 'deq/cn9k/deq_112_127_tmo.c', 'deq/cn9k/deq_0_15_tmo_burst.c', 'deq/cn9k/deq_16_31_tmo_burst.c', 'deq/cn9k/deq_32_47_tmo_burst.c', @@ -91,14 +67,6 @@ sources += files( 'deq/cn9k/deq_80_95_tmo_burst.c', 'deq/cn9k/deq_96_111_tmo_burst.c', 'deq/cn9k/deq_112_127_tmo_burst.c', - 'deq/cn9k/deq_0_15_tmo_seg.c', - 'deq/cn9k/deq_16_31_tmo_seg.c', - 'deq/cn9k/deq_32_47_tmo_seg.c', - 'deq/cn9k/deq_48_63_tmo_seg.c', - 'deq/cn9k/deq_64_79_tmo_seg.c', - 'deq/cn9k/deq_80_95_tmo_seg.c', - 'deq/cn9k/deq_96_111_tmo_seg.c', - 'deq/cn9k/deq_112_127_tmo_seg.c', 'deq/cn9k/deq_0_15_tmo_seg_burst.c', 'deq/cn9k/deq_16_31_tmo_seg_burst.c', 'deq/cn9k/deq_32_47_tmo_seg_burst.c', @@ -126,30 +94,6 @@ sources += files( 'deq/cn9k/deq_80_95_dual_seg_burst.c', 'deq/cn9k/deq_96_111_dual_seg_burst.c', 'deq/cn9k/deq_112_127_dual_seg_burst.c', - 'deq/cn9k/deq_0_15_dual.c', - 'deq/cn9k/deq_16_31_dual.c', - 'deq/cn9k/deq_32_47_dual.c', - 
'deq/cn9k/deq_48_63_dual.c', - 'deq/cn9k/deq_64_79_dual.c', - 'deq/cn9k/deq_80_95_dual.c', - 'deq/cn9k/deq_96_111_dual.c', - 'deq/cn9k/deq_112_127_dual.c', - 'deq/cn9k/deq_0_15_dual_seg.c', - 'deq/cn9k/deq_16_31_dual_seg.c', - 'deq/cn9k/deq_32_47_dual_seg.c', - 'deq/cn9k/deq_48_63_dual_seg.c', - 'deq/cn9k/deq_64_79_dual_seg.c', - 'deq/cn9k/deq_80_95_dual_seg.c', - 'deq/cn9k/deq_96_111_dual_seg.c', - 'deq/cn9k/deq_112_127_dual_seg.c', - 'deq/cn9k/deq_0_15_dual_tmo.c', - 'deq/cn9k/deq_16_31_dual_tmo.c', - 'deq/cn9k/deq_32_47_dual_tmo.c', - 'deq/cn9k/deq_48_63_dual_tmo.c', - 'deq/cn9k/deq_64_79_dual_tmo.c', - 'deq/cn9k/deq_80_95_dual_tmo.c', - 'deq/cn9k/deq_96_111_dual_tmo.c', - 'deq/cn9k/deq_112_127_dual_tmo.c', 'deq/cn9k/deq_0_15_dual_tmo_burst.c', 'deq/cn9k/deq_16_31_dual_tmo_burst.c', 'deq/cn9k/deq_32_47_dual_tmo_burst.c', @@ -158,14 +102,6 @@ sources += files( 'deq/cn9k/deq_80_95_dual_tmo_burst.c', 'deq/cn9k/deq_96_111_dual_tmo_burst.c', 'deq/cn9k/deq_112_127_dual_tmo_burst.c', - 'deq/cn9k/deq_0_15_dual_tmo_seg.c', - 'deq/cn9k/deq_16_31_dual_tmo_seg.c', - 'deq/cn9k/deq_32_47_dual_tmo_seg.c', - 'deq/cn9k/deq_48_63_dual_tmo_seg.c', - 'deq/cn9k/deq_64_79_dual_tmo_seg.c', - 'deq/cn9k/deq_80_95_dual_tmo_seg.c', - 'deq/cn9k/deq_96_111_dual_tmo_seg.c', - 'deq/cn9k/deq_112_127_dual_tmo_seg.c', 'deq/cn9k/deq_0_15_dual_tmo_seg_burst.c', 'deq/cn9k/deq_16_31_dual_tmo_seg_burst.c', 'deq/cn9k/deq_32_47_dual_tmo_seg_burst.c', @@ -247,30 +183,6 @@ sources += files( 'deq/cn10k/deq_80_95_seg_burst.c', 'deq/cn10k/deq_96_111_seg_burst.c', 'deq/cn10k/deq_112_127_seg_burst.c', - 'deq/cn10k/deq_0_15.c', - 'deq/cn10k/deq_16_31.c', - 'deq/cn10k/deq_32_47.c', - 'deq/cn10k/deq_48_63.c', - 'deq/cn10k/deq_64_79.c', - 'deq/cn10k/deq_80_95.c', - 'deq/cn10k/deq_96_111.c', - 'deq/cn10k/deq_112_127.c', - 'deq/cn10k/deq_0_15_seg.c', - 'deq/cn10k/deq_16_31_seg.c', - 'deq/cn10k/deq_32_47_seg.c', - 'deq/cn10k/deq_48_63_seg.c', - 'deq/cn10k/deq_64_79_seg.c', - 'deq/cn10k/deq_80_95_seg.c', - 'deq/cn10k/deq_96_111_seg.c', - 'deq/cn10k/deq_112_127_seg.c', - 'deq/cn10k/deq_0_15_tmo.c', - 'deq/cn10k/deq_16_31_tmo.c', - 'deq/cn10k/deq_32_47_tmo.c', - 'deq/cn10k/deq_48_63_tmo.c', - 'deq/cn10k/deq_64_79_tmo.c', - 'deq/cn10k/deq_80_95_tmo.c', - 'deq/cn10k/deq_96_111_tmo.c', - 'deq/cn10k/deq_112_127_tmo.c', 'deq/cn10k/deq_0_15_tmo_burst.c', 'deq/cn10k/deq_16_31_tmo_burst.c', 'deq/cn10k/deq_32_47_tmo_burst.c', @@ -279,14 +191,6 @@ sources += files( 'deq/cn10k/deq_80_95_tmo_burst.c', 'deq/cn10k/deq_96_111_tmo_burst.c', 'deq/cn10k/deq_112_127_tmo_burst.c', - 'deq/cn10k/deq_0_15_tmo_seg.c', - 'deq/cn10k/deq_16_31_tmo_seg.c', - 'deq/cn10k/deq_32_47_tmo_seg.c', - 'deq/cn10k/deq_48_63_tmo_seg.c', - 'deq/cn10k/deq_64_79_tmo_seg.c', - 'deq/cn10k/deq_80_95_tmo_seg.c', - 'deq/cn10k/deq_96_111_tmo_seg.c', - 'deq/cn10k/deq_112_127_tmo_seg.c', 'deq/cn10k/deq_0_15_tmo_seg_burst.c', 'deq/cn10k/deq_16_31_tmo_seg_burst.c', 'deq/cn10k/deq_32_47_tmo_seg_burst.c', @@ -325,6 +229,76 @@ sources += files( endif endif +if soc_type == 'cn20k' or soc_type == 'all' +sources += files( + 'cn20k_eventdev.c', + 'cn20k_worker.c', +) + +if host_machine.cpu_family().startswith('aarch') and not disable_template +sources += files( + 'deq/cn20k/deq_0_15_burst.c', + 'deq/cn20k/deq_16_31_burst.c', + 'deq/cn20k/deq_32_47_burst.c', + 'deq/cn20k/deq_48_63_burst.c', + 'deq/cn20k/deq_64_79_burst.c', + 'deq/cn20k/deq_80_95_burst.c', + 'deq/cn20k/deq_96_111_burst.c', + 'deq/cn20k/deq_112_127_burst.c', + 'deq/cn20k/deq_0_15_seg_burst.c', + 'deq/cn20k/deq_16_31_seg_burst.c', + 
'deq/cn20k/deq_32_47_seg_burst.c', + 'deq/cn20k/deq_48_63_seg_burst.c', + 'deq/cn20k/deq_64_79_seg_burst.c', + 'deq/cn20k/deq_80_95_seg_burst.c', + 'deq/cn20k/deq_96_111_seg_burst.c', + 'deq/cn20k/deq_112_127_seg_burst.c', + 'deq/cn20k/deq_0_15_tmo_burst.c', + 'deq/cn20k/deq_16_31_tmo_burst.c', + 'deq/cn20k/deq_32_47_tmo_burst.c', + 'deq/cn20k/deq_48_63_tmo_burst.c', + 'deq/cn20k/deq_64_79_tmo_burst.c', + 'deq/cn20k/deq_80_95_tmo_burst.c', + 'deq/cn20k/deq_96_111_tmo_burst.c', + 'deq/cn20k/deq_112_127_tmo_burst.c', + 'deq/cn20k/deq_0_15_tmo_seg_burst.c', + 'deq/cn20k/deq_16_31_tmo_seg_burst.c', + 'deq/cn20k/deq_32_47_tmo_seg_burst.c', + 'deq/cn20k/deq_48_63_tmo_seg_burst.c', + 'deq/cn20k/deq_64_79_tmo_seg_burst.c', + 'deq/cn20k/deq_80_95_tmo_seg_burst.c', + 'deq/cn20k/deq_96_111_tmo_seg_burst.c', + 'deq/cn20k/deq_112_127_tmo_seg_burst.c', + 'deq/cn20k/deq_all_offload.c', +) + +sources += files( + 'tx/cn20k/tx_0_15.c', + 'tx/cn20k/tx_16_31.c', + 'tx/cn20k/tx_32_47.c', + 'tx/cn20k/tx_48_63.c', + 'tx/cn20k/tx_64_79.c', + 'tx/cn20k/tx_80_95.c', + 'tx/cn20k/tx_96_111.c', + 'tx/cn20k/tx_112_127.c', + 'tx/cn20k/tx_0_15_seg.c', + 'tx/cn20k/tx_16_31_seg.c', + 'tx/cn20k/tx_32_47_seg.c', + 'tx/cn20k/tx_48_63_seg.c', + 'tx/cn20k/tx_64_79_seg.c', + 'tx/cn20k/tx_80_95_seg.c', + 'tx/cn20k/tx_96_111_seg.c', + 'tx/cn20k/tx_112_127_seg.c', + 'tx/cn20k/tx_all_offload.c', +) +else +sources += files( + 'deq/cn20k/deq_all_offload.c', + 'tx/cn20k/tx_all_offload.c', +) +endif +endif + extra_flags = ['-flax-vector-conversions', '-Wno-strict-aliasing'] if cc.get_id() == 'clang' extra_flags += ['-Wno-asm-operand-widths'] diff --git a/drivers/event/cnxk/tx/cn20k/tx_0_15.c b/drivers/event/cnxk/tx/cn20k/tx_0_15.c new file mode 100644 index 0000000000..b681bc8ab0 --- /dev/null +++ b/drivers/event/cnxk/tx/cn20k/tx_0_15.c @@ -0,0 +1,18 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(C) 2022 Marvell. + */ + +#include "cn20k_tx_worker.h" + +#ifdef _ROC_API_H_ +#error "roc_api.h is included" +#endif + +#if !defined(CNXK_DIS_TMPLT_FUNC) + +#define T(name, sz, flags) SSO_TX(cn20k_sso_hws_tx_adptr_enq_##name, sz, flags) + +NIX_TX_FASTPATH_MODES_0_15 +#undef T + +#endif diff --git a/drivers/event/cnxk/tx/cn20k/tx_0_15_seg.c b/drivers/event/cnxk/tx/cn20k/tx_0_15_seg.c new file mode 100644 index 0000000000..1dacb63d4b --- /dev/null +++ b/drivers/event/cnxk/tx/cn20k/tx_0_15_seg.c @@ -0,0 +1,19 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(C) 2022 Marvell. + */ + +#include "cn20k_tx_worker.h" + +#ifdef _ROC_API_H_ +#error "roc_api.h is included" +#endif + +#if !defined(CNXK_DIS_TMPLT_FUNC) + +#define T(name, sz, flags) \ + SSO_TX_SEG(cn20k_sso_hws_tx_adptr_enq_seg_##name, sz, flags) + +NIX_TX_FASTPATH_MODES_0_15 +#undef T + +#endif diff --git a/drivers/event/cnxk/tx/cn20k/tx_112_127.c b/drivers/event/cnxk/tx/cn20k/tx_112_127.c new file mode 100644 index 0000000000..abdb8b76a1 --- /dev/null +++ b/drivers/event/cnxk/tx/cn20k/tx_112_127.c @@ -0,0 +1,18 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(C) 2022 Marvell. 
+ */ + +#include "cn20k_tx_worker.h" + +#ifdef _ROC_API_H_ +#error "roc_api.h is included" +#endif + +#if !defined(CNXK_DIS_TMPLT_FUNC) + +#define T(name, sz, flags) SSO_TX(cn20k_sso_hws_tx_adptr_enq_##name, sz, flags) + +NIX_TX_FASTPATH_MODES_112_127 +#undef T + +#endif diff --git a/drivers/event/cnxk/tx/cn20k/tx_112_127_seg.c b/drivers/event/cnxk/tx/cn20k/tx_112_127_seg.c new file mode 100644 index 0000000000..c39d331b25 --- /dev/null +++ b/drivers/event/cnxk/tx/cn20k/tx_112_127_seg.c @@ -0,0 +1,19 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(C) 2022 Marvell. + */ + +#include "cn20k_tx_worker.h" + +#ifdef _ROC_API_H_ +#error "roc_api.h is included" +#endif + +#if !defined(CNXK_DIS_TMPLT_FUNC) + +#define T(name, sz, flags) \ + SSO_TX_SEG(cn20k_sso_hws_tx_adptr_enq_seg_##name, sz, flags) + +NIX_TX_FASTPATH_MODES_112_127 +#undef T + +#endif diff --git a/drivers/event/cnxk/tx/cn20k/tx_16_31.c b/drivers/event/cnxk/tx/cn20k/tx_16_31.c new file mode 100644 index 0000000000..5b88c47914 --- /dev/null +++ b/drivers/event/cnxk/tx/cn20k/tx_16_31.c @@ -0,0 +1,18 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(C) 2022 Marvell. + */ + +#include "cn20k_tx_worker.h" + +#ifdef _ROC_API_H_ +#error "roc_api.h is included" +#endif + +#if !defined(CNXK_DIS_TMPLT_FUNC) + +#define T(name, sz, flags) SSO_TX(cn20k_sso_hws_tx_adptr_enq_##name, sz, flags) + +NIX_TX_FASTPATH_MODES_16_31 +#undef T + +#endif diff --git a/drivers/event/cnxk/tx/cn20k/tx_16_31_seg.c b/drivers/event/cnxk/tx/cn20k/tx_16_31_seg.c new file mode 100644 index 0000000000..13f00ac478 --- /dev/null +++ b/drivers/event/cnxk/tx/cn20k/tx_16_31_seg.c @@ -0,0 +1,19 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(C) 2022 Marvell. + */ + +#include "cn20k_tx_worker.h" + +#ifdef _ROC_API_H_ +#error "roc_api.h is included" +#endif + +#if !defined(CNXK_DIS_TMPLT_FUNC) + +#define T(name, sz, flags) \ + SSO_TX_SEG(cn20k_sso_hws_tx_adptr_enq_seg_##name, sz, flags) + +NIX_TX_FASTPATH_MODES_16_31 +#undef T + +#endif diff --git a/drivers/event/cnxk/tx/cn20k/tx_32_47.c b/drivers/event/cnxk/tx/cn20k/tx_32_47.c new file mode 100644 index 0000000000..1f6008c425 --- /dev/null +++ b/drivers/event/cnxk/tx/cn20k/tx_32_47.c @@ -0,0 +1,18 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(C) 2022 Marvell. + */ + +#include "cn20k_tx_worker.h" + +#ifdef _ROC_API_H_ +#error "roc_api.h is included" +#endif + +#if !defined(CNXK_DIS_TMPLT_FUNC) + +#define T(name, sz, flags) SSO_TX(cn20k_sso_hws_tx_adptr_enq_##name, sz, flags) + +NIX_TX_FASTPATH_MODES_32_47 +#undef T + +#endif diff --git a/drivers/event/cnxk/tx/cn20k/tx_32_47_seg.c b/drivers/event/cnxk/tx/cn20k/tx_32_47_seg.c new file mode 100644 index 0000000000..587f22df3a --- /dev/null +++ b/drivers/event/cnxk/tx/cn20k/tx_32_47_seg.c @@ -0,0 +1,19 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(C) 2022 Marvell. + */ + +#include "cn20k_tx_worker.h" + +#ifdef _ROC_API_H_ +#error "roc_api.h is included" +#endif + +#if !defined(CNXK_DIS_TMPLT_FUNC) + +#define T(name, sz, flags) \ + SSO_TX_SEG(cn20k_sso_hws_tx_adptr_enq_seg_##name, sz, flags) + +NIX_TX_FASTPATH_MODES_32_47 +#undef T + +#endif diff --git a/drivers/event/cnxk/tx/cn20k/tx_48_63.c b/drivers/event/cnxk/tx/cn20k/tx_48_63.c new file mode 100644 index 0000000000..c712825417 --- /dev/null +++ b/drivers/event/cnxk/tx/cn20k/tx_48_63.c @@ -0,0 +1,18 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(C) 2022 Marvell. 
+ */ + +#include "cn20k_tx_worker.h" + +#ifdef _ROC_API_H_ +#error "roc_api.h is included" +#endif + +#if !defined(CNXK_DIS_TMPLT_FUNC) + +#define T(name, sz, flags) SSO_TX(cn20k_sso_hws_tx_adptr_enq_##name, sz, flags) + +NIX_TX_FASTPATH_MODES_48_63 +#undef T + +#endif diff --git a/drivers/event/cnxk/tx/cn20k/tx_48_63_seg.c b/drivers/event/cnxk/tx/cn20k/tx_48_63_seg.c new file mode 100644 index 0000000000..1fc11ec904 --- /dev/null +++ b/drivers/event/cnxk/tx/cn20k/tx_48_63_seg.c @@ -0,0 +1,19 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(C) 2022 Marvell. + */ + +#include "cn20k_tx_worker.h" + +#ifdef _ROC_API_H_ +#error "roc_api.h is included" +#endif + +#if !defined(CNXK_DIS_TMPLT_FUNC) + +#define T(name, sz, flags) \ + SSO_TX_SEG(cn20k_sso_hws_tx_adptr_enq_seg_##name, sz, flags) + +NIX_TX_FASTPATH_MODES_48_63 +#undef T + +#endif diff --git a/drivers/event/cnxk/tx/cn20k/tx_64_79.c b/drivers/event/cnxk/tx/cn20k/tx_64_79.c new file mode 100644 index 0000000000..0e427f79d8 --- /dev/null +++ b/drivers/event/cnxk/tx/cn20k/tx_64_79.c @@ -0,0 +1,18 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(C) 2022 Marvell. + */ + +#include "cn20k_tx_worker.h" + +#ifdef _ROC_API_H_ +#error "roc_api.h is included" +#endif + +#if !defined(CNXK_DIS_TMPLT_FUNC) + +#define T(name, sz, flags) SSO_TX(cn20k_sso_hws_tx_adptr_enq_##name, sz, flags) + +NIX_TX_FASTPATH_MODES_64_79 +#undef T + +#endif diff --git a/drivers/event/cnxk/tx/cn20k/tx_64_79_seg.c b/drivers/event/cnxk/tx/cn20k/tx_64_79_seg.c new file mode 100644 index 0000000000..6e1ae41b26 --- /dev/null +++ b/drivers/event/cnxk/tx/cn20k/tx_64_79_seg.c @@ -0,0 +1,19 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(C) 2022 Marvell. + */ + +#include "cn20k_tx_worker.h" + +#ifdef _ROC_API_H_ +#error "roc_api.h is included" +#endif + +#if !defined(CNXK_DIS_TMPLT_FUNC) + +#define T(name, sz, flags) \ + SSO_TX_SEG(cn20k_sso_hws_tx_adptr_enq_seg_##name, sz, flags) + +NIX_TX_FASTPATH_MODES_64_79 +#undef T + +#endif diff --git a/drivers/event/cnxk/tx/cn20k/tx_80_95.c b/drivers/event/cnxk/tx/cn20k/tx_80_95.c new file mode 100644 index 0000000000..8c87d2341d --- /dev/null +++ b/drivers/event/cnxk/tx/cn20k/tx_80_95.c @@ -0,0 +1,18 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(C) 2022 Marvell. + */ + +#include "cn20k_tx_worker.h" + +#ifdef _ROC_API_H_ +#error "roc_api.h is included" +#endif + +#if !defined(CNXK_DIS_TMPLT_FUNC) + +#define T(name, sz, flags) SSO_TX(cn20k_sso_hws_tx_adptr_enq_##name, sz, flags) + +NIX_TX_FASTPATH_MODES_80_95 +#undef T + +#endif diff --git a/drivers/event/cnxk/tx/cn20k/tx_80_95_seg.c b/drivers/event/cnxk/tx/cn20k/tx_80_95_seg.c new file mode 100644 index 0000000000..43a143f4bd --- /dev/null +++ b/drivers/event/cnxk/tx/cn20k/tx_80_95_seg.c @@ -0,0 +1,19 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(C) 2022 Marvell. + */ + +#include "cn20k_tx_worker.h" + +#ifdef _ROC_API_H_ +#error "roc_api.h is included" +#endif + +#if !defined(CNXK_DIS_TMPLT_FUNC) + +#define T(name, sz, flags) \ + SSO_TX_SEG(cn20k_sso_hws_tx_adptr_enq_seg_##name, sz, flags) + +NIX_TX_FASTPATH_MODES_80_95 +#undef T + +#endif diff --git a/drivers/event/cnxk/tx/cn20k/tx_96_111.c b/drivers/event/cnxk/tx/cn20k/tx_96_111.c new file mode 100644 index 0000000000..1a43af8b02 --- /dev/null +++ b/drivers/event/cnxk/tx/cn20k/tx_96_111.c @@ -0,0 +1,18 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(C) 2022 Marvell. 
+ */ + +#include "cn20k_tx_worker.h" + +#ifdef _ROC_API_H_ +#error "roc_api.h is included" +#endif + +#if !defined(CNXK_DIS_TMPLT_FUNC) + +#define T(name, sz, flags) SSO_TX(cn20k_sso_hws_tx_adptr_enq_##name, sz, flags) + +NIX_TX_FASTPATH_MODES_96_111 +#undef T + +#endif diff --git a/drivers/event/cnxk/tx/cn20k/tx_96_111_seg.c b/drivers/event/cnxk/tx/cn20k/tx_96_111_seg.c new file mode 100644 index 0000000000..e0e1d8a4ef --- /dev/null +++ b/drivers/event/cnxk/tx/cn20k/tx_96_111_seg.c @@ -0,0 +1,19 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(C) 2022 Marvell. + */ + +#include "cn20k_tx_worker.h" + +#ifdef _ROC_API_H_ +#error "roc_api.h is included" +#endif + +#if !defined(CNXK_DIS_TMPLT_FUNC) + +#define T(name, sz, flags) \ + SSO_TX_SEG(cn20k_sso_hws_tx_adptr_enq_seg_##name, sz, flags) + +NIX_TX_FASTPATH_MODES_96_111 +#undef T + +#endif diff --git a/drivers/event/cnxk/tx/cn20k/tx_all_offload.c b/drivers/event/cnxk/tx/cn20k/tx_all_offload.c new file mode 100644 index 0000000000..d2158a4256 --- /dev/null +++ b/drivers/event/cnxk/tx/cn20k/tx_all_offload.c @@ -0,0 +1,40 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(C) 2022 Marvell. + */ + +#include "cn20k_tx_worker.h" + +#ifdef _ROC_API_H_ +#error "roc_api.h is included" +#endif + +#if defined(CNXK_DIS_TMPLT_FUNC) + +uint16_t __rte_hot +cn20k_sso_hws_tx_adptr_enq_seg_all_offload(void *port, struct rte_event ev[], uint16_t nb_events) +{ + const uint32_t flags = (NIX_TX_OFFLOAD_L3_L4_CSUM_F | NIX_TX_OFFLOAD_MBUF_NOFF_F | + NIX_TX_MULTI_SEG_F | NIX_TX_OFFLOAD_SECURITY_F); + uint64_t cmd[8 + CNXK_NIX_TX_MSEG_SG_DWORDS - 2]; + + struct cn20k_sso_hws *ws = port; + RTE_SET_USED(nb_events); + return cn20k_sso_hws_event_tx(ws, &ev[0], cmd, (const uint64_t *)ws->tx_adptr_data, flags); +} + +uint16_t __rte_hot +cn20k_sso_hws_tx_adptr_enq_seg_all_offload_tst(void *port, struct rte_event ev[], + uint16_t nb_events) +{ + const uint32_t flags = + (NIX_TX_OFFLOAD_L3_L4_CSUM_F | NIX_TX_OFFLOAD_OL3_OL4_CSUM_F | + NIX_TX_OFFLOAD_VLAN_QINQ_F | NIX_TX_OFFLOAD_MBUF_NOFF_F | NIX_TX_OFFLOAD_TSO_F | + NIX_TX_OFFLOAD_TSTAMP_F | NIX_TX_OFFLOAD_SECURITY_F | NIX_TX_MULTI_SEG_F); + uint64_t cmd[8 + CNXK_NIX_TX_MSEG_SG_DWORDS - 2]; + + struct cn20k_sso_hws *ws = port; + RTE_SET_USED(nb_events); + return cn20k_sso_hws_event_tx(ws, &ev[0], cmd, (const uint64_t *)ws->tx_adptr_data, flags); +} + +#endif diff --git a/drivers/event/dlb2/dlb2.c b/drivers/event/dlb2/dlb2.c index 09e4107824..934fcafcfe 100644 --- a/drivers/event/dlb2/dlb2.c +++ b/drivers/event/dlb2/dlb2.c @@ -1483,10 +1483,6 @@ dlb2_init_qe_mem(struct dlb2_port *qm_port, char *mz_name) return ret; } -static inline uint16_t -dlb2_event_enqueue_delayed(void *event_port, - const struct rte_event events[]); - static inline uint16_t dlb2_event_enqueue_burst_delayed(void *event_port, const struct rte_event events[], @@ -1662,7 +1658,6 @@ dlb2_hw_create_ldb_port(struct dlb2_eventdev *dlb2, * performance reasons. 
*/ if (qm_port->token_pop_mode == DELAYED_POP) { - dlb2->event_dev->enqueue = dlb2_event_enqueue_delayed; dlb2->event_dev->enqueue_burst = dlb2_event_enqueue_burst_delayed; dlb2->event_dev->enqueue_new_burst = @@ -3304,20 +3299,6 @@ dlb2_event_enqueue_burst_delayed(void *event_port, return __dlb2_event_enqueue_burst(event_port, events, num, true); } -static inline uint16_t -dlb2_event_enqueue(void *event_port, - const struct rte_event events[]) -{ - return __dlb2_event_enqueue_burst(event_port, events, 1, false); -} - -static inline uint16_t -dlb2_event_enqueue_delayed(void *event_port, - const struct rte_event events[]) -{ - return __dlb2_event_enqueue_burst(event_port, events, 1, true); -} - static uint16_t dlb2_event_enqueue_new_burst(void *event_port, const struct rte_event events[], @@ -4312,12 +4293,6 @@ dlb2_event_dequeue_burst(void *event_port, struct rte_event *ev, uint16_t num, return cnt; } -static uint16_t -dlb2_event_dequeue(void *event_port, struct rte_event *ev, uint64_t wait) -{ - return dlb2_event_dequeue_burst(event_port, ev, 1, wait); -} - static uint16_t dlb2_event_dequeue_burst_sparse(void *event_port, struct rte_event *ev, uint16_t num, uint64_t wait) @@ -4375,13 +4350,6 @@ dlb2_event_dequeue_burst_sparse(void *event_port, struct rte_event *ev, return cnt; } -static uint16_t -dlb2_event_dequeue_sparse(void *event_port, struct rte_event *ev, - uint64_t wait) -{ - return dlb2_event_dequeue_burst_sparse(event_port, ev, 1, wait); -} - static void dlb2_flush_port(struct rte_eventdev *dev, int port_id) { @@ -4693,19 +4661,15 @@ dlb2_entry_points_init(struct rte_eventdev *dev) /* Expose PMD's eventdev interface */ dev->dev_ops = &dlb2_eventdev_entry_ops; - dev->enqueue = dlb2_event_enqueue; dev->enqueue_burst = dlb2_event_enqueue_burst; dev->enqueue_new_burst = dlb2_event_enqueue_new_burst; dev->enqueue_forward_burst = dlb2_event_enqueue_forward_burst; dlb2 = dev->data->dev_private; - if (dlb2->poll_mode == DLB2_CQ_POLL_MODE_SPARSE) { - dev->dequeue = dlb2_event_dequeue_sparse; + if (dlb2->poll_mode == DLB2_CQ_POLL_MODE_SPARSE) dev->dequeue_burst = dlb2_event_dequeue_burst_sparse; - } else { - dev->dequeue = dlb2_event_dequeue; + else dev->dequeue_burst = dlb2_event_dequeue_burst; - } } int diff --git a/drivers/event/dpaa/dpaa_eventdev.c b/drivers/event/dpaa/dpaa_eventdev.c index 85ccb586ef..853cc1ecf9 100644 --- a/drivers/event/dpaa/dpaa_eventdev.c +++ b/drivers/event/dpaa/dpaa_eventdev.c @@ -113,12 +113,6 @@ dpaa_event_enqueue_burst(void *port, const struct rte_event ev[], return nb_events; } -static uint16_t -dpaa_event_enqueue(void *port, const struct rte_event *ev) -{ - return dpaa_event_enqueue_burst(port, ev, 1); -} - static void drain_4_bytes(int fd, fd_set *fdset) { if (FD_ISSET(fd, fdset)) { @@ -233,12 +227,6 @@ dpaa_event_dequeue_burst(void *port, struct rte_event ev[], return num_frames; } -static uint16_t -dpaa_event_dequeue(void *port, struct rte_event *ev, uint64_t timeout_ticks) -{ - return dpaa_event_dequeue_burst(port, ev, 1, timeout_ticks); -} - static uint16_t dpaa_event_dequeue_burst_intr(void *port, struct rte_event ev[], uint16_t nb_events, uint64_t timeout_ticks) @@ -311,14 +299,6 @@ dpaa_event_dequeue_burst_intr(void *port, struct rte_event ev[], return num_frames; } -static uint16_t -dpaa_event_dequeue_intr(void *port, - struct rte_event *ev, - uint64_t timeout_ticks) -{ - return dpaa_event_dequeue_burst_intr(port, ev, 1, timeout_ticks); -} - static void dpaa_event_dev_info_get(struct rte_eventdev *dev, struct rte_event_dev_info *dev_info) @@ 
-1012,17 +992,14 @@ dpaa_event_dev_create(const char *name, const char *params, struct rte_vdev_devi priv = eventdev->data->dev_private; eventdev->dev_ops = &dpaa_eventdev_ops; - eventdev->enqueue = dpaa_event_enqueue; eventdev->enqueue_burst = dpaa_event_enqueue_burst; - if (dpaa_event_check_flags(params)) { - eventdev->dequeue = dpaa_event_dequeue; + if (dpaa_event_check_flags(params)) eventdev->dequeue_burst = dpaa_event_dequeue_burst; - } else { + else { priv->intr_mode = 1; eventdev->dev_ops->timeout_ticks = dpaa_event_dequeue_timeout_ticks_intr; - eventdev->dequeue = dpaa_event_dequeue_intr; eventdev->dequeue_burst = dpaa_event_dequeue_burst_intr; } eventdev->txa_enqueue = dpaa_eventdev_txa_enqueue; diff --git a/drivers/event/dpaa2/dpaa2_eventdev.c b/drivers/event/dpaa2/dpaa2_eventdev.c index f0b2c7de99..6c8ed3ff6b 100644 --- a/drivers/event/dpaa2/dpaa2_eventdev.c +++ b/drivers/event/dpaa2/dpaa2_eventdev.c @@ -202,12 +202,6 @@ dpaa2_eventdev_enqueue_burst(void *port, const struct rte_event ev[], } -static uint16_t -dpaa2_eventdev_enqueue(void *port, const struct rte_event *ev) -{ - return dpaa2_eventdev_enqueue_burst(port, ev, 1); -} - static void dpaa2_eventdev_dequeue_wait(uint64_t timeout_ticks) { struct epoll_event epoll_ev; @@ -363,13 +357,6 @@ dpaa2_eventdev_dequeue_burst(void *port, struct rte_event ev[], return 0; } -static uint16_t -dpaa2_eventdev_dequeue(void *port, struct rte_event *ev, - uint64_t timeout_ticks) -{ - return dpaa2_eventdev_dequeue_burst(port, ev, 1, timeout_ticks); -} - static void dpaa2_eventdev_info_get(struct rte_eventdev *dev, struct rte_event_dev_info *dev_info) @@ -1105,11 +1092,9 @@ dpaa2_eventdev_create(const char *name, struct rte_vdev_device *vdev) } eventdev->dev_ops = &dpaa2_eventdev_ops; - eventdev->enqueue = dpaa2_eventdev_enqueue; eventdev->enqueue_burst = dpaa2_eventdev_enqueue_burst; eventdev->enqueue_new_burst = dpaa2_eventdev_enqueue_burst; eventdev->enqueue_forward_burst = dpaa2_eventdev_enqueue_burst; - eventdev->dequeue = dpaa2_eventdev_dequeue; eventdev->dequeue_burst = dpaa2_eventdev_dequeue_burst; eventdev->txa_enqueue = dpaa2_eventdev_txa_enqueue; eventdev->txa_enqueue_same_dest = dpaa2_eventdev_txa_enqueue_same_dest; diff --git a/drivers/event/dpaa2/dpaa2_hw_dpcon.c b/drivers/event/dpaa2/dpaa2_hw_dpcon.c index a68d3ac154..ea5b0d4b85 100644 --- a/drivers/event/dpaa2/dpaa2_hw_dpcon.c +++ b/drivers/event/dpaa2/dpaa2_hw_dpcon.c @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: BSD-3-Clause * - * Copyright 2017 NXP + * Copyright 2017, 2020, 2023 NXP * */ @@ -30,14 +30,27 @@ TAILQ_HEAD(dpcon_dev_list, dpaa2_dpcon_dev); static struct dpcon_dev_list dpcon_dev_list = TAILQ_HEAD_INITIALIZER(dpcon_dev_list); /*!< DPCON device list */ +static struct dpaa2_dpcon_dev *get_dpcon_from_id(uint32_t dpcon_id) +{ + struct dpaa2_dpcon_dev *dpcon_dev = NULL; + + /* Get DPCONC dev handle from list using index */ + TAILQ_FOREACH(dpcon_dev, &dpcon_dev_list, next) { + if (dpcon_dev->dpcon_id == dpcon_id) + break; + } + + return dpcon_dev; +} + static int rte_dpaa2_create_dpcon_device(int dev_fd __rte_unused, - struct vfio_device_info *obj_info __rte_unused, - int dpcon_id) + struct vfio_device_info *obj_info __rte_unused, + struct rte_dpaa2_device *obj) { struct dpaa2_dpcon_dev *dpcon_node; struct dpcon_attr attr; - int ret; + int ret, dpcon_id = obj->object_id; /* Allocate DPAA2 dpcon handle */ dpcon_node = rte_malloc(NULL, sizeof(struct dpaa2_dpcon_dev), 0); @@ -105,9 +118,26 @@ void rte_dpaa2_free_dpcon_dev(struct dpaa2_dpcon_dev *dpcon) } } + +static void 
+rte_dpaa2_close_dpcon_device(int object_id) +{ + struct dpaa2_dpcon_dev *dpcon_dev = NULL; + + dpcon_dev = get_dpcon_from_id((uint32_t)object_id); + + if (dpcon_dev) { + rte_dpaa2_free_dpcon_dev(dpcon_dev); + dpcon_close(&dpcon_dev->dpcon, CMD_PRI_LOW, dpcon_dev->token); + TAILQ_REMOVE(&dpcon_dev_list, dpcon_dev, next); + rte_free(dpcon_dev); + } +} + static struct rte_dpaa2_object rte_dpaa2_dpcon_obj = { .dev_type = DPAA2_CON, .create = rte_dpaa2_create_dpcon_device, + .close = rte_dpaa2_close_dpcon_device, }; RTE_PMD_REGISTER_DPAA2_OBJECT(dpcon, rte_dpaa2_dpcon_obj); diff --git a/drivers/event/dsw/dsw_evdev.c b/drivers/event/dsw/dsw_evdev.c index 1df3121b21..e819412639 100644 --- a/drivers/event/dsw/dsw_evdev.c +++ b/drivers/event/dsw/dsw_evdev.c @@ -453,11 +453,9 @@ dsw_probe(struct rte_vdev_device *vdev) return -EFAULT; dev->dev_ops = &dsw_evdev_ops; - dev->enqueue = dsw_event_enqueue; dev->enqueue_burst = dsw_event_enqueue_burst; dev->enqueue_new_burst = dsw_event_enqueue_new_burst; dev->enqueue_forward_burst = dsw_event_enqueue_forward_burst; - dev->dequeue = dsw_event_dequeue; dev->dequeue_burst = dsw_event_dequeue_burst; dev->maintain = dsw_event_maintain; diff --git a/drivers/event/dsw/dsw_evdev.h b/drivers/event/dsw/dsw_evdev.h index e05cee0e6a..ce52498435 100644 --- a/drivers/event/dsw/dsw_evdev.h +++ b/drivers/event/dsw/dsw_evdev.h @@ -270,7 +270,6 @@ struct __rte_aligned(4) dsw_ctl_msg { struct dsw_queue_flow qfs[DSW_MAX_FLOWS_PER_MIGRATION]; }; -uint16_t dsw_event_enqueue(void *port, const struct rte_event *event); uint16_t dsw_event_enqueue_burst(void *port, const struct rte_event events[], uint16_t events_len); @@ -281,7 +280,6 @@ uint16_t dsw_event_enqueue_forward_burst(void *port, const struct rte_event events[], uint16_t events_len); -uint16_t dsw_event_dequeue(void *port, struct rte_event *ev, uint64_t wait); uint16_t dsw_event_dequeue_burst(void *port, struct rte_event *events, uint16_t num, uint64_t wait); void dsw_event_maintain(void *port, int op); diff --git a/drivers/event/dsw/dsw_event.c b/drivers/event/dsw/dsw_event.c index 6eeeb6da93..e68fb19890 100644 --- a/drivers/event/dsw/dsw_event.c +++ b/drivers/event/dsw/dsw_event.c @@ -1323,12 +1323,6 @@ dsw_port_flush_out_buffers(struct dsw_evdev *dsw, struct dsw_port *source_port) dsw_port_transmit_buffered(dsw, source_port, dest_port_id); } -uint16_t -dsw_event_enqueue(void *port, const struct rte_event *ev) -{ - return dsw_event_enqueue_burst(port, ev, unlikely(ev == NULL) ? 
0 : 1); -} - static __rte_always_inline uint16_t dsw_event_enqueue_burst_generic(struct dsw_port *source_port, const struct rte_event events[], @@ -1462,12 +1456,6 @@ dsw_event_enqueue_forward_burst(void *port, const struct rte_event events[], events_len, 0); } -uint16_t -dsw_event_dequeue(void *port, struct rte_event *events, uint64_t wait) -{ - return dsw_event_dequeue_burst(port, events, 1, wait); -} - static void dsw_port_record_seen_events(struct dsw_port *port, struct rte_event *events, uint16_t num) diff --git a/drivers/event/octeontx/ssovf_evdev.c b/drivers/event/octeontx/ssovf_evdev.c index 3a933b1db7..957fcab04e 100644 --- a/drivers/event/octeontx/ssovf_evdev.c +++ b/drivers/event/octeontx/ssovf_evdev.c @@ -717,10 +717,20 @@ ssovf_close(struct rte_eventdev *dev) } static int -ssovf_parsekv(const char *key __rte_unused, const char *value, void *opaque) +ssovf_parsekv(const char *key, const char *value, void *opaque) { - int *flag = opaque; - *flag = !!atoi(value); + uint8_t *flag = opaque; + uint64_t v; + char *end; + + errno = 0; + v = strtoul(value, &end, 0); + if ((errno != 0) || (value == end) || *end != '\0' || v > 1) { + ssovf_log_err("invalid %s value %s", key, value); + return -EINVAL; + } + + *flag = !!v; return 0; } diff --git a/drivers/event/octeontx/ssovf_evdev.h b/drivers/event/octeontx/ssovf_evdev.h index 1997626438..0f81115a37 100644 --- a/drivers/event/octeontx/ssovf_evdev.h +++ b/drivers/event/octeontx/ssovf_evdev.h @@ -172,7 +172,6 @@ ssovf_pmd_priv(const struct rte_eventdev *eventdev) extern int otx_logtype_ssovf; #define RTE_LOGTYPE_OTX_SSOVF otx_logtype_ssovf -uint16_t ssows_enq(void *port, const struct rte_event *ev); uint16_t ssows_enq_burst(void *port, const struct rte_event ev[], uint16_t nb_events); uint16_t ssows_enq_new_burst(void *port, diff --git a/drivers/event/octeontx/ssovf_worker.c b/drivers/event/octeontx/ssovf_worker.c index 36454939ea..89b5dc056c 100644 --- a/drivers/event/octeontx/ssovf_worker.c +++ b/drivers/event/octeontx/ssovf_worker.c @@ -148,12 +148,14 @@ ssows_deq_timeout_burst_ ##name(void *port, struct rte_event ev[], \ SSO_RX_ADPTR_ENQ_FASTPATH_FUNC #undef R -__rte_always_inline uint16_t __rte_hot -ssows_enq(void *port, const struct rte_event *ev) +uint16_t __rte_hot +ssows_enq_burst(void *port, const struct rte_event ev[], uint16_t nb_events) { struct ssows *ws = port; uint16_t ret = 1; + RTE_SET_USED(nb_events); + switch (ev->op) { case RTE_EVENT_OP_NEW: rte_smp_wmb(); @@ -171,13 +173,6 @@ ssows_enq(void *port, const struct rte_event *ev) return ret; } -uint16_t __rte_hot -ssows_enq_burst(void *port, const struct rte_event ev[], uint16_t nb_events) -{ - RTE_SET_USED(nb_events); - return ssows_enq(port, ev); -} - uint16_t __rte_hot ssows_enq_new_burst(void *port, const struct rte_event ev[], uint16_t nb_events) { @@ -336,7 +331,6 @@ ssovf_fastpath_fns_set(struct rte_eventdev *dev) { struct ssovf_evdev *edev = ssovf_pmd_priv(dev); - dev->enqueue = ssows_enq; dev->enqueue_burst = ssows_enq_burst; dev->enqueue_new_burst = ssows_enq_new_burst; dev->enqueue_forward_burst = ssows_enq_fwd_burst; @@ -360,19 +354,6 @@ ssovf_fastpath_fns_set(struct rte_eventdev *dev) dev->txa_enqueue_same_dest = dev->txa_enqueue; /* Assigning dequeue func pointers */ - const event_dequeue_t ssow_deq[2][2][2] = { -#define R(name, f2, f1, f0, flags) \ - [f2][f1][f0] = ssows_deq_ ##name, - -SSO_RX_ADPTR_ENQ_FASTPATH_FUNC -#undef R - }; - - dev->dequeue = ssow_deq - [!!(edev->rx_offload_flags & OCCTX_RX_VLAN_FLTR_F)] - [!!(edev->rx_offload_flags & 
OCCTX_RX_OFFLOAD_CSUM_F)] - [!!(edev->rx_offload_flags & OCCTX_RX_MULTI_SEG_F)]; - const event_dequeue_burst_t ssow_deq_burst[2][2][2] = { #define R(name, f2, f1, f0, flags) \ [f2][f1][f0] = ssows_deq_burst_ ##name, @@ -387,19 +368,6 @@ SSO_RX_ADPTR_ENQ_FASTPATH_FUNC [!!(edev->rx_offload_flags & OCCTX_RX_MULTI_SEG_F)]; if (edev->is_timeout_deq) { - const event_dequeue_t ssow_deq_timeout[2][2][2] = { -#define R(name, f2, f1, f0, flags) \ - [f2][f1][f0] = ssows_deq_timeout_ ##name, - -SSO_RX_ADPTR_ENQ_FASTPATH_FUNC -#undef R - }; - - dev->dequeue = ssow_deq_timeout - [!!(edev->rx_offload_flags & OCCTX_RX_VLAN_FLTR_F)] - [!!(edev->rx_offload_flags & OCCTX_RX_OFFLOAD_CSUM_F)] - [!!(edev->rx_offload_flags & OCCTX_RX_MULTI_SEG_F)]; - const event_dequeue_burst_t ssow_deq_timeout_burst[2][2][2] = { #define R(name, f2, f1, f0, flags) \ [f2][f1][f0] = ssows_deq_timeout_burst_ ##name, diff --git a/drivers/event/opdl/opdl_evdev.c b/drivers/event/opdl/opdl_evdev.c index 25853166bf..ffa65ef930 100644 --- a/drivers/event/opdl/opdl_evdev.c +++ b/drivers/event/opdl/opdl_evdev.c @@ -718,11 +718,9 @@ opdl_probe(struct rte_vdev_device *vdev) dev->dev_ops = &evdev_opdl_ops; - dev->enqueue = opdl_event_enqueue; dev->enqueue_burst = opdl_event_enqueue_burst; dev->enqueue_new_burst = opdl_event_enqueue_burst; dev->enqueue_forward_burst = opdl_event_enqueue_burst; - dev->dequeue = opdl_event_dequeue; dev->dequeue_burst = opdl_event_dequeue_burst; if (rte_eal_process_type() != RTE_PROC_PRIMARY) diff --git a/drivers/event/skeleton/skeleton_eventdev.c b/drivers/event/skeleton/skeleton_eventdev.c index 848b3be82c..73a1e4e008 100644 --- a/drivers/event/skeleton/skeleton_eventdev.c +++ b/drivers/event/skeleton/skeleton_eventdev.c @@ -25,18 +25,6 @@ #define EVENTDEV_NAME_SKELETON_PMD event_skeleton /**< Skeleton event device PMD name */ -static uint16_t -skeleton_eventdev_enqueue(void *port, const struct rte_event *ev) -{ - struct skeleton_port *sp = port; - - RTE_SET_USED(sp); - RTE_SET_USED(ev); - RTE_SET_USED(port); - - return 0; -} - static uint16_t skeleton_eventdev_enqueue_burst(void *port, const struct rte_event ev[], uint16_t nb_events) @@ -51,19 +39,6 @@ skeleton_eventdev_enqueue_burst(void *port, const struct rte_event ev[], return 0; } -static uint16_t -skeleton_eventdev_dequeue(void *port, struct rte_event *ev, - uint64_t timeout_ticks) -{ - struct skeleton_port *sp = port; - - RTE_SET_USED(sp); - RTE_SET_USED(ev); - RTE_SET_USED(timeout_ticks); - - return 0; -} - static uint16_t skeleton_eventdev_dequeue_burst(void *port, struct rte_event ev[], uint16_t nb_events, uint64_t timeout_ticks) @@ -350,9 +325,7 @@ skeleton_eventdev_init(struct rte_eventdev *eventdev) PMD_DRV_FUNC_TRACE(); eventdev->dev_ops = &skeleton_eventdev_ops; - eventdev->enqueue = skeleton_eventdev_enqueue; eventdev->enqueue_burst = skeleton_eventdev_enqueue_burst; - eventdev->dequeue = skeleton_eventdev_dequeue; eventdev->dequeue_burst = skeleton_eventdev_dequeue_burst; /* For secondary processes, the primary has done all the work */ @@ -440,9 +413,7 @@ skeleton_eventdev_create(const char *name, int socket_id, struct rte_vdev_device } eventdev->dev_ops = &skeleton_eventdev_ops; - eventdev->enqueue = skeleton_eventdev_enqueue; eventdev->enqueue_burst = skeleton_eventdev_enqueue_burst; - eventdev->dequeue = skeleton_eventdev_dequeue; eventdev->dequeue_burst = skeleton_eventdev_dequeue_burst; event_dev_probing_finish(eventdev); diff --git a/drivers/event/sw/sw_evdev.c b/drivers/event/sw/sw_evdev.c index 7913bc547e..3ad82e94ac 100644 --- 
a/drivers/event/sw/sw_evdev.c +++ b/drivers/event/sw/sw_evdev.c @@ -1081,11 +1081,9 @@ sw_probe(struct rte_vdev_device *vdev) return -EFAULT; } dev->dev_ops = &evdev_sw_ops; - dev->enqueue = sw_event_enqueue; dev->enqueue_burst = sw_event_enqueue_burst; dev->enqueue_new_burst = sw_event_enqueue_burst; dev->enqueue_forward_burst = sw_event_enqueue_burst; - dev->dequeue = sw_event_dequeue; dev->dequeue_burst = sw_event_dequeue_burst; if (rte_eal_process_type() != RTE_PROC_PRIMARY) diff --git a/drivers/event/sw/sw_evdev.h b/drivers/event/sw/sw_evdev.h index c6e649c70a..f617c7d966 100644 --- a/drivers/event/sw/sw_evdev.h +++ b/drivers/event/sw/sw_evdev.h @@ -288,11 +288,9 @@ sw_pmd_priv_const(const struct rte_eventdev *eventdev) return eventdev->data->dev_private; } -uint16_t sw_event_enqueue(void *port, const struct rte_event *ev); uint16_t sw_event_enqueue_burst(void *port, const struct rte_event ev[], uint16_t num); -uint16_t sw_event_dequeue(void *port, struct rte_event *ev, uint64_t wait); uint16_t sw_event_dequeue_burst(void *port, struct rte_event *ev, uint16_t num, uint64_t wait); int32_t sw_event_schedule(struct rte_eventdev *dev); diff --git a/drivers/event/sw/sw_evdev_worker.c b/drivers/event/sw/sw_evdev_worker.c index 063b919c7e..4215726513 100644 --- a/drivers/event/sw/sw_evdev_worker.c +++ b/drivers/event/sw/sw_evdev_worker.c @@ -131,12 +131,6 @@ sw_event_enqueue_burst(void *port, const struct rte_event ev[], uint16_t num) return enq; } -uint16_t -sw_event_enqueue(void *port, const struct rte_event *ev) -{ - return sw_event_enqueue_burst(port, ev, 1); -} - uint16_t sw_event_dequeue_burst(void *port, struct rte_event *ev, uint16_t num, uint64_t wait) @@ -178,9 +172,3 @@ sw_event_dequeue_burst(void *port, struct rte_event *ev, uint16_t num, end: return ndeq; } - -uint16_t -sw_event_dequeue(void *port, struct rte_event *ev, uint64_t wait) -{ - return sw_event_dequeue_burst(port, ev, 1, wait); -} diff --git a/drivers/mempool/dpaa2/dpaa2_hw_mempool.c b/drivers/mempool/dpaa2/dpaa2_hw_mempool.c index 886fb7fbb0..c054988513 100644 --- a/drivers/mempool/dpaa2/dpaa2_hw_mempool.c +++ b/drivers/mempool/dpaa2/dpaa2_hw_mempool.c @@ -23,7 +23,7 @@ #include #include "rte_dpaa2_mempool.h" -#include "fslmc_vfio.h" +#include #include #include #include diff --git a/drivers/meson.build b/drivers/meson.build index 5270160c56..495e21b54a 100644 --- a/drivers/meson.build +++ b/drivers/meson.build @@ -29,6 +29,7 @@ subdirs = [ 'event', # depends on common, bus, mempool and net. 'baseband', # depends on common and bus. 'gpu', # depends on common and bus. + 'power', ] if meson.is_cross_build() @@ -59,9 +60,12 @@ default_cflags = machine_args default_cflags += ['-DALLOW_EXPERIMENTAL_API'] default_cflags += ['-DALLOW_INTERNAL_API'] -if cc.has_argument('-Wno-format-truncation') - default_cflags += '-Wno-format-truncation' -endif +warning_disable_cflags = ['-Wno-format-truncation', '-Wno-address-of-packed-member'] +foreach cflag:warning_disable_cflags + if cc.has_argument(cflag) + default_cflags += cflag + endif +endforeach dpdk_drivers_build_dir = meson.current_build_dir() diff --git a/drivers/ml/cnxk/cn10k_ml_ocm.c b/drivers/ml/cnxk/cn10k_ml_ocm.c index 749ddeb344..0032fe82da 100644 --- a/drivers/ml/cnxk/cn10k_ml_ocm.c +++ b/drivers/ml/cnxk/cn10k_ml_ocm.c @@ -2,6 +2,7 @@ * Copyright (c) 2022 Marvell. 
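The dsw, octeontx, opdl, skeleton and sw hunks above all drop their single-event enqueue/dequeue callbacks; those were thin wrappers around the burst handlers with a count of one, and only the *_burst entry points remain registered. A small application-side sketch of the equivalent burst-of-one calls (enqueue_one/dequeue_one and the dev_id/port_id parameters are illustrative, not part of the patch):

#include <stdint.h>
#include <rte_eventdev.h>

static inline int
enqueue_one(uint8_t dev_id, uint8_t port_id, const struct rte_event *ev)
{
        /* returns 1 if the event was accepted, 0 otherwise */
        return rte_event_enqueue_burst(dev_id, port_id, ev, 1);
}

static inline int
dequeue_one(uint8_t dev_id, uint8_t port_id, struct rte_event *ev,
            uint64_t timeout_ticks)
{
        /* returns 1 if an event was received, 0 on timeout */
        return rte_event_dequeue_burst(dev_id, port_id, ev, 1, timeout_ticks);
}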
*/ +#include #include #include @@ -203,11 +204,11 @@ cn10k_ml_ocm_tilecount(uint64_t tilemask, int *start, int *end) PLT_ASSERT(tilemask != 0); - *start = __builtin_ctzl(tilemask); - *end = 64 - __builtin_clzl(tilemask) - 1; + *start = rte_ctz64(tilemask); + *end = 64 - rte_clz64(tilemask) - 1; count = *end - *start + 1; - PLT_ASSERT(count == __builtin_popcountl(tilemask)); + PLT_ASSERT(count == rte_popcount64(tilemask)); return count; } diff --git a/drivers/net/af_packet/rte_eth_af_packet.c b/drivers/net/af_packet/rte_eth_af_packet.c index bfd3bd1eb1..ceb8d9356a 100644 --- a/drivers/net/af_packet/rte_eth_af_packet.c +++ b/drivers/net/af_packet/rte_eth_af_packet.c @@ -691,7 +691,7 @@ open_packet_iface(const char *key __rte_unused, int *sockfd = extra_args; /* Open an AF_PACKET socket... */ - *sockfd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL)); + *sockfd = socket(AF_PACKET, SOCK_RAW, 0); if (*sockfd == -1) { PMD_LOG(ERR, "Could not open AF_PACKET socket"); return -1; @@ -819,7 +819,7 @@ rte_pmd_init_internals(struct rte_vdev_device *dev, for (q = 0; q < nb_queues; q++) { /* Open an AF_PACKET socket for this queue... */ - qsockfd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL)); + qsockfd = socket(AF_PACKET, SOCK_RAW, 0); if (qsockfd == -1) { PMD_LOG_ERRNO(ERR, "%s: could not open AF_PACKET socket", diff --git a/drivers/net/bnxt/bnxt_ethdev.c b/drivers/net/bnxt/bnxt_ethdev.c index 1f7c0d77d5..136e308437 100644 --- a/drivers/net/bnxt/bnxt_ethdev.c +++ b/drivers/net/bnxt/bnxt_ethdev.c @@ -910,6 +910,7 @@ static int bnxt_start_nic(struct bnxt *bp) struct bnxt_rx_queue *rxq = bp->rx_queues[j]; if (!rxq->rx_deferred_start) { + __rte_assume(j < RTE_MAX_QUEUES_PER_PORT); bp->eth_dev->data->rx_queue_state[j] = RTE_ETH_QUEUE_STATE_STARTED; rxq->rx_started = true; @@ -930,6 +931,7 @@ static int bnxt_start_nic(struct bnxt *bp) struct bnxt_tx_queue *txq = bp->tx_queues[j]; if (!txq->tx_deferred_start) { + __rte_assume(j < RTE_MAX_QUEUES_PER_PORT); bp->eth_dev->data->tx_queue_state[j] = RTE_ETH_QUEUE_STATE_STARTED; txq->tx_started = true; diff --git a/drivers/net/bnxt/bnxt_rxq.c b/drivers/net/bnxt/bnxt_rxq.c index 1c25c57ca6..1651c26545 100644 --- a/drivers/net/bnxt/bnxt_rxq.c +++ b/drivers/net/bnxt/bnxt_rxq.c @@ -584,6 +584,7 @@ int bnxt_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id) return -EINVAL; } + __rte_assume(q_id < RTE_MAX_QUEUES_PER_PORT); dev->data->rx_queue_state[q_id] = RTE_ETH_QUEUE_STATE_STOPPED; rxq->rx_started = false; PMD_DRV_LOG_LINE(DEBUG, "Rx queue stopped"); diff --git a/drivers/net/bnxt/bnxt_rxtx_vec_neon.c b/drivers/net/bnxt/bnxt_rxtx_vec_neon.c index 355d41bbd3..840b21cef9 100644 --- a/drivers/net/bnxt/bnxt_rxtx_vec_neon.c +++ b/drivers/net/bnxt/bnxt_rxtx_vec_neon.c @@ -4,7 +4,7 @@ #include #include -#include +#include #include #include #include @@ -290,7 +290,7 @@ recv_burst_vec_neon(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) if (valid == 0) num_valid = 4; else - num_valid = __builtin_ctzl(valid) / 16; + num_valid = rte_ctz64(valid) / 16; if (num_valid == 0) break; diff --git a/drivers/net/bnxt/tf_ulp/ulp_flow_db.c b/drivers/net/bnxt/tf_ulp/ulp_flow_db.c index 2e6ea43ac1..aac974a970 100644 --- a/drivers/net/bnxt/tf_ulp/ulp_flow_db.c +++ b/drivers/net/bnxt/tf_ulp/ulp_flow_db.c @@ -3,7 +3,9 @@ * All rights reserved. 
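The bnxt hunks above annotate the queue index with __rte_assume() before it is used to index the fixed-size rx_queue_state/tx_queue_state arrays, turning a runtime invariant into something the compiler and array-bounds analysis can rely on. A minimal sketch of the same idiom, assuming the macro is exposed through rte_common.h as in recent DPDK (MAX_STATES and set_state are made-up names):

#include <stdint.h>
#include <rte_common.h>

#define MAX_STATES 128

/* The caller guarantees idx < MAX_STATES; __rte_assume() lets the
 * compiler drop the impossible out-of-bounds paths.
 */
static void
set_state(uint8_t states[MAX_STATES], unsigned int idx)
{
        __rte_assume(idx < MAX_STATES);
        states[idx] = 1;
}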
*/ +#include #include + #include "bnxt.h" #include "bnxt_tf_common.h" #include "ulp_utils.h" @@ -938,7 +940,7 @@ ulp_flow_db_next_entry_get(struct bnxt_ulp_flow_db *flow_db, */ if (s_idx == idx) bs &= (-1UL >> mod_fid); - lfid = (idx * ULP_INDEX_BITMAP_SIZE) + __builtin_clzl(bs); + lfid = (idx * ULP_INDEX_BITMAP_SIZE) + rte_clz64(bs); if (*fid >= lfid) { BNXT_TF_DBG(ERR, "Flow Database is corrupt\n"); return -ENOENT; @@ -1480,7 +1482,7 @@ ulp_flow_db_parent_child_flow_next_entry_get(struct bnxt_ulp_flow_db *flow_db, */ if (s_idx == idx) bs &= (-1UL >> mod_fid); - next_fid = (idx * ULP_INDEX_BITMAP_SIZE) + __builtin_clzl(bs); + next_fid = (idx * ULP_INDEX_BITMAP_SIZE) + rte_clz64(bs); if (*child_fid >= next_fid) { BNXT_TF_DBG(ERR, "Parent Child Database is corrupt\n"); return -ENOENT; diff --git a/drivers/net/bnxt/tf_ulp/ulp_gen_hash.c b/drivers/net/bnxt/tf_ulp/ulp_gen_hash.c index d746fbbd4e..9f27b56334 100644 --- a/drivers/net/bnxt/tf_ulp/ulp_gen_hash.c +++ b/drivers/net/bnxt/tf_ulp/ulp_gen_hash.c @@ -3,8 +3,10 @@ * All rights reserved. */ +#include #include #include + #include "bnxt_tf_common.h" #include "ulp_gen_hash.h" #include "ulp_utils.h" @@ -25,7 +27,7 @@ int32_t ulp_bit_alloc_list_alloc(struct bit_alloc_list *blist, if (idx <= bsize_64) { if (bentry) - jdx = __builtin_clzl(~bentry); + jdx = rte_clz64(~bentry); *index = ((idx - 1) * ULP_INDEX_BITMAP_SIZE) + jdx; ULP_INDEX_BITMAP_SET(blist->bdata[(idx - 1)], jdx); return 0; diff --git a/drivers/net/bonding/rte_eth_bond.h b/drivers/net/bonding/rte_eth_bond.h index e59ff8793e..4f79ff9b85 100644 --- a/drivers/net/bonding/rte_eth_bond.h +++ b/drivers/net/bonding/rte_eth_bond.h @@ -125,7 +125,6 @@ rte_eth_bond_free(const char *name); * @return * 0 on success, negative value otherwise */ -__rte_experimental int rte_eth_bond_member_add(uint16_t bonding_port_id, uint16_t member_port_id); @@ -138,7 +137,6 @@ rte_eth_bond_member_add(uint16_t bonding_port_id, uint16_t member_port_id); * @return * 0 on success, negative value otherwise */ -__rte_experimental int rte_eth_bond_member_remove(uint16_t bonding_port_id, uint16_t member_port_id); @@ -199,7 +197,6 @@ rte_eth_bond_primary_get(uint16_t bonding_port_id); * Number of members associated with bonding device on success, * negative value otherwise */ -__rte_experimental int rte_eth_bond_members_get(uint16_t bonding_port_id, uint16_t members[], uint16_t len); @@ -216,7 +213,6 @@ rte_eth_bond_members_get(uint16_t bonding_port_id, uint16_t members[], * Number of active members associated with bonding device on success, * negative value otherwise */ -__rte_experimental int rte_eth_bond_active_members_get(uint16_t bonding_port_id, uint16_t members[], uint16_t len); diff --git a/drivers/net/bonding/rte_eth_bond_8023ad.h b/drivers/net/bonding/rte_eth_bond_8023ad.h index b2deb26e2e..5432eafcfe 100644 --- a/drivers/net/bonding/rte_eth_bond_8023ad.h +++ b/drivers/net/bonding/rte_eth_bond_8023ad.h @@ -193,7 +193,6 @@ rte_eth_bond_8023ad_setup(uint16_t port_id, * -EINVAL if conf is NULL or member id is invalid (not a member of given * bonding device or is not inactive). 
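Several hunks in this patch swap __builtin_ctzl()/__builtin_clzl()/__builtin_popcountl(), whose operand width depends on the size of long on the target ABI, for rte_ctz64()/rte_clz64()/rte_popcount64(), which are explicitly 64-bit everywhere. A small sketch mirroring cn10k_ml_ocm_tilecount() above (tile_range is a hypothetical name):

#include <stdint.h>
#include <rte_bitops.h>

/* Lowest/highest set bit and population count of a 64-bit mask using the
 * fixed-width EAL helpers. tilemask must be non-zero: ctz/clz of 0 are
 * undefined.
 */
static void
tile_range(uint64_t tilemask, int *start, int *end, int *count)
{
        *start = rte_ctz64(tilemask);
        *end = 63 - rte_clz64(tilemask);
        *count = rte_popcount64(tilemask);
}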
*/ -__rte_experimental int rte_eth_bond_8023ad_member_info(uint16_t port_id, uint16_t member_id, struct rte_eth_bond_8023ad_member_info *conf); diff --git a/drivers/net/bonding/rte_eth_bond_pmd.c b/drivers/net/bonding/rte_eth_bond_pmd.c index cda1c37124..91bf2c2345 100644 --- a/drivers/net/bonding/rte_eth_bond_pmd.c +++ b/drivers/net/bonding/rte_eth_bond_pmd.c @@ -5,6 +5,7 @@ #include #include +#include #include #include #include @@ -3982,7 +3983,7 @@ bond_ethdev_configure(struct rte_eth_dev *dev) * Two '1' in binary of 'link_speeds': bit0 and a unique * speed bit. */ - if (__builtin_popcountl(link_speeds) != 2) { + if (rte_popcount64(link_speeds) != 2) { RTE_BOND_LOG(ERR, "please set a unique speed."); return -EINVAL; } diff --git a/drivers/net/bonding/version.map b/drivers/net/bonding/version.map index a309469b1f..eb37dadf76 100644 --- a/drivers/net/bonding/version.map +++ b/drivers/net/bonding/version.map @@ -11,12 +11,17 @@ DPDK_25 { rte_eth_bond_8023ad_ext_distrib; rte_eth_bond_8023ad_ext_distrib_get; rte_eth_bond_8023ad_ext_slowtx; + rte_eth_bond_8023ad_member_info; rte_eth_bond_8023ad_setup; + rte_eth_bond_active_members_get; rte_eth_bond_create; rte_eth_bond_free; rte_eth_bond_link_monitoring_set; rte_eth_bond_mac_address_reset; rte_eth_bond_mac_address_set; + rte_eth_bond_member_add; + rte_eth_bond_member_remove; + rte_eth_bond_members_get; rte_eth_bond_mode_get; rte_eth_bond_mode_set; rte_eth_bond_primary_get; @@ -26,13 +31,3 @@ DPDK_25 { local: *; }; - -EXPERIMENTAL { - # added in 23.11 - global: - rte_eth_bond_8023ad_member_info; - rte_eth_bond_active_members_get; - rte_eth_bond_member_add; - rte_eth_bond_member_remove; - rte_eth_bond_members_get; -}; diff --git a/drivers/net/cnxk/cnxk_eswitch_rxtx.c b/drivers/net/cnxk/cnxk_eswitch_rxtx.c index 6df4ecd762..832c4e5d5c 100644 --- a/drivers/net/cnxk/cnxk_eswitch_rxtx.c +++ b/drivers/net/cnxk/cnxk_eswitch_rxtx.c @@ -120,6 +120,7 @@ cnxk_eswitch_dev_tx_burst(struct cnxk_eswitch_dev *eswitch_dev, uint16_t qid, { struct roc_nix_sq *sq = &eswitch_dev->txq[qid].sqs; struct roc_nix_rq *rq = &eswitch_dev->rxq[qid].rqs; + uint64_t cmd[6 + CNXK_NIX_TX_MSEG_SG_DWORDS - 2]; uint16_t lmt_id, pkt = 0, nb_tx = 0; struct nix_send_ext_s *send_hdr_ext; struct nix_send_hdr_s *send_hdr; @@ -128,11 +129,9 @@ cnxk_eswitch_dev_tx_burst(struct cnxk_eswitch_dev *eswitch_dev, uint16_t qid, union nix_send_sg_s *sg; uintptr_t lmt_base, pa; int64_t fc_pkts, dw_m1; - uint64_t cmd_cn9k[16]; struct rte_mbuf *m; rte_iova_t io_addr; uint16_t segdw; - uint64_t *cmd; uint64_t len; uint8_t off; @@ -149,12 +148,7 @@ cnxk_eswitch_dev_tx_burst(struct cnxk_eswitch_dev *eswitch_dev, uint16_t qid, /* 2(HDR) + 2(EXT_HDR) + 1(SG) + 1(IOVA) = 6/2 - 1 = 2 */ dw_m1 = cn10k_nix_tx_ext_subs(flags) + 1; - if (roc_model_is_cn9k()) { - memset(cmd_cn9k, 0, sizeof(cmd_cn9k)); - cmd = &cmd_cn9k[0]; - } else { - cmd = (uint64_t *)lmt_base; - } + memset(cmd, 0, sizeof(cmd)); send_hdr = (struct nix_send_hdr_s *)&cmd[0]; send_hdr->w0.sq = sq->qid; @@ -204,6 +198,7 @@ cnxk_eswitch_dev_tx_burst(struct cnxk_eswitch_dev *eswitch_dev, uint16_t qid, if (roc_model_is_cn9k()) { nix_cn9k_xmit_one(cmd, sq->lmt_addr, sq->io_addr, segdw); } else { + cn10k_nix_xmit_mv_lmt_base(lmt_base, cmd, flags); /* PA<6:4> = LMTST size-1 in units of 128 bits. Size of the first LMTST in * burst. 
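The bonding hunks above drop __rte_experimental from the member-management API, and the version.map change promotes those 23.11 symbols into the DPDK_25 stable block, so applications no longer need ALLOW_EXPERIMENTAL_API to call them. A short usage sketch under that assumption (make_bond and the port_a/port_b port ids are hypothetical):

#include <stdint.h>
#include <rte_eth_bond.h>

/* Create an active-backup bonding device and attach two members through
 * the now-stable API; returns the bonding port id or a negative value.
 */
static int
make_bond(uint16_t port_a, uint16_t port_b)
{
        int bond_port = rte_eth_bond_create("net_bonding0",
                                            BONDING_MODE_ACTIVE_BACKUP, 0);

        if (bond_port < 0)
                return bond_port;
        if (rte_eth_bond_member_add(bond_port, port_a) != 0 ||
            rte_eth_bond_member_add(bond_port, port_b) != 0)
                return -1;
        return bond_port;
}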
*/ diff --git a/drivers/net/cnxk/cnxk_flow.c b/drivers/net/cnxk/cnxk_flow.c index d3c20e8315..e42e2f8deb 100644 --- a/drivers/net/cnxk/cnxk_flow.c +++ b/drivers/net/cnxk/cnxk_flow.c @@ -5,6 +5,9 @@ #include #define IS_REP_BIT 7 + +#define TNL_DCP_MATCH_ID 5 +#define NRML_MATCH_ID 1 const struct cnxk_rte_flow_term_info term[] = { [RTE_FLOW_ITEM_TYPE_ETH] = {ROC_NPC_ITEM_TYPE_ETH, sizeof(struct rte_flow_item_eth)}, [RTE_FLOW_ITEM_TYPE_VLAN] = {ROC_NPC_ITEM_TYPE_VLAN, sizeof(struct rte_flow_item_vlan)}, @@ -187,12 +190,96 @@ roc_npc_parse_sample_subaction(struct rte_eth_dev *eth_dev, const struct rte_flo return 0; } +static int +append_mark_action(struct roc_npc_action *in_actions, uint8_t has_tunnel_pattern, + uint64_t *free_allocs, int *act_cnt) +{ + struct rte_flow_action_mark *act_mark; + int i = *act_cnt, j = 0; + + /* Add Mark action */ + i++; + act_mark = plt_zmalloc(sizeof(struct rte_flow_action_mark), 0); + if (!act_mark) { + plt_err("Error allocation memory"); + return -ENOMEM; + } + + while (free_allocs[j] != 0) + j++; + free_allocs[j] = (uint64_t)act_mark; + /* Mark ID format: (tunnel type - VxLAN, Geneve << 6) | Tunnel decap */ + act_mark->id = + has_tunnel_pattern ? ((has_tunnel_pattern << 6) | TNL_DCP_MATCH_ID) : NRML_MATCH_ID; + in_actions[i].type = ROC_NPC_ACTION_TYPE_MARK; + in_actions[i].conf = (struct rte_flow_action_mark *)act_mark; + + plt_rep_dbg("Assigned mark ID %x", act_mark->id); + + *act_cnt = i; + + return 0; +} + +static int +append_rss_action(struct cnxk_eth_dev *dev, struct roc_npc_action *in_actions, uint16_t nb_rxq, + uint32_t *flowkey_cfg, uint64_t *free_allocs, uint16_t rss_repte_pf_func, + int *act_cnt) +{ + struct roc_npc_action_rss *rss_conf; + int i = *act_cnt, j = 0, l, rc = 0; + uint16_t *queue_arr; + + rss_conf = plt_zmalloc(sizeof(struct roc_npc_action_rss), 0); + if (!rss_conf) { + plt_err("Failed to allocate memory for rss conf"); + rc = -ENOMEM; + goto fail; + } + + /* Add RSS action */ + rss_conf->queue_num = nb_rxq; + queue_arr = calloc(1, rss_conf->queue_num * sizeof(uint16_t)); + if (!queue_arr) { + plt_err("Failed to allocate memory for rss queue"); + rc = -ENOMEM; + goto free_rss; + } + + for (l = 0; l < nb_rxq; l++) + queue_arr[l] = l; + rss_conf->queue = queue_arr; + rss_conf->key = NULL; + rss_conf->types = RTE_ETH_RSS_IP | RTE_ETH_RSS_UDP | RTE_ETH_RSS_TCP; + + i++; + + in_actions[i].type = ROC_NPC_ACTION_TYPE_RSS; + in_actions[i].conf = (struct roc_npc_action_rss *)rss_conf; + in_actions[i].rss_repte_pf_func = rss_repte_pf_func; + + npc_rss_flowkey_get(dev, &in_actions[i], flowkey_cfg, + RTE_ETH_RSS_IP | RTE_ETH_RSS_UDP | RTE_ETH_RSS_TCP); + + *act_cnt = i; + + while (free_allocs[j] != 0) + j++; + free_allocs[j] = (uint64_t)rss_conf; + + return 0; +free_rss: + rte_free(rss_conf); +fail: + return rc; +} + static int representor_rep_portid_action(struct roc_npc_action *in_actions, struct rte_eth_dev *eth_dev, struct rte_eth_dev *portid_eth_dev, enum rte_flow_action_type act_type, uint8_t rep_pattern, - uint16_t *dst_pf_func, bool is_rep, uint64_t *free_allocs, - int *act_cnt) + uint16_t *dst_pf_func, bool is_rep, uint8_t has_tunnel_pattern, + uint64_t *free_allocs, int *act_cnt, uint32_t *flowkey_cfg) { struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev); struct rte_eth_dev *rep_eth_dev = portid_eth_dev; @@ -203,7 +290,7 @@ representor_rep_portid_action(struct roc_npc_action *in_actions, struct rte_eth_ struct cnxk_rep_dev *rep_dev; struct roc_npc *npc; uint16_t vlan_tci; - int j = 0; + int j = 0, rc; /* For inserting an action in the 
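append_mark_action() above builds the mark ID as (tunnel pattern type << 6) | match id, with TNL_DCP_MATCH_ID (5) meaning tunnel decap and NRML_MATCH_ID (1) a normal match. A hedged sketch of that encoding and the matching decode, assuming exactly the layout described in the patch comment (encode_mark/decode_mark are illustrative names):

#include <stdint.h>

#define TNL_DCP_MATCH_ID 5
#define NRML_MATCH_ID    1

static inline uint32_t
encode_mark(uint8_t has_tunnel_pattern)
{
        return has_tunnel_pattern ?
               (uint32_t)((has_tunnel_pattern << 6) | TNL_DCP_MATCH_ID) :
               NRML_MATCH_ID;
}

static inline void
decode_mark(uint32_t mark, uint8_t *tunnel_type, uint8_t *match_id)
{
        *match_id = mark & 0x3f;            /* low 6 bits: match id      */
        *tunnel_type = (uint8_t)(mark >> 6); /* remaining: tunnel pattern */
}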
list */ int i = *act_cnt; @@ -322,6 +409,24 @@ representor_rep_portid_action(struct roc_npc_action *in_actions, struct rte_eth_ in_actions[i].type = ROC_NPC_ACTION_TYPE_PORT_ID; npc->rep_act_pf_func = rep_dev->hw_func; *dst_pf_func = rep_dev->hw_func; + + /* Append a mark action - needed to identify the flow */ + rc = append_mark_action(in_actions, has_tunnel_pattern, free_allocs, &i); + if (rc) + return rc; + /* Append RSS action if representee has RSS enabled */ + if (rep_dev->nb_rxq > 1) { + /* PF can install rule for only its VF acting as representee */ + if (rep_dev->hw_func && + roc_eswitch_is_repte_pfs_vf(rep_dev->hw_func, + roc_nix_get_pf_func(npc->roc_nix))) { + rc = append_rss_action(dev, in_actions, rep_dev->nb_rxq, + flowkey_cfg, free_allocs, + rep_dev->hw_func, &i); + if (rc) + return rc; + } + } } } done: @@ -336,34 +441,21 @@ representor_portid_action(struct roc_npc_action *in_actions, struct rte_eth_dev int *act_cnt) { struct rte_eth_dev *rep_eth_dev = portid_eth_dev; - struct rte_flow_action_mark *act_mark; struct cnxk_rep_dev *rep_dev; /* For inserting an action in the list */ - int i = *act_cnt, j = 0; + int i = *act_cnt, rc; rep_dev = cnxk_rep_pmd_priv(rep_eth_dev); *dst_pf_func = rep_dev->hw_func; - /* Add Mark action */ - i++; - act_mark = plt_zmalloc(sizeof(struct rte_flow_action_mark), 0); - if (!act_mark) { - plt_err("Error allocation memory"); - return -ENOMEM; - } - - while (free_allocs[j] != 0) - j++; - free_allocs[j] = (uint64_t)act_mark; - /* Mark ID format: (tunnel type - VxLAN, Geneve << 6) | Tunnel decap */ - act_mark->id = has_tunnel_pattern ? ((has_tunnel_pattern << 6) | 5) : 1; - in_actions[i].type = ROC_NPC_ACTION_TYPE_MARK; - in_actions[i].conf = (struct rte_flow_action_mark *)act_mark; + rc = append_mark_action(in_actions, has_tunnel_pattern, free_allocs, &i); + if (rc) + return rc; *act_cnt = i; - plt_rep_dbg("Rep port %d ID %d mark ID is %d rep_dev->hw_func 0x%x", rep_dev->port_id, - rep_dev->rep_id, act_mark->id, rep_dev->hw_func); + plt_rep_dbg("Rep port %d ID %d rep_dev->hw_func 0x%x", rep_dev->port_id, rep_dev->rep_id, + rep_dev->hw_func); return 0; } @@ -439,9 +531,9 @@ cnxk_map_actions(struct rte_eth_dev *eth_dev, const struct rte_flow_attr *attr, eth_dev->data->port_id, if_name, act_ethdev->port_id); if (cnxk_ethdev_is_representor(if_name)) { if (representor_rep_portid_action(in_actions, eth_dev, - portid_eth_dev, actions->type, - rep_pattern, dst_pf_func, is_rep, - free_allocs, &i)) { + portid_eth_dev, actions->type, rep_pattern, + dst_pf_func, is_rep, has_tunnel_pattern, + free_allocs, &i, flowkey_cfg)) { plt_err("Representor port action set failed"); goto err_exit; } @@ -509,6 +601,7 @@ cnxk_map_actions(struct rte_eth_dev *eth_dev, const struct rte_flow_attr *attr, rc = npc_rss_action_validate(eth_dev, attr, actions); if (rc) goto err_exit; + in_actions[i].type = ROC_NPC_ACTION_TYPE_RSS; in_actions[i].conf = actions->conf; npc_rss_flowkey_get(dev, &in_actions[i], flowkey_cfg, @@ -829,8 +922,8 @@ cnxk_flow_create_common(struct rte_eth_dev *eth_dev, const struct rte_flow_attr const struct rte_flow_action actions[], struct rte_flow_error *error, bool is_rep) { - struct roc_npc_item_info in_pattern[ROC_NPC_ITEM_TYPE_END + 1]; - struct roc_npc_action in_actions[ROC_NPC_MAX_ACTION_COUNT]; + struct roc_npc_item_info in_pattern[ROC_NPC_ITEM_TYPE_END + 1] = {0}; + struct roc_npc_action in_actions[ROC_NPC_MAX_ACTION_COUNT] = {0}; struct roc_npc_action_sample in_sample_action; struct cnxk_rep_dev *rep_dev = NULL; struct roc_npc_flow *flow = NULL; @@ 
-1067,11 +1160,8 @@ cnxk_flow_dev_dump_common(struct rte_eth_dev *eth_dev, struct rte_flow *flow, FI } if (flow != NULL) { - rte_flow_error_set(error, EINVAL, - RTE_FLOW_ERROR_TYPE_HANDLE, - NULL, - "Invalid argument"); - return -EINVAL; + roc_npc_flow_mcam_dump(file, npc, (struct roc_npc_flow *)flow); + return 0; } roc_npc_flow_dump(file, npc, -1); diff --git a/drivers/net/cnxk/cnxk_rep.c b/drivers/net/cnxk/cnxk_rep.c index 652d419ad8..e536898d3a 100644 --- a/drivers/net/cnxk/cnxk_rep.c +++ b/drivers/net/cnxk/cnxk_rep.c @@ -29,12 +29,15 @@ switch_domain_id_allocate(struct cnxk_eswitch_dev *eswitch_dev, uint16_t pf) } int -cnxk_rep_state_update(struct cnxk_eswitch_dev *eswitch_dev, uint16_t hw_func, uint16_t *rep_id) +cnxk_rep_state_update(struct cnxk_eswitch_dev *eswitch_dev, uint32_t state, uint16_t *rep_id) { struct cnxk_rep_dev *rep_dev = NULL; struct rte_eth_dev *rep_eth_dev; + uint16_t hw_func, nb_rxq; int i, rc = 0; + nb_rxq = state & 0xFFFF; + hw_func = (state >> 16) & 0xFFFF; /* Delete the individual PFVF flows as common eswitch VF rule will be used. */ rc = cnxk_eswitch_flow_rules_delete(eswitch_dev, hw_func); if (rc) { @@ -61,8 +64,10 @@ cnxk_rep_state_update(struct cnxk_eswitch_dev *eswitch_dev, uint16_t hw_func, ui } rep_dev = cnxk_rep_pmd_priv(rep_eth_dev); - if (rep_dev->hw_func == hw_func && rep_dev->is_vf_active) + if (rep_dev->hw_func == hw_func && rep_dev->is_vf_active) { rep_dev->native_repte = false; + rep_dev->nb_rxq = nb_rxq; + } } return 0; @@ -253,12 +258,30 @@ cnxk_representee_state_msg_process(struct cnxk_eswitch_dev *eswitch_dev, uint16_ static int cnxk_representee_mtu_msg_process(struct cnxk_eswitch_dev *eswitch_dev, uint16_t hw_func, - uint16_t rep_id, uint16_t mtu) + uint16_t mtu) { + struct cnxk_eswitch_devargs *esw_da; struct cnxk_rep_dev *rep_dev = NULL; struct rte_eth_dev *rep_eth_dev; + uint16_t rep_id = UINT16_MAX; int rc = 0; - int i; + int i, j; + + /* Traversing the initialized represented list */ + for (i = 0; i < eswitch_dev->nb_esw_da; i++) { + esw_da = &eswitch_dev->esw_da[i]; + for (j = 0; j < esw_da->nb_repr_ports; j++) { + if (esw_da->repr_hw_info[j].hw_func == hw_func) { + rep_id = esw_da->repr_hw_info[j].rep_id; + break; + } + } + if (rep_id != UINT16_MAX) + break; + } + /* No action on PF func for which representor has not been created */ + if (rep_id == UINT16_MAX) + goto done; for (i = 0; i < eswitch_dev->repr_cnt.nb_repr_probed; i++) { rep_eth_dev = eswitch_dev->rep_info[i].rep_eth_dev; @@ -289,17 +312,20 @@ cnxk_representee_msg_process(struct cnxk_eswitch_dev *eswitch_dev, switch (notify_msg->type) { case ROC_ESWITCH_REPTE_STATE: - plt_rep_dbg(" type %d: hw_func %x action %s", notify_msg->type, - notify_msg->state.hw_func, + plt_rep_dbg(" REPTE STATE: hw_func %x action %s", notify_msg->state.hw_func, notify_msg->state.enable ? "enable" : "disable"); rc = cnxk_representee_state_msg_process(eswitch_dev, notify_msg->state.hw_func, notify_msg->state.enable); break; + case ROC_ESWITCH_LINK_STATE: + plt_rep_dbg(" LINK STATE: hw_func %x action %s", notify_msg->link.hw_func, + notify_msg->link.enable ? 
"enable" : "disable"); + break; case ROC_ESWITCH_REPTE_MTU: - plt_rep_dbg(" type %d: hw_func %x rep_id %d mtu %d", notify_msg->type, - notify_msg->mtu.hw_func, notify_msg->mtu.rep_id, notify_msg->mtu.mtu); + plt_rep_dbg(" REPTE MTU: hw_func %x rep_id %d mtu %d", notify_msg->mtu.hw_func, + notify_msg->mtu.rep_id, notify_msg->mtu.mtu); rc = cnxk_representee_mtu_msg_process(eswitch_dev, notify_msg->mtu.hw_func, - notify_msg->mtu.rep_id, notify_msg->mtu.mtu); + notify_msg->mtu.mtu); break; default: plt_err("Invalid notification msg received %d", notify_msg->type); diff --git a/drivers/net/cnxk/cnxk_rep.h b/drivers/net/cnxk/cnxk_rep.h index aaae2d4e8f..b9601854ce 100644 --- a/drivers/net/cnxk/cnxk_rep.h +++ b/drivers/net/cnxk/cnxk_rep.h @@ -62,7 +62,10 @@ struct cnxk_rep_dev { uint16_t rep_id; uint16_t switch_domain_id; struct cnxk_eswitch_dev *parent_dev; + /* Representee HW func */ uint16_t hw_func; + /* No of queues configured at representee */ + uint16_t nb_rxq; bool is_vf_active; bool native_repte; struct cnxk_rep_rxq *rxq; @@ -130,7 +133,7 @@ int cnxk_rep_dev_close(struct rte_eth_dev *eth_dev); int cnxk_rep_stats_get(struct rte_eth_dev *eth_dev, struct rte_eth_stats *stats); int cnxk_rep_stats_reset(struct rte_eth_dev *eth_dev); int cnxk_rep_flow_ops_get(struct rte_eth_dev *ethdev, const struct rte_flow_ops **ops); -int cnxk_rep_state_update(struct cnxk_eswitch_dev *eswitch_dev, uint16_t hw_func, uint16_t *rep_id); +int cnxk_rep_state_update(struct cnxk_eswitch_dev *eswitch_dev, uint32_t state, uint16_t *rep_id); int cnxk_rep_promiscuous_enable(struct rte_eth_dev *ethdev); int cnxk_rep_promiscuous_disable(struct rte_eth_dev *ethdev); int cnxk_rep_mac_addr_set(struct rte_eth_dev *eth_dev, struct rte_ether_addr *addr); diff --git a/drivers/net/cnxk/cnxk_rep_msg.h b/drivers/net/cnxk/cnxk_rep_msg.h index bfd9ce9c7b..f5cb636c6c 100644 --- a/drivers/net/cnxk/cnxk_rep_msg.h +++ b/drivers/net/cnxk/cnxk_rep_msg.h @@ -86,7 +86,7 @@ typedef struct cnxk_rep_msg_ack_data1 { typedef struct cnxk_rep_msg_ready_data { uint8_t val; uint16_t nb_ports; - uint16_t data[]; + uint32_t data[]; } __rte_packed cnxk_rep_msg_ready_data_t; /* Exit msg */ diff --git a/drivers/net/cpfl/cpfl_flow_engine_fxp.c b/drivers/net/cpfl/cpfl_flow_engine_fxp.c index 2c75ea6577..689ed82f18 100644 --- a/drivers/net/cpfl/cpfl_flow_engine_fxp.c +++ b/drivers/net/cpfl/cpfl_flow_engine_fxp.c @@ -10,6 +10,8 @@ #include #include #include + +#include #include #include #include @@ -20,6 +22,7 @@ #include #include #include + #include "cpfl_rules.h" #include "cpfl_logs.h" #include "cpfl_ethdev.h" @@ -298,11 +301,6 @@ cpfl_fxp_parse_action(struct cpfl_itf *itf, PMD_DRV_LOG(ERR, "Cannot use port_representor action for the represented_port"); goto err; } - if (action_type == RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT && - dst_itf->type == CPFL_ITF_TYPE_VPORT) { - PMD_DRV_LOG(ERR, "Cannot use represented_port action for the local vport"); - goto err; - } if (is_vsi) dev_id = cpfl_get_vsi_id(dst_itf); else @@ -608,7 +606,7 @@ cpfl_fxp_mod_idx_alloc(struct cpfl_adapter_ext *ad) if (!rte_bitmap_scan(ad->mod_bm, &pos, &slab)) return CPFL_MAX_MOD_CONTENT_INDEX; - pos += __builtin_ffsll(slab) - 1; + pos += rte_bsf64(slab); rte_bitmap_clear(ad->mod_bm, pos); return pos; diff --git a/drivers/net/dpaa2/base/dpaa2_hw_dpni.c b/drivers/net/dpaa2/base/dpaa2_hw_dpni.c index 4d33b51fea..20b37a97bb 100644 --- a/drivers/net/dpaa2/base/dpaa2_hw_dpni.c +++ b/drivers/net/dpaa2/base/dpaa2_hw_dpni.c @@ -30,8 +30,7 @@ dpaa2_distset_to_dpkg_profile_cfg( int 
rte_pmd_dpaa2_set_custom_hash(uint16_t port_id, - uint16_t offset, - uint8_t size) + uint16_t offset, uint8_t size) { struct rte_eth_dev *eth_dev = &rte_eth_devices[port_id]; struct dpaa2_dev_priv *priv = eth_dev->data->dev_private; @@ -52,8 +51,8 @@ rte_pmd_dpaa2_set_custom_hash(uint16_t port_id, return -EINVAL; } - p_params = rte_zmalloc( - NULL, DIST_PARAM_IOVA_SIZE, RTE_CACHE_LINE_SIZE); + p_params = rte_zmalloc(NULL, + DIST_PARAM_IOVA_SIZE, RTE_CACHE_LINE_SIZE); if (!p_params) { DPAA2_PMD_ERR("Unable to allocate flow-dist parameters"); return -ENOMEM; @@ -73,17 +72,23 @@ rte_pmd_dpaa2_set_custom_hash(uint16_t port_id, } memset(&tc_cfg, 0, sizeof(struct dpni_rx_tc_dist_cfg)); - tc_cfg.key_cfg_iova = (size_t)(DPAA2_VADDR_TO_IOVA(p_params)); + tc_cfg.key_cfg_iova = DPAA2_VADDR_TO_IOVA_AND_CHECK(p_params, + DIST_PARAM_IOVA_SIZE); + if (tc_cfg.key_cfg_iova == RTE_BAD_IOVA) { + DPAA2_PMD_ERR("%s: No IOMMU map for key cfg(%p)", + __func__, p_params); + rte_free(p_params); + return -ENOBUFS; + } + tc_cfg.dist_size = eth_dev->data->nb_rx_queues; tc_cfg.dist_mode = DPNI_DIST_MODE_HASH; ret = dpni_set_rx_tc_dist(dpni, CMD_PRI_LOW, priv->token, tc_index, - &tc_cfg); + &tc_cfg); rte_free(p_params); if (ret) { - DPAA2_PMD_ERR( - "Setting distribution for Rx failed with err: %d", - ret); + DPAA2_PMD_ERR("Set RX TC dist failed(err=%d)", ret); return ret; } @@ -115,8 +120,8 @@ dpaa2_setup_flow_dist(struct rte_eth_dev *eth_dev, if (tc_dist_queues > priv->dist_queues) tc_dist_queues = priv->dist_queues; - p_params = rte_malloc( - NULL, DIST_PARAM_IOVA_SIZE, RTE_CACHE_LINE_SIZE); + p_params = rte_malloc(NULL, + DIST_PARAM_IOVA_SIZE, RTE_CACHE_LINE_SIZE); if (!p_params) { DPAA2_PMD_ERR("Unable to allocate flow-dist parameters"); return -ENOMEM; @@ -133,7 +138,15 @@ dpaa2_setup_flow_dist(struct rte_eth_dev *eth_dev, return ret; } - tc_cfg.key_cfg_iova = (uint64_t)(DPAA2_VADDR_TO_IOVA(p_params)); + tc_cfg.key_cfg_iova = DPAA2_VADDR_TO_IOVA_AND_CHECK(p_params, + DIST_PARAM_IOVA_SIZE); + if (tc_cfg.key_cfg_iova == RTE_BAD_IOVA) { + DPAA2_PMD_ERR("%s: No IOMMU map for key cfg(%p)", + __func__, p_params); + rte_free(p_params); + return -ENOBUFS; + } + tc_cfg.dist_size = tc_dist_queues; tc_cfg.enable = true; tc_cfg.tc = tc_index; @@ -148,17 +161,15 @@ dpaa2_setup_flow_dist(struct rte_eth_dev *eth_dev, ret = dpni_set_rx_hash_dist(dpni, CMD_PRI_LOW, priv->token, &tc_cfg); rte_free(p_params); if (ret) { - DPAA2_PMD_ERR( - "Setting distribution for Rx failed with err: %d", - ret); + DPAA2_PMD_ERR("RX Hash dist for failed(err=%d)", ret); return ret; } return 0; } -int dpaa2_remove_flow_dist( - struct rte_eth_dev *eth_dev, +int +dpaa2_remove_flow_dist(struct rte_eth_dev *eth_dev, uint8_t tc_index) { struct dpaa2_dev_priv *priv = eth_dev->data->dev_private; @@ -168,8 +179,8 @@ int dpaa2_remove_flow_dist( void *p_params; int ret; - p_params = rte_malloc( - NULL, DIST_PARAM_IOVA_SIZE, RTE_CACHE_LINE_SIZE); + p_params = rte_malloc(NULL, + DIST_PARAM_IOVA_SIZE, RTE_CACHE_LINE_SIZE); if (!p_params) { DPAA2_PMD_ERR("Unable to allocate flow-dist parameters"); return -ENOMEM; @@ -177,7 +188,15 @@ int dpaa2_remove_flow_dist( memset(&tc_cfg, 0, sizeof(struct dpni_rx_dist_cfg)); tc_cfg.dist_size = 0; - tc_cfg.key_cfg_iova = (uint64_t)(DPAA2_VADDR_TO_IOVA(p_params)); + tc_cfg.key_cfg_iova = DPAA2_VADDR_TO_IOVA_AND_CHECK(p_params, + DIST_PARAM_IOVA_SIZE); + if (tc_cfg.key_cfg_iova == RTE_BAD_IOVA) { + DPAA2_PMD_ERR("%s: No IOMMU map for key cfg(%p)", + __func__, p_params); + rte_free(p_params); + return -ENOBUFS; + } + 
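The dpaa2 hash-distribution code above stops trusting the plain virtual-to-IOVA translation: key configuration buffers are now translated with the driver's DPAA2_VADDR_TO_IOVA_AND_CHECK() and the call fails with -ENOBUFS when no IOMMU mapping exists. The same idea expressed with public EAL calls, as a sketch for an rte_malloc'd DMA buffer (alloc_dma_buf is a hypothetical helper, not the driver macro):

#include <rte_common.h>
#include <rte_malloc.h>
#include <rte_memory.h>

/* Never hand a buffer to hardware without confirming it has a valid IOVA;
 * rte_malloc_virt2iova() returns RTE_BAD_IOVA when no mapping exists.
 */
static void *
alloc_dma_buf(size_t len, rte_iova_t *iova)
{
        void *va = rte_zmalloc(NULL, len, RTE_CACHE_LINE_SIZE);

        if (va == NULL)
                return NULL;

        *iova = rte_malloc_virt2iova(va);
        if (*iova == RTE_BAD_IOVA) {
                rte_free(va);
                return NULL;
        }
        return va;
}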
tc_cfg.enable = true; tc_cfg.tc = tc_index; @@ -194,9 +213,7 @@ int dpaa2_remove_flow_dist( &tc_cfg); rte_free(p_params); if (ret) - DPAA2_PMD_ERR( - "Setting distribution for Rx failed with err: %d", - ret); + DPAA2_PMD_ERR("RX hash dist failed(err=%d)", ret); return ret; } diff --git a/drivers/net/dpaa2/dpaa2_ethdev.c b/drivers/net/dpaa2/dpaa2_ethdev.c index 7b3e587a8d..8cbe481fb5 100644 --- a/drivers/net/dpaa2/dpaa2_ethdev.c +++ b/drivers/net/dpaa2/dpaa2_ethdev.c @@ -1,7 +1,7 @@ /* * SPDX-License-Identifier: BSD-3-Clause * * Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved. - * Copyright 2016-2021 NXP + * Copyright 2016-2024 NXP * */ @@ -75,6 +75,8 @@ int dpaa2_timestamp_dynfield_offset = -1; /* Enable error queue */ bool dpaa2_enable_err_queue; +bool dpaa2_print_parser_result; + #define MAX_NB_RX_DESC 11264 int total_nb_rx_desc; @@ -121,9 +123,9 @@ dpaa2_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on) PMD_INIT_FUNC_TRACE(); - if (dpni == NULL) { + if (!dpni) { DPAA2_PMD_ERR("dpni is NULL"); - return -1; + return -EINVAL; } if (on) @@ -172,8 +174,8 @@ dpaa2_vlan_offload_set(struct rte_eth_dev *dev, int mask) static int dpaa2_vlan_tpid_set(struct rte_eth_dev *dev, - enum rte_vlan_type vlan_type __rte_unused, - uint16_t tpid) + enum rte_vlan_type vlan_type __rte_unused, + uint16_t tpid) { struct dpaa2_dev_priv *priv = dev->data->dev_private; struct fsl_mc_io *dpni = dev->process_private; @@ -210,8 +212,7 @@ dpaa2_vlan_tpid_set(struct rte_eth_dev *dev, static int dpaa2_fw_version_get(struct rte_eth_dev *dev, - char *fw_version, - size_t fw_size) + char *fw_version, size_t fw_size) { int ret; struct fsl_mc_io *dpni = dev->process_private; @@ -243,7 +244,8 @@ dpaa2_fw_version_get(struct rte_eth_dev *dev, } static int -dpaa2_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) +dpaa2_dev_info_get(struct rte_eth_dev *dev, + struct rte_eth_dev_info *dev_info) { struct dpaa2_dev_priv *priv = dev->data->dev_private; @@ -289,8 +291,8 @@ dpaa2_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) static int dpaa2_dev_rx_burst_mode_get(struct rte_eth_dev *dev, - __rte_unused uint16_t queue_id, - struct rte_eth_burst_mode *mode) + __rte_unused uint16_t queue_id, + struct rte_eth_burst_mode *mode) { struct rte_eth_conf *eth_conf = &dev->data->dev_conf; int ret = -EINVAL; @@ -366,7 +368,7 @@ dpaa2_alloc_rx_tx_queues(struct rte_eth_dev *dev) uint8_t num_rxqueue_per_tc; struct dpaa2_queue *mc_q, *mcq; uint32_t tot_queues; - int i; + int i, ret = 0; struct dpaa2_queue *dpaa2_q; PMD_INIT_FUNC_TRACE(); @@ -380,55 +382,45 @@ dpaa2_alloc_rx_tx_queues(struct rte_eth_dev *dev) RTE_CACHE_LINE_SIZE); if (!mc_q) { DPAA2_PMD_ERR("Memory allocation failed for rx/tx queues"); - return -1; + return -ENOBUFS; } for (i = 0; i < priv->nb_rx_queues; i++) { mc_q->eth_data = dev->data; priv->rx_vq[i] = mc_q++; - dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[i]; - dpaa2_q->q_storage = rte_malloc("dq_storage", - sizeof(struct queue_storage_info_t), - RTE_CACHE_LINE_SIZE); - if (!dpaa2_q->q_storage) - goto fail; - - memset(dpaa2_q->q_storage, 0, - sizeof(struct queue_storage_info_t)); - if (dpaa2_alloc_dq_storage(dpaa2_q->q_storage)) + dpaa2_q = priv->rx_vq[i]; + ret = dpaa2_queue_storage_alloc(dpaa2_q, + RTE_MAX_LCORE); + if (ret) goto fail; } if (dpaa2_enable_err_queue) { priv->rx_err_vq = rte_zmalloc("dpni_rx_err", sizeof(struct dpaa2_queue), 0); - if (!priv->rx_err_vq) + if (!priv->rx_err_vq) { + ret = -ENOBUFS; goto fail; + } - dpaa2_q = (struct 
dpaa2_queue *)priv->rx_err_vq; - dpaa2_q->q_storage = rte_malloc("err_dq_storage", - sizeof(struct queue_storage_info_t) * - RTE_MAX_LCORE, - RTE_CACHE_LINE_SIZE); - if (!dpaa2_q->q_storage) + dpaa2_q = priv->rx_err_vq; + ret = dpaa2_queue_storage_alloc(dpaa2_q, + RTE_MAX_LCORE); + if (ret) goto fail; - - memset(dpaa2_q->q_storage, 0, - sizeof(struct queue_storage_info_t)); - for (i = 0; i < RTE_MAX_LCORE; i++) - if (dpaa2_alloc_dq_storage(&dpaa2_q->q_storage[i])) - goto fail; } for (i = 0; i < priv->nb_tx_queues; i++) { mc_q->eth_data = dev->data; - mc_q->flow_id = 0xffff; + mc_q->flow_id = DPAA2_INVALID_FLOW_ID; priv->tx_vq[i] = mc_q++; dpaa2_q = (struct dpaa2_queue *)priv->tx_vq[i]; dpaa2_q->cscn = rte_malloc(NULL, sizeof(struct qbman_result), 16); - if (!dpaa2_q->cscn) + if (!dpaa2_q->cscn) { + ret = -ENOBUFS; goto fail_tx; + } } if (priv->flags & DPAA2_TX_CONF_ENABLE) { @@ -438,24 +430,17 @@ dpaa2_alloc_rx_tx_queues(struct rte_eth_dev *dev) mc_q->tc_index = i; mc_q->flow_id = 0; priv->tx_conf_vq[i] = mc_q++; - dpaa2_q = (struct dpaa2_queue *)priv->tx_conf_vq[i]; - dpaa2_q->q_storage = - rte_malloc("dq_storage", - sizeof(struct queue_storage_info_t), - RTE_CACHE_LINE_SIZE); - if (!dpaa2_q->q_storage) - goto fail_tx_conf; - - memset(dpaa2_q->q_storage, 0, - sizeof(struct queue_storage_info_t)); - if (dpaa2_alloc_dq_storage(dpaa2_q->q_storage)) + dpaa2_q = priv->tx_conf_vq[i]; + ret = dpaa2_queue_storage_alloc(dpaa2_q, + RTE_MAX_LCORE); + if (ret) goto fail_tx_conf; } } vq_id = 0; for (dist_idx = 0; dist_idx < priv->nb_rx_queues; dist_idx++) { - mcq = (struct dpaa2_queue *)priv->rx_vq[vq_id]; + mcq = priv->rx_vq[vq_id]; mcq->tc_index = dist_idx / num_rxqueue_per_tc; mcq->flow_id = dist_idx % num_rxqueue_per_tc; vq_id++; @@ -465,15 +450,15 @@ dpaa2_alloc_rx_tx_queues(struct rte_eth_dev *dev) fail_tx_conf: i -= 1; while (i >= 0) { - dpaa2_q = (struct dpaa2_queue *)priv->tx_conf_vq[i]; - rte_free(dpaa2_q->q_storage); + dpaa2_q = priv->tx_conf_vq[i]; + dpaa2_queue_storage_free(dpaa2_q, RTE_MAX_LCORE); priv->tx_conf_vq[i--] = NULL; } i = priv->nb_tx_queues; fail_tx: i -= 1; while (i >= 0) { - dpaa2_q = (struct dpaa2_queue *)priv->tx_vq[i]; + dpaa2_q = priv->tx_vq[i]; rte_free(dpaa2_q->cscn); priv->tx_vq[i--] = NULL; } @@ -482,21 +467,18 @@ dpaa2_alloc_rx_tx_queues(struct rte_eth_dev *dev) i -= 1; mc_q = priv->rx_vq[0]; while (i >= 0) { - dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[i]; - dpaa2_free_dq_storage(dpaa2_q->q_storage); - rte_free(dpaa2_q->q_storage); + dpaa2_q = priv->rx_vq[i]; + dpaa2_queue_storage_free(dpaa2_q, RTE_MAX_LCORE); priv->rx_vq[i--] = NULL; } if (dpaa2_enable_err_queue) { - dpaa2_q = (struct dpaa2_queue *)priv->rx_err_vq; - if (dpaa2_q->q_storage) - dpaa2_free_dq_storage(dpaa2_q->q_storage); - rte_free(dpaa2_q->q_storage); + dpaa2_q = priv->rx_err_vq; + dpaa2_queue_storage_free(dpaa2_q, RTE_MAX_LCORE); } rte_free(mc_q); - return -1; + return ret; } static void @@ -512,20 +494,21 @@ dpaa2_free_rx_tx_queues(struct rte_eth_dev *dev) if (priv->rx_vq[0]) { /* cleaning up queue storage */ for (i = 0; i < priv->nb_rx_queues; i++) { - dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[i]; - rte_free(dpaa2_q->q_storage); + dpaa2_q = priv->rx_vq[i]; + dpaa2_queue_storage_free(dpaa2_q, + RTE_MAX_LCORE); } /* cleanup tx queue cscn */ for (i = 0; i < priv->nb_tx_queues; i++) { - dpaa2_q = (struct dpaa2_queue *)priv->tx_vq[i]; + dpaa2_q = priv->tx_vq[i]; rte_free(dpaa2_q->cscn); } if (priv->flags & DPAA2_TX_CONF_ENABLE) { /* cleanup tx conf queue storage */ for (i = 0; i < 
priv->nb_tx_queues; i++) { - dpaa2_q = (struct dpaa2_queue *) - priv->tx_conf_vq[i]; - rte_free(dpaa2_q->q_storage); + dpaa2_q = priv->tx_conf_vq[i]; + dpaa2_queue_storage_free(dpaa2_q, + RTE_MAX_LCORE); } } /*free memory for all queues (RX+TX) */ @@ -548,6 +531,9 @@ dpaa2_eth_dev_configure(struct rte_eth_dev *dev) int tx_l4_csum_offload = false; int ret, tc_index; uint32_t max_rx_pktlen; +#if defined(RTE_LIBRTE_IEEE1588) + uint16_t ptp_correction_offset; +#endif PMD_INIT_FUNC_TRACE(); @@ -576,9 +562,11 @@ dpaa2_eth_dev_configure(struct rte_eth_dev *dev) DPAA2_PMD_ERR("Unable to set mtu. check config"); return ret; } - DPAA2_PMD_INFO("MTU configured for the device: %d", + DPAA2_PMD_DEBUG("MTU configured for the device: %d", dev->data->mtu); } else { + DPAA2_PMD_ERR("Configured mtu %d and calculated max-pkt-len is %d which should be <= %d", + eth_conf->rxmode.mtu, max_rx_pktlen, DPAA2_MAX_RX_PKT_LEN); return -1; } @@ -632,6 +620,11 @@ dpaa2_eth_dev_configure(struct rte_eth_dev *dev) dpaa2_enable_ts[dev->data->port_id] = true; } +#if defined(RTE_LIBRTE_IEEE1588) + /* By default setting ptp correction offset for Ethernet SYNC packets */ + ptp_correction_offset = RTE_ETHER_HDR_LEN + 8; + rte_pmd_dpaa2_set_one_step_ts(dev->data->port_id, ptp_correction_offset, 0); +#endif if (tx_offloads & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM) tx_l3_csum_offload = true; @@ -706,14 +699,14 @@ dpaa2_eth_dev_configure(struct rte_eth_dev *dev) */ static int dpaa2_dev_rx_queue_setup(struct rte_eth_dev *dev, - uint16_t rx_queue_id, - uint16_t nb_rx_desc, - unsigned int socket_id __rte_unused, - const struct rte_eth_rxconf *rx_conf, - struct rte_mempool *mb_pool) + uint16_t rx_queue_id, + uint16_t nb_rx_desc, + unsigned int socket_id __rte_unused, + const struct rte_eth_rxconf *rx_conf, + struct rte_mempool *mb_pool) { struct dpaa2_dev_priv *priv = dev->data->dev_private; - struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private; + struct fsl_mc_io *dpni = dev->process_private; struct dpaa2_queue *dpaa2_q; struct dpni_queue cfg; uint8_t options = 0; @@ -735,8 +728,8 @@ dpaa2_dev_rx_queue_setup(struct rte_eth_dev *dev, /* Rx deferred start is not supported */ if (rx_conf->rx_deferred_start) { - DPAA2_PMD_ERR("%p:Rx deferred start not supported", - (void *)dev); + DPAA2_PMD_ERR("%s:Rx deferred start not supported", + dev->data->name); return -EINVAL; } @@ -752,7 +745,7 @@ dpaa2_dev_rx_queue_setup(struct rte_eth_dev *dev, if (ret) return ret; } - dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[rx_queue_id]; + dpaa2_q = priv->rx_vq[rx_queue_id]; dpaa2_q->mb_pool = mb_pool; /**< mbuf pool to populate RX ring. */ dpaa2_q->bp_array = rte_dpaa2_bpid_info; dpaa2_q->nb_desc = UINT16_MAX; @@ -778,7 +771,7 @@ dpaa2_dev_rx_queue_setup(struct rte_eth_dev *dev, cfg.cgid = i; dpaa2_q->cgid = cfg.cgid; } else { - dpaa2_q->cgid = 0xff; + dpaa2_q->cgid = DPAA2_INVALID_CGID; } /*if ls2088 or rev2 device, enable the stashing */ @@ -802,10 +795,10 @@ dpaa2_dev_rx_queue_setup(struct rte_eth_dev *dev, } } ret = dpni_set_queue(dpni, CMD_PRI_LOW, priv->token, DPNI_QUEUE_RX, - dpaa2_q->tc_index, flow_id, options, &cfg); + dpaa2_q->tc_index, flow_id, options, &cfg); if (ret) { DPAA2_PMD_ERR("Error in setting the rx flow: = %d", ret); - return -1; + return ret; } if (!(priv->flags & DPAA2_RX_TAILDROP_OFF)) { @@ -818,7 +811,7 @@ dpaa2_dev_rx_queue_setup(struct rte_eth_dev *dev, * There is no HW restriction, but number of CGRs are limited, * hence this restriction is placed. 
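The repeated rte_malloc()/memset()/dpaa2_alloc_dq_storage() sequences in dpaa2_alloc_rx_tx_queues() are folded into dpaa2_queue_storage_alloc()/dpaa2_queue_storage_free() above. The patch does not show those helpers; the sketch below is a plausible shape for the allocator, reconstructed from the open-coded sequence it replaces and using the driver-internal names visible in the removed lines (the include path and exact behaviour are assumptions):

#include <errno.h>
#include <rte_malloc.h>
#include "dpaa2_hw_pvt.h"   /* assumed home of struct dpaa2_queue et al. */

static int
dpaa2_queue_storage_alloc_sketch(struct dpaa2_queue *q, uint32_t num)
{
        uint32_t i;

        q->q_storage = rte_zmalloc("dq_storage",
                        sizeof(struct queue_storage_info_t) * num,
                        RTE_CACHE_LINE_SIZE);
        if (q->q_storage == NULL)
                return -ENOBUFS;

        for (i = 0; i < num; i++) {
                if (dpaa2_alloc_dq_storage(&q->q_storage[i])) {
                        /* unwind whatever was set up so far */
                        while (i--)
                                dpaa2_free_dq_storage(&q->q_storage[i]);
                        rte_free(q->q_storage);
                        q->q_storage = NULL;
                        return -ENOBUFS;
                }
        }
        return 0;
}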
*/ - if (dpaa2_q->cgid != 0xff) { + if (dpaa2_q->cgid != DPAA2_INVALID_CGID) { /*enabling per rx queue congestion control */ taildrop.threshold = nb_rx_desc; taildrop.units = DPNI_CONGESTION_UNIT_FRAMES; @@ -844,15 +837,15 @@ dpaa2_dev_rx_queue_setup(struct rte_eth_dev *dev, } if (ret) { DPAA2_PMD_ERR("Error in setting taildrop. err=(%d)", - ret); - return -1; + ret); + return ret; } } else { /* Disable tail Drop */ struct dpni_taildrop taildrop = {0}; DPAA2_PMD_INFO("Tail drop is disabled on queue"); taildrop.enable = 0; - if (dpaa2_q->cgid != 0xff) { + if (dpaa2_q->cgid != DPAA2_INVALID_CGID) { ret = dpni_set_taildrop(dpni, CMD_PRI_LOW, priv->token, DPNI_CP_CONGESTION_GROUP, DPNI_QUEUE_RX, dpaa2_q->tc_index, @@ -864,8 +857,8 @@ dpaa2_dev_rx_queue_setup(struct rte_eth_dev *dev, } if (ret) { DPAA2_PMD_ERR("Error in setting taildrop. err=(%d)", - ret); - return -1; + ret); + return ret; } } @@ -875,31 +868,31 @@ dpaa2_dev_rx_queue_setup(struct rte_eth_dev *dev, static int dpaa2_dev_tx_queue_setup(struct rte_eth_dev *dev, - uint16_t tx_queue_id, - uint16_t nb_tx_desc, - unsigned int socket_id __rte_unused, - const struct rte_eth_txconf *tx_conf) + uint16_t tx_queue_id, + uint16_t nb_tx_desc, + unsigned int socket_id __rte_unused, + const struct rte_eth_txconf *tx_conf) { struct dpaa2_dev_priv *priv = dev->data->dev_private; - struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *) - priv->tx_vq[tx_queue_id]; - struct dpaa2_queue *dpaa2_tx_conf_q = (struct dpaa2_queue *) - priv->tx_conf_vq[tx_queue_id]; + struct dpaa2_queue *dpaa2_q = priv->tx_vq[tx_queue_id]; + struct dpaa2_queue *dpaa2_tx_conf_q = priv->tx_conf_vq[tx_queue_id]; struct fsl_mc_io *dpni = dev->process_private; struct dpni_queue tx_conf_cfg; struct dpni_queue tx_flow_cfg; uint8_t options = 0, flow_id; + uint8_t ceetm_ch_idx; uint16_t channel_id; struct dpni_queue_id qid; uint32_t tc_id; int ret; + uint64_t iova; PMD_INIT_FUNC_TRACE(); /* Tx deferred start is not supported */ if (tx_conf->tx_deferred_start) { - DPAA2_PMD_ERR("%p:Tx deferred start not supported", - (void *)dev); + DPAA2_PMD_ERR("%s:Tx deferred start not supported", + dev->data->name); return -EINVAL; } @@ -907,7 +900,7 @@ dpaa2_dev_tx_queue_setup(struct rte_eth_dev *dev, dpaa2_q->offloads = tx_conf->offloads; /* Return if queue already configured */ - if (dpaa2_q->flow_id != 0xffff) { + if (dpaa2_q->flow_id != DPAA2_INVALID_FLOW_ID) { dev->data->tx_queues[tx_queue_id] = dpaa2_q; return 0; } @@ -915,20 +908,27 @@ dpaa2_dev_tx_queue_setup(struct rte_eth_dev *dev, memset(&tx_conf_cfg, 0, sizeof(struct dpni_queue)); memset(&tx_flow_cfg, 0, sizeof(struct dpni_queue)); - if (tx_queue_id == 0) { - /*Set tx-conf and error configuration*/ - if (priv->flags & DPAA2_TX_CONF_ENABLE) - ret = dpni_set_tx_confirmation_mode(dpni, CMD_PRI_LOW, - priv->token, - DPNI_CONF_AFFINE); - else - ret = dpni_set_tx_confirmation_mode(dpni, CMD_PRI_LOW, - priv->token, - DPNI_CONF_DISABLE); - if (ret) { - DPAA2_PMD_ERR("Error in set tx conf mode settings: " - "err=%d", ret); - return -1; + if (!tx_queue_id) { + for (ceetm_ch_idx = 0; + ceetm_ch_idx <= (priv->num_channels - 1); + ceetm_ch_idx++) { + /*Set tx-conf and error configuration*/ + if (priv->flags & DPAA2_TX_CONF_ENABLE) { + ret = dpni_set_tx_confirmation_mode(dpni, + CMD_PRI_LOW, priv->token, + ceetm_ch_idx, + DPNI_CONF_AFFINE); + } else { + ret = dpni_set_tx_confirmation_mode(dpni, + CMD_PRI_LOW, priv->token, + ceetm_ch_idx, + DPNI_CONF_DISABLE); + } + if (ret) { + DPAA2_PMD_ERR("Error(%d) in tx conf setting", + ret); + return ret; + } 
} } @@ -942,7 +942,7 @@ dpaa2_dev_tx_queue_setup(struct rte_eth_dev *dev, DPAA2_PMD_ERR("Error in setting the tx flow: " "tc_id=%d, flow=%d err=%d", tc_id, flow_id, ret); - return -1; + return ret; } dpaa2_q->flow_id = flow_id; @@ -950,11 +950,11 @@ dpaa2_dev_tx_queue_setup(struct rte_eth_dev *dev, dpaa2_q->tc_index = tc_id; ret = dpni_get_queue(dpni, CMD_PRI_LOW, priv->token, - DPNI_QUEUE_TX, ((channel_id << 8) | dpaa2_q->tc_index), - dpaa2_q->flow_id, &tx_flow_cfg, &qid); + DPNI_QUEUE_TX, ((channel_id << 8) | dpaa2_q->tc_index), + dpaa2_q->flow_id, &tx_flow_cfg, &qid); if (ret) { DPAA2_PMD_ERR("Error in getting LFQID err=%d", ret); - return -1; + return ret; } dpaa2_q->fqid = qid.fqid; @@ -970,8 +970,17 @@ dpaa2_dev_tx_queue_setup(struct rte_eth_dev *dev, */ cong_notif_cfg.threshold_exit = (nb_tx_desc * 9) / 10; cong_notif_cfg.message_ctx = 0; - cong_notif_cfg.message_iova = - (size_t)DPAA2_VADDR_TO_IOVA(dpaa2_q->cscn); + + iova = DPAA2_VADDR_TO_IOVA_AND_CHECK(dpaa2_q->cscn, + sizeof(struct qbman_result)); + if (iova == RTE_BAD_IOVA) { + DPAA2_PMD_ERR("No IOMMU map for cscn(%p)(size=%x)", + dpaa2_q->cscn, (uint32_t)sizeof(struct qbman_result)); + + return -ENOBUFS; + } + + cong_notif_cfg.message_iova = iova; cong_notif_cfg.dest_cfg.dest_type = DPNI_DEST_NONE; cong_notif_cfg.notification_mode = DPNI_CONG_OPT_WRITE_MEM_ON_ENTER | @@ -979,16 +988,13 @@ dpaa2_dev_tx_queue_setup(struct rte_eth_dev *dev, DPNI_CONG_OPT_COHERENT_WRITE; cong_notif_cfg.cg_point = DPNI_CP_QUEUE; - ret = dpni_set_congestion_notification(dpni, CMD_PRI_LOW, - priv->token, - DPNI_QUEUE_TX, - ((channel_id << 8) | tc_id), - &cong_notif_cfg); + ret = dpni_set_congestion_notification(dpni, + CMD_PRI_LOW, priv->token, DPNI_QUEUE_TX, + ((channel_id << 8) | tc_id), &cong_notif_cfg); if (ret) { - DPAA2_PMD_ERR( - "Error in setting tx congestion notification: " - "err=%d", ret); - return -ret; + DPAA2_PMD_ERR("Set TX congestion notification err=%d", + ret); + return ret; } } dpaa2_q->cb_eqresp_free = dpaa2_dev_free_eqresp_buf; @@ -999,22 +1005,24 @@ dpaa2_dev_tx_queue_setup(struct rte_eth_dev *dev, options = options | DPNI_QUEUE_OPT_USER_CTX; tx_conf_cfg.user_context = (size_t)(dpaa2_q); ret = dpni_set_queue(dpni, CMD_PRI_LOW, priv->token, - DPNI_QUEUE_TX_CONFIRM, ((channel_id << 8) | dpaa2_tx_conf_q->tc_index), - dpaa2_tx_conf_q->flow_id, options, &tx_conf_cfg); + DPNI_QUEUE_TX_CONFIRM, + ((channel_id << 8) | dpaa2_tx_conf_q->tc_index), + dpaa2_tx_conf_q->flow_id, + options, &tx_conf_cfg); if (ret) { - DPAA2_PMD_ERR("Error in setting the tx conf flow: " - "tc_index=%d, flow=%d err=%d", - dpaa2_tx_conf_q->tc_index, - dpaa2_tx_conf_q->flow_id, ret); - return -1; + DPAA2_PMD_ERR("Set TC[%d].TX[%d] conf flow err=%d", + dpaa2_tx_conf_q->tc_index, + dpaa2_tx_conf_q->flow_id, ret); + return ret; } ret = dpni_get_queue(dpni, CMD_PRI_LOW, priv->token, - DPNI_QUEUE_TX_CONFIRM, ((channel_id << 8) | dpaa2_tx_conf_q->tc_index), - dpaa2_tx_conf_q->flow_id, &tx_conf_cfg, &qid); + DPNI_QUEUE_TX_CONFIRM, + ((channel_id << 8) | dpaa2_tx_conf_q->tc_index), + dpaa2_tx_conf_q->flow_id, &tx_conf_cfg, &qid); if (ret) { DPAA2_PMD_ERR("Error in getting LFQID err=%d", ret); - return -1; + return ret; } dpaa2_tx_conf_q->fqid = qid.fqid; } @@ -1026,8 +1034,7 @@ dpaa2_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t rx_queue_id) { struct dpaa2_queue *dpaa2_q = dev->data->rx_queues[rx_queue_id]; struct dpaa2_dev_priv *priv = dpaa2_q->eth_data->dev_private; - struct fsl_mc_io *dpni = - (struct fsl_mc_io *)priv->eth_dev->process_private; + struct 
fsl_mc_io *dpni = priv->eth_dev->process_private; uint8_t options = 0; int ret; struct dpni_queue cfg; @@ -1037,7 +1044,7 @@ dpaa2_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t rx_queue_id) total_nb_rx_desc -= dpaa2_q->nb_desc; - if (dpaa2_q->cgid != 0xff) { + if (dpaa2_q->cgid != DPAA2_INVALID_CGID) { options = DPNI_QUEUE_OPT_CLEAR_CGID; cfg.cgid = dpaa2_q->cgid; @@ -1049,7 +1056,7 @@ dpaa2_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t rx_queue_id) DPAA2_PMD_ERR("Unable to clear CGR from q=%u err=%d", dpaa2_q->fqid, ret); priv->cgid_in_use[dpaa2_q->cgid] = 0; - dpaa2_q->cgid = 0xff; + dpaa2_q->cgid = DPAA2_INVALID_CGID; } } @@ -1213,10 +1220,10 @@ dpaa2_dev_start(struct rte_eth_dev *dev) dpaa2_dev_set_link_up(dev); for (i = 0; i < data->nb_rx_queues; i++) { - dpaa2_q = (struct dpaa2_queue *)data->rx_queues[i]; + dpaa2_q = data->rx_queues[i]; ret = dpni_get_queue(dpni, CMD_PRI_LOW, priv->token, - DPNI_QUEUE_RX, dpaa2_q->tc_index, - dpaa2_q->flow_id, &cfg, &qid); + DPNI_QUEUE_RX, dpaa2_q->tc_index, + dpaa2_q->flow_id, &cfg, &qid); if (ret) { DPAA2_PMD_ERR("Error in getting flow information: " "err=%d", ret); @@ -1233,7 +1240,7 @@ dpaa2_dev_start(struct rte_eth_dev *dev) ret); return ret; } - dpaa2_q = (struct dpaa2_queue *)priv->rx_err_vq; + dpaa2_q = priv->rx_err_vq; dpaa2_q->fqid = qid.fqid; dpaa2_q->eth_data = dev->data; @@ -1298,7 +1305,7 @@ static int dpaa2_dev_stop(struct rte_eth_dev *dev) { struct dpaa2_dev_priv *priv = dev->data->dev_private; - struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private; + struct fsl_mc_io *dpni = dev->process_private; int ret; struct rte_eth_link link; struct rte_device *rdev = dev->device; @@ -1351,7 +1358,7 @@ static int dpaa2_dev_close(struct rte_eth_dev *dev) { struct dpaa2_dev_priv *priv = dev->data->dev_private; - struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private; + struct fsl_mc_io *dpni = dev->process_private; int i, ret; struct rte_eth_link link; @@ -1362,7 +1369,7 @@ dpaa2_dev_close(struct rte_eth_dev *dev) if (!dpni) { DPAA2_PMD_WARN("Already closed or not started"); - return -1; + return -EINVAL; } dpaa2_tm_deinit(dev); @@ -1371,7 +1378,7 @@ dpaa2_dev_close(struct rte_eth_dev *dev) ret = dpni_reset(dpni, CMD_PRI_LOW, priv->token); if (ret) { DPAA2_PMD_ERR("Failure cleaning dpni device: err=%d", ret); - return -1; + return ret; } memset(&link, 0, sizeof(link)); @@ -1383,7 +1390,7 @@ dpaa2_dev_close(struct rte_eth_dev *dev) ret = dpni_close(dpni, CMD_PRI_LOW, priv->token); if (ret) { DPAA2_PMD_ERR("Failure closing dpni device with err code %d", - ret); + ret); } /* Free the allocated memory for ethernet private data and dpni*/ @@ -1392,18 +1399,17 @@ dpaa2_dev_close(struct rte_eth_dev *dev) rte_free(dpni); for (i = 0; i < MAX_TCS; i++) - rte_free((void *)(size_t)priv->extract.tc_extract_param[i]); + rte_free(priv->extract.tc_extract_param[i]); if (priv->extract.qos_extract_param) - rte_free((void *)(size_t)priv->extract.qos_extract_param); + rte_free(priv->extract.qos_extract_param); DPAA2_PMD_INFO("%s: netdev deleted", dev->data->name); return 0; } static int -dpaa2_dev_promiscuous_enable( - struct rte_eth_dev *dev) +dpaa2_dev_promiscuous_enable(struct rte_eth_dev *dev) { int ret; struct dpaa2_dev_priv *priv = dev->data->dev_private; @@ -1484,7 +1490,7 @@ dpaa2_dev_allmulticast_disable(struct rte_eth_dev *dev) { int ret; struct dpaa2_dev_priv *priv = dev->data->dev_private; - struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private; + struct fsl_mc_io *dpni = dev->process_private; 
PMD_INIT_FUNC_TRACE(); @@ -1509,13 +1515,13 @@ dpaa2_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) { int ret; struct dpaa2_dev_priv *priv = dev->data->dev_private; - struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private; + struct fsl_mc_io *dpni = dev->process_private; uint32_t frame_size = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN + VLAN_TAG_SIZE; PMD_INIT_FUNC_TRACE(); - if (dpni == NULL) { + if (!dpni) { DPAA2_PMD_ERR("dpni is NULL"); return -EINVAL; } @@ -1527,44 +1533,44 @@ dpaa2_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) frame_size - RTE_ETHER_CRC_LEN); if (ret) { DPAA2_PMD_ERR("Setting the max frame length failed"); - return -1; + return ret; } + dev->data->mtu = mtu; DPAA2_PMD_INFO("MTU configured for the device: %d", mtu); return 0; } static int dpaa2_dev_add_mac_addr(struct rte_eth_dev *dev, - struct rte_ether_addr *addr, - __rte_unused uint32_t index, - __rte_unused uint32_t pool) + struct rte_ether_addr *addr, + __rte_unused uint32_t index, + __rte_unused uint32_t pool) { int ret; struct dpaa2_dev_priv *priv = dev->data->dev_private; - struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private; + struct fsl_mc_io *dpni = dev->process_private; PMD_INIT_FUNC_TRACE(); if (dpni == NULL) { DPAA2_PMD_ERR("dpni is NULL"); - return -1; + return -EINVAL; } ret = dpni_add_mac_addr(dpni, CMD_PRI_LOW, priv->token, addr->addr_bytes, 0, 0, 0); if (ret) - DPAA2_PMD_ERR( - "error: Adding the MAC ADDR failed: err = %d", ret); - return 0; + DPAA2_PMD_ERR("ERR(%d) Adding the MAC ADDR failed", ret); + return ret; } static void dpaa2_dev_remove_mac_addr(struct rte_eth_dev *dev, - uint32_t index) + uint32_t index) { int ret; struct dpaa2_dev_priv *priv = dev->data->dev_private; - struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private; + struct fsl_mc_io *dpni = dev->process_private; struct rte_eth_dev_data *data = dev->data; struct rte_ether_addr *macaddr; @@ -1572,7 +1578,7 @@ dpaa2_dev_remove_mac_addr(struct rte_eth_dev *dev, macaddr = &data->mac_addrs[index]; - if (dpni == NULL) { + if (!dpni) { DPAA2_PMD_ERR("dpni is NULL"); return; } @@ -1586,15 +1592,15 @@ dpaa2_dev_remove_mac_addr(struct rte_eth_dev *dev, static int dpaa2_dev_set_mac_addr(struct rte_eth_dev *dev, - struct rte_ether_addr *addr) + struct rte_ether_addr *addr) { int ret; struct dpaa2_dev_priv *priv = dev->data->dev_private; - struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private; + struct fsl_mc_io *dpni = dev->process_private; PMD_INIT_FUNC_TRACE(); - if (dpni == NULL) { + if (!dpni) { DPAA2_PMD_ERR("dpni is NULL"); return -EINVAL; } @@ -1603,19 +1609,18 @@ dpaa2_dev_set_mac_addr(struct rte_eth_dev *dev, priv->token, addr->addr_bytes); if (ret) - DPAA2_PMD_ERR( - "error: Setting the MAC ADDR failed %d", ret); + DPAA2_PMD_ERR("ERR(%d) Setting the MAC ADDR failed", ret); return ret; } -static -int dpaa2_dev_stats_get(struct rte_eth_dev *dev, - struct rte_eth_stats *stats) +static int +dpaa2_dev_stats_get(struct rte_eth_dev *dev, + struct rte_eth_stats *stats) { struct dpaa2_dev_priv *priv = dev->data->dev_private; - struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private; - int32_t retcode; + struct fsl_mc_io *dpni = dev->process_private; + int32_t retcode; uint8_t page0 = 0, page1 = 1, page2 = 2; union dpni_statistics value; int i; @@ -1670,8 +1675,8 @@ int dpaa2_dev_stats_get(struct rte_eth_dev *dev, /* Fill in per queue stats */ for (i = 0; (i < RTE_ETHDEV_QUEUE_STAT_CNTRS) && (i < priv->nb_rx_queues || i < priv->nb_tx_queues); ++i) { - dpaa2_rxq = (struct 
dpaa2_queue *)priv->rx_vq[i]; - dpaa2_txq = (struct dpaa2_queue *)priv->tx_vq[i]; + dpaa2_rxq = priv->rx_vq[i]; + dpaa2_txq = priv->tx_vq[i]; if (dpaa2_rxq) stats->q_ipackets[i] = dpaa2_rxq->rx_pkts; if (dpaa2_txq) @@ -1690,19 +1695,20 @@ int dpaa2_dev_stats_get(struct rte_eth_dev *dev, }; static int -dpaa2_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats, - unsigned int n) +dpaa2_dev_xstats_get(struct rte_eth_dev *dev, + struct rte_eth_xstat *xstats, unsigned int n) { struct dpaa2_dev_priv *priv = dev->data->dev_private; struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private; - int32_t retcode; + int32_t retcode; union dpni_statistics value[5] = {}; unsigned int i = 0, num = RTE_DIM(dpaa2_xstats_strings); + uint8_t page_id, stats_id; if (n < num) return num; - if (xstats == NULL) + if (!xstats) return 0; /* Get Counters from page_0*/ @@ -1737,8 +1743,9 @@ dpaa2_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats, for (i = 0; i < num; i++) { xstats[i].id = i; - xstats[i].value = value[dpaa2_xstats_strings[i].page_id]. - raw.counter[dpaa2_xstats_strings[i].stats_id]; + page_id = dpaa2_xstats_strings[i].page_id; + stats_id = dpaa2_xstats_strings[i].stats_id; + xstats[i].value = value[page_id].raw.counter[stats_id]; } return i; err: @@ -1748,8 +1755,8 @@ dpaa2_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats, static int dpaa2_xstats_get_names(__rte_unused struct rte_eth_dev *dev, - struct rte_eth_xstat_name *xstats_names, - unsigned int limit) + struct rte_eth_xstat_name *xstats_names, + unsigned int limit) { unsigned int i, stat_cnt = RTE_DIM(dpaa2_xstats_strings); @@ -1767,16 +1774,16 @@ dpaa2_xstats_get_names(__rte_unused struct rte_eth_dev *dev, static int dpaa2_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids, - uint64_t *values, unsigned int n) + uint64_t *values, unsigned int n) { unsigned int i, stat_cnt = RTE_DIM(dpaa2_xstats_strings); uint64_t values_copy[stat_cnt]; + uint8_t page_id, stats_id; if (!ids) { struct dpaa2_dev_priv *priv = dev->data->dev_private; - struct fsl_mc_io *dpni = - (struct fsl_mc_io *)dev->process_private; - int32_t retcode; + struct fsl_mc_io *dpni = dev->process_private; + int32_t retcode; union dpni_statistics value[5] = {}; if (n < stat_cnt) @@ -1810,8 +1817,9 @@ dpaa2_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids, return 0; for (i = 0; i < stat_cnt; i++) { - values[i] = value[dpaa2_xstats_strings[i].page_id]. 
- raw.counter[dpaa2_xstats_strings[i].stats_id]; + page_id = dpaa2_xstats_strings[i].page_id; + stats_id = dpaa2_xstats_strings[i].stats_id; + values[i] = value[page_id].raw.counter[stats_id]; } return stat_cnt; } @@ -1821,7 +1829,7 @@ dpaa2_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids, for (i = 0; i < n; i++) { if (ids[i] >= stat_cnt) { DPAA2_PMD_ERR("xstats id value isn't valid"); - return -1; + return -EINVAL; } values[i] = values_copy[ids[i]]; } @@ -1829,8 +1837,7 @@ dpaa2_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids, } static int -dpaa2_xstats_get_names_by_id( - struct rte_eth_dev *dev, +dpaa2_xstats_get_names_by_id(struct rte_eth_dev *dev, const uint64_t *ids, struct rte_eth_xstat_name *xstats_names, unsigned int limit) @@ -1857,14 +1864,14 @@ static int dpaa2_dev_stats_reset(struct rte_eth_dev *dev) { struct dpaa2_dev_priv *priv = dev->data->dev_private; - struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private; + struct fsl_mc_io *dpni = dev->process_private; int retcode; int i; struct dpaa2_queue *dpaa2_q; PMD_INIT_FUNC_TRACE(); - if (dpni == NULL) { + if (!dpni) { DPAA2_PMD_ERR("dpni is NULL"); return -EINVAL; } @@ -1875,13 +1882,13 @@ dpaa2_dev_stats_reset(struct rte_eth_dev *dev) /* Reset the per queue stats in dpaa2_queue structure */ for (i = 0; i < priv->nb_rx_queues; i++) { - dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[i]; + dpaa2_q = priv->rx_vq[i]; if (dpaa2_q) dpaa2_q->rx_pkts = 0; } for (i = 0; i < priv->nb_tx_queues; i++) { - dpaa2_q = (struct dpaa2_queue *)priv->tx_vq[i]; + dpaa2_q = priv->tx_vq[i]; if (dpaa2_q) dpaa2_q->tx_pkts = 0; } @@ -1900,12 +1907,12 @@ dpaa2_dev_link_update(struct rte_eth_dev *dev, { int ret; struct dpaa2_dev_priv *priv = dev->data->dev_private; - struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private; + struct fsl_mc_io *dpni = dev->process_private; struct rte_eth_link link; struct dpni_link_state state = {0}; uint8_t count; - if (dpni == NULL) { + if (!dpni) { DPAA2_PMD_ERR("dpni is NULL"); return 0; } @@ -1915,7 +1922,7 @@ dpaa2_dev_link_update(struct rte_eth_dev *dev, &state); if (ret < 0) { DPAA2_PMD_DEBUG("error: dpni_get_link_state %d", ret); - return -1; + return ret; } if (state.up == RTE_ETH_LINK_DOWN && wait_to_complete) @@ -1934,7 +1941,7 @@ dpaa2_dev_link_update(struct rte_eth_dev *dev, link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX; ret = rte_eth_linkstatus_set(dev, &link); - if (ret == -1) + if (ret < 0) DPAA2_PMD_DEBUG("No change in status"); else DPAA2_PMD_INFO("Port %d Link is %s", dev->data->port_id, @@ -1957,9 +1964,9 @@ dpaa2_dev_set_link_up(struct rte_eth_dev *dev) struct dpni_link_state state = {0}; priv = dev->data->dev_private; - dpni = (struct fsl_mc_io *)dev->process_private; + dpni = dev->process_private; - if (dpni == NULL) { + if (!dpni) { DPAA2_PMD_ERR("dpni is NULL"); return ret; } @@ -1969,7 +1976,7 @@ dpaa2_dev_set_link_up(struct rte_eth_dev *dev) if (ret) { /* Unable to obtain dpni status; Not continuing */ DPAA2_PMD_ERR("Interface Link UP failed (%d)", ret); - return -EINVAL; + return ret; } /* Enable link if not already enabled */ @@ -1977,13 +1984,13 @@ dpaa2_dev_set_link_up(struct rte_eth_dev *dev) ret = dpni_enable(dpni, CMD_PRI_LOW, priv->token); if (ret) { DPAA2_PMD_ERR("Interface Link UP failed (%d)", ret); - return -EINVAL; + return ret; } } ret = dpni_get_link_state(dpni, CMD_PRI_LOW, priv->token, &state); if (ret < 0) { DPAA2_PMD_DEBUG("Unable to get link state (%d)", ret); - return -1; + return ret; } /* changing tx burst function to start 
enqueues */ @@ -1991,10 +1998,15 @@ dpaa2_dev_set_link_up(struct rte_eth_dev *dev) dev->data->dev_link.link_status = state.up; dev->data->dev_link.link_speed = state.rate; + if (state.options & DPNI_LINK_OPT_HALF_DUPLEX) + dev->data->dev_link.link_duplex = RTE_ETH_LINK_HALF_DUPLEX; + else + dev->data->dev_link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX; + if (state.up) - DPAA2_PMD_INFO("Port %d Link is Up", dev->data->port_id); + DPAA2_PMD_DEBUG("Port %d Link is Up", dev->data->port_id); else - DPAA2_PMD_INFO("Port %d Link is Down", dev->data->port_id); + DPAA2_PMD_DEBUG("Port %d Link is Down", dev->data->port_id); return ret; } @@ -2014,9 +2026,9 @@ dpaa2_dev_set_link_down(struct rte_eth_dev *dev) PMD_INIT_FUNC_TRACE(); priv = dev->data->dev_private; - dpni = (struct fsl_mc_io *)dev->process_private; + dpni = dev->process_private; - if (dpni == NULL) { + if (!dpni) { DPAA2_PMD_ERR("Device has not yet been configured"); return ret; } @@ -2063,26 +2075,26 @@ dpaa2_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) int ret = -EINVAL; struct dpaa2_dev_priv *priv; struct fsl_mc_io *dpni; - struct dpni_link_state state = {0}; + struct dpni_link_cfg cfg = {0}; PMD_INIT_FUNC_TRACE(); priv = dev->data->dev_private; - dpni = (struct fsl_mc_io *)dev->process_private; + dpni = dev->process_private; - if (dpni == NULL || fc_conf == NULL) { + if (!dpni || !fc_conf) { DPAA2_PMD_ERR("device not configured"); return ret; } - ret = dpni_get_link_state(dpni, CMD_PRI_LOW, priv->token, &state); + ret = dpni_get_link_cfg(dpni, CMD_PRI_LOW, priv->token, &cfg); if (ret) { - DPAA2_PMD_ERR("error: dpni_get_link_state %d", ret); + DPAA2_PMD_ERR("error: dpni_get_link_cfg %d", ret); return ret; } memset(fc_conf, 0, sizeof(struct rte_eth_fc_conf)); - if (state.options & DPNI_LINK_OPT_PAUSE) { + if (cfg.options & DPNI_LINK_OPT_PAUSE) { /* DPNI_LINK_OPT_PAUSE set * if ASYM_PAUSE not set, * RX Side flow control (handle received Pause frame) @@ -2091,7 +2103,7 @@ dpaa2_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) * RX Side flow control (handle received Pause frame) * No TX side flow control (send Pause frame disabled) */ - if (!(state.options & DPNI_LINK_OPT_ASYM_PAUSE)) + if (!(cfg.options & DPNI_LINK_OPT_ASYM_PAUSE)) fc_conf->mode = RTE_ETH_FC_FULL; else fc_conf->mode = RTE_ETH_FC_RX_PAUSE; @@ -2103,7 +2115,7 @@ dpaa2_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) * if ASYM_PAUSE not set, * Flow control disabled */ - if (state.options & DPNI_LINK_OPT_ASYM_PAUSE) + if (cfg.options & DPNI_LINK_OPT_ASYM_PAUSE) fc_conf->mode = RTE_ETH_FC_TX_PAUSE; else fc_conf->mode = RTE_ETH_FC_NONE; @@ -2118,36 +2130,31 @@ dpaa2_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) int ret = -EINVAL; struct dpaa2_dev_priv *priv; struct fsl_mc_io *dpni; - struct dpni_link_state state = {0}; struct dpni_link_cfg cfg = {0}; PMD_INIT_FUNC_TRACE(); priv = dev->data->dev_private; - dpni = (struct fsl_mc_io *)dev->process_private; + dpni = dev->process_private; - if (dpni == NULL) { + if (!dpni) { DPAA2_PMD_ERR("dpni is NULL"); return ret; } - /* It is necessary to obtain the current state before setting fc_conf + /* It is necessary to obtain the current cfg before setting fc_conf * as MC would return error in case rate, autoneg or duplex values are * different. 
*/ - ret = dpni_get_link_state(dpni, CMD_PRI_LOW, priv->token, &state); + ret = dpni_get_link_cfg(dpni, CMD_PRI_LOW, priv->token, &cfg); if (ret) { - DPAA2_PMD_ERR("Unable to get link state (err=%d)", ret); - return -1; + DPAA2_PMD_ERR("Unable to get link cfg (err=%d)", ret); + return ret; } /* Disable link before setting configuration */ dpaa2_dev_set_link_down(dev); - /* Based on fc_conf, update cfg */ - cfg.rate = state.rate; - cfg.options = state.options; - /* update cfg with fc_conf */ switch (fc_conf->mode) { case RTE_ETH_FC_FULL: @@ -2184,7 +2191,7 @@ dpaa2_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) default: DPAA2_PMD_ERR("Incorrect Flow control flag (%d)", fc_conf->mode); - return -1; + return -EINVAL; } ret = dpni_set_link_cfg(dpni, CMD_PRI_LOW, priv->token, &cfg); @@ -2373,10 +2380,10 @@ dpaa2_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id, { struct dpaa2_queue *rxq; struct dpaa2_dev_priv *priv = dev->data->dev_private; - struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private; + struct fsl_mc_io *dpni = dev->process_private; uint16_t max_frame_length; - rxq = (struct dpaa2_queue *)dev->data->rx_queues[queue_id]; + rxq = dev->data->rx_queues[queue_id]; qinfo->mp = rxq->mb_pool; qinfo->scattered_rx = dev->data->scattered_rx; @@ -2492,10 +2499,10 @@ static struct eth_dev_ops dpaa2_ethdev_ops = { * Returns the table of MAC entries (multiple entries) */ static int -populate_mac_addr(struct fsl_mc_io *dpni_dev, struct dpaa2_dev_priv *priv, - struct rte_ether_addr *mac_entry) +populate_mac_addr(struct fsl_mc_io *dpni_dev, + struct dpaa2_dev_priv *priv, struct rte_ether_addr *mac_entry) { - int ret; + int ret = 0; struct rte_ether_addr phy_mac, prime_mac; memset(&phy_mac, 0, sizeof(struct rte_ether_addr)); @@ -2553,7 +2560,7 @@ populate_mac_addr(struct fsl_mc_io *dpni_dev, struct dpaa2_dev_priv *priv, return 0; cleanup: - return -1; + return ret; } static int @@ -2612,7 +2619,7 @@ dpaa2_dev_init(struct rte_eth_dev *eth_dev) return -1; } dpni_dev->regs = dpaa2_get_mcp_ptr(MC_PORTAL_INDEX); - eth_dev->process_private = (void *)dpni_dev; + eth_dev->process_private = dpni_dev; /* For secondary processes, the primary has done all the work */ if (rte_eal_process_type() != RTE_PROC_PRIMARY) { @@ -2641,7 +2648,7 @@ dpaa2_dev_init(struct rte_eth_dev *eth_dev) "Failure in opening dpni@%d with err code %d", hw_id, ret); rte_free(dpni_dev); - return -1; + return ret; } if (eth_dev->data->dev_conf.lpbk_mode) @@ -2708,9 +2715,12 @@ dpaa2_dev_init(struct rte_eth_dev *eth_dev) if (dpaa2_get_devargs(dev->devargs, DRIVER_ERROR_QUEUE)) { dpaa2_enable_err_queue = 1; - DPAA2_PMD_INFO("Enable error queue"); + DPAA2_PMD_INFO("Enable DMA error checks"); } + if (getenv("DPAA2_PRINT_RX_PARSER_RESULT")) + dpaa2_print_parser_result = 1; + /* Allocate memory for hardware structure for queues */ ret = dpaa2_alloc_rx_tx_queues(eth_dev); if (ret) { @@ -2789,39 +2799,24 @@ dpaa2_dev_init(struct rte_eth_dev *eth_dev) /* Init fields w.r.t. 
classification */ memset(&priv->extract.qos_key_extract, 0, sizeof(struct dpaa2_key_extract)); - priv->extract.qos_extract_param = (size_t)rte_malloc(NULL, 256, 64); + priv->extract.qos_extract_param = rte_malloc(NULL, + DPAA2_EXTRACT_PARAM_MAX_SIZE, + RTE_CACHE_LINE_SIZE); if (!priv->extract.qos_extract_param) { - DPAA2_PMD_ERR(" Error(%d) in allocation resources for flow " - " classification ", ret); + DPAA2_PMD_ERR("Memory alloc failed"); goto init_err; } - priv->extract.qos_key_extract.key_info.ipv4_src_offset = - IP_ADDRESS_OFFSET_INVALID; - priv->extract.qos_key_extract.key_info.ipv4_dst_offset = - IP_ADDRESS_OFFSET_INVALID; - priv->extract.qos_key_extract.key_info.ipv6_src_offset = - IP_ADDRESS_OFFSET_INVALID; - priv->extract.qos_key_extract.key_info.ipv6_dst_offset = - IP_ADDRESS_OFFSET_INVALID; for (i = 0; i < MAX_TCS; i++) { memset(&priv->extract.tc_key_extract[i], 0, sizeof(struct dpaa2_key_extract)); - priv->extract.tc_extract_param[i] = - (size_t)rte_malloc(NULL, 256, 64); + priv->extract.tc_extract_param[i] = rte_malloc(NULL, + DPAA2_EXTRACT_PARAM_MAX_SIZE, + RTE_CACHE_LINE_SIZE); if (!priv->extract.tc_extract_param[i]) { - DPAA2_PMD_ERR(" Error(%d) in allocation resources for flow classification", - ret); + DPAA2_PMD_ERR("Memory alloc failed"); goto init_err; } - priv->extract.tc_key_extract[i].key_info.ipv4_src_offset = - IP_ADDRESS_OFFSET_INVALID; - priv->extract.tc_key_extract[i].key_info.ipv4_dst_offset = - IP_ADDRESS_OFFSET_INVALID; - priv->extract.tc_key_extract[i].key_info.ipv6_src_offset = - IP_ADDRESS_OFFSET_INVALID; - priv->extract.tc_key_extract[i].key_info.ipv6_dst_offset = - IP_ADDRESS_OFFSET_INVALID; } ret = dpni_set_max_frame_length(dpni_dev, CMD_PRI_LOW, priv->token, @@ -2831,6 +2826,7 @@ dpaa2_dev_init(struct rte_eth_dev *eth_dev) DPAA2_PMD_ERR("Unable to set mtu. 
check config"); goto init_err; } + eth_dev->data->mtu = RTE_ETHER_MTU; /*TODO To enable soft parser support DPAA2 driver needs to integrate * with external entity to receive byte code for software sequence @@ -2855,6 +2851,10 @@ dpaa2_dev_init(struct rte_eth_dev *eth_dev) return ret; } } + + ret = dpaa2_soft_parser_loaded(); + if (ret > 0) + DPAA2_PMD_INFO("soft parser is loaded"); DPAA2_PMD_INFO("%s: netdev created, connected to %s", eth_dev->data->name, dpaa2_dev->ep_name); @@ -2865,11 +2865,127 @@ dpaa2_dev_init(struct rte_eth_dev *eth_dev) return ret; } -int dpaa2_dev_is_dpaa2(struct rte_eth_dev *dev) +int +rte_pmd_dpaa2_dev_is_dpaa2(uint32_t eth_id) { + struct rte_eth_dev *dev; + + if (eth_id >= RTE_MAX_ETHPORTS) + return false; + + dev = &rte_eth_devices[eth_id]; + if (!dev->device) + return false; + return dev->device->driver == &rte_dpaa2_pmd.driver; } +const char * +rte_pmd_dpaa2_ep_name(uint32_t eth_id) +{ + struct rte_eth_dev *dev; + struct dpaa2_dev_priv *priv; + + if (eth_id >= RTE_MAX_ETHPORTS) + return NULL; + + if (!rte_pmd_dpaa2_dev_is_dpaa2(eth_id)) + return NULL; + + dev = &rte_eth_devices[eth_id]; + if (!dev->data) + return NULL; + + if (!dev->data->dev_private) + return NULL; + + priv = dev->data->dev_private; + + return priv->ep_name; +} + +#if defined(RTE_LIBRTE_IEEE1588) +int +rte_pmd_dpaa2_get_one_step_ts(uint16_t port_id, bool mc_query) +{ + struct rte_eth_dev *dev = &rte_eth_devices[port_id]; + struct dpaa2_dev_priv *priv = dev->data->dev_private; + struct fsl_mc_io *dpni = priv->eth_dev->process_private; + struct dpni_single_step_cfg ptp_cfg; + int err; + + if (!mc_query) + return priv->ptp_correction_offset; + + err = dpni_get_single_step_cfg(dpni, CMD_PRI_LOW, priv->token, &ptp_cfg); + if (err) { + DPAA2_PMD_ERR("Failed to retrieve onestep configuration"); + return err; + } + + if (!ptp_cfg.ptp_onestep_reg_base) { + DPAA2_PMD_ERR("1588 onestep reg not available"); + return -1; + } + + priv->ptp_correction_offset = ptp_cfg.offset; + + return priv->ptp_correction_offset; +} + +int +rte_pmd_dpaa2_set_one_step_ts(uint16_t port_id, uint16_t offset, uint8_t ch_update) +{ + struct rte_eth_dev *dev = &rte_eth_devices[port_id]; + struct dpaa2_dev_priv *priv = dev->data->dev_private; + struct fsl_mc_io *dpni = dev->process_private; + struct dpni_single_step_cfg cfg; + int err; + + cfg.en = 1; + cfg.ch_update = ch_update; + cfg.offset = offset; + cfg.peer_delay = 0; + + err = dpni_set_single_step_cfg(dpni, CMD_PRI_LOW, priv->token, &cfg); + if (err) + return err; + + priv->ptp_correction_offset = offset; + + return 0; +} +#endif + +static int dpaa2_tx_sg_pool_init(void) +{ + char name[RTE_MEMZONE_NAMESIZE]; + + if (dpaa2_tx_sg_pool) + return 0; + + sprintf(name, "dpaa2_mbuf_tx_sg_pool"); + if (rte_eal_process_type() == RTE_PROC_PRIMARY) { + dpaa2_tx_sg_pool = rte_pktmbuf_pool_create(name, + DPAA2_POOL_SIZE, + DPAA2_POOL_CACHE_SIZE, 0, + DPAA2_MAX_SGS * sizeof(struct qbman_sge), + rte_socket_id()); + if (!dpaa2_tx_sg_pool) { + DPAA2_PMD_ERR("SG pool creation failed"); + return -ENOMEM; + } + } else { + dpaa2_tx_sg_pool = rte_mempool_lookup(name); + if (!dpaa2_tx_sg_pool) { + DPAA2_PMD_ERR("SG pool lookup failed"); + return -ENOMEM; + } + } + + return 0; +} + static int rte_dpaa2_probe(struct rte_dpaa2_driver *dpaa2_drv, struct rte_dpaa2_device *dpaa2_dev) @@ -2880,12 +2996,11 @@ rte_dpaa2_probe(struct rte_dpaa2_driver *dpaa2_drv, if ((DPAA2_MBUF_HW_ANNOTATION + DPAA2_FD_PTA_SIZE) > RTE_PKTMBUF_HEADROOM) { - DPAA2_PMD_ERR( - "RTE_PKTMBUF_HEADROOM(%d) shall be > DPAA2 
Annotation req(%d)", - RTE_PKTMBUF_HEADROOM, - DPAA2_MBUF_HW_ANNOTATION + DPAA2_FD_PTA_SIZE); + DPAA2_PMD_ERR("RTE_PKTMBUF_HEADROOM(%d) < DPAA2 Annotation(%d)", + RTE_PKTMBUF_HEADROOM, + DPAA2_MBUF_HW_ANNOTATION + DPAA2_FD_PTA_SIZE); - return -1; + return -EINVAL; } if (rte_eal_process_type() == RTE_PROC_PRIMARY) { @@ -2924,19 +3039,10 @@ rte_dpaa2_probe(struct rte_dpaa2_driver *dpaa2_drv, /* Invoke PMD device initialization function */ diag = dpaa2_dev_init(eth_dev); - if (diag == 0) { - if (!dpaa2_tx_sg_pool) { - dpaa2_tx_sg_pool = - rte_pktmbuf_pool_create("dpaa2_mbuf_tx_sg_pool", - DPAA2_POOL_SIZE, - DPAA2_POOL_CACHE_SIZE, 0, - DPAA2_MAX_SGS * sizeof(struct qbman_sge), - rte_socket_id()); - if (dpaa2_tx_sg_pool == NULL) { - DPAA2_PMD_ERR("SG pool creation failed"); - return -ENOMEM; - } - } + if (!diag) { + diag = dpaa2_tx_sg_pool_init(); + if (diag) + return diag; rte_eth_dev_probing_finish(eth_dev); dpaa2_valid_dev++; return 0; diff --git a/drivers/net/dpaa2/dpaa2_ethdev.h b/drivers/net/dpaa2/dpaa2_ethdev.h index 9feb631d5f..fd3119247a 100644 --- a/drivers/net/dpaa2/dpaa2_ethdev.h +++ b/drivers/net/dpaa2/dpaa2_ethdev.h @@ -19,6 +19,8 @@ #include #include +#include "base/dpaa2_hw_dpni_annot.h" + #define DPAA2_MIN_RX_BUF_SIZE 512 #define DPAA2_MAX_RX_PKT_LEN 10240 /*WRIOP support*/ #define NET_DPAA2_PMD_DRIVER_NAME net_dpaa2 @@ -29,6 +31,9 @@ #define MAX_DPNI 8 #define DPAA2_MAX_CHANNELS 16 +#define DPAA2_EXTRACT_PARAM_MAX_SIZE 256 +#define DPAA2_EXTRACT_ALLOC_KEY_MAX_SIZE 256 + #define DPAA2_RX_DEFAULT_NBDESC 512 #define DPAA2_ETH_MAX_LEN (RTE_ETHER_MTU + \ @@ -145,14 +150,6 @@ extern bool dpaa2_enable_ts[]; extern uint64_t dpaa2_timestamp_rx_dynflag; extern int dpaa2_timestamp_dynfield_offset; -#define DPAA2_QOS_TABLE_RECONFIGURE 1 -#define DPAA2_FS_TABLE_RECONFIGURE 2 - -#define DPAA2_QOS_TABLE_IPADDR_EXTRACT 4 -#define DPAA2_FS_TABLE_IPADDR_EXTRACT 8 - -#define DPAA2_FLOW_MAX_KEY_SIZE 16 - /* Externally defined */ extern const struct rte_flow_ops dpaa2_flow_ops; @@ -160,29 +157,206 @@ extern const struct rte_tm_ops dpaa2_tm_ops; extern bool dpaa2_enable_err_queue; -#define IP_ADDRESS_OFFSET_INVALID (-1) +extern bool dpaa2_print_parser_result; + +#define DPAA2_FAPR_SIZE \ + (sizeof(struct dpaa2_annot_hdr) - \ + offsetof(struct dpaa2_annot_hdr, word3)) + +#define DPAA2_PR_NXTHDR_OFFSET 0 + +#define DPAA2_FAFE_PSR_OFFSET 2 +#define DPAA2_FAFE_PSR_SIZE 2 + +#define DPAA2_FAF_PSR_OFFSET 4 +#define DPAA2_FAF_PSR_SIZE 12 + +#define DPAA2_FAF_TOTAL_SIZE \ + (DPAA2_FAFE_PSR_SIZE + DPAA2_FAF_PSR_SIZE) + +/* Just most popular Frame attribute flags (FAF) here.*/ +enum dpaa2_rx_faf_offset { + /* Set by SP start*/ + FAFE_VXLAN_IN_VLAN_FRAM = 0, + FAFE_VXLAN_IN_IPV4_FRAM = 1, + FAFE_VXLAN_IN_IPV6_FRAM = 2, + FAFE_VXLAN_IN_UDP_FRAM = 3, + FAFE_VXLAN_IN_TCP_FRAM = 4, + + FAFE_ECPRI_FRAM = 7, + /* Set by SP end*/ + + FAF_GTP_PRIMED_FRAM = 1 + DPAA2_FAFE_PSR_SIZE * 8, + FAF_PTP_FRAM = 3 + DPAA2_FAFE_PSR_SIZE * 8, + FAF_VXLAN_FRAM = 4 + DPAA2_FAFE_PSR_SIZE * 8, + FAF_ETH_FRAM = 10 + DPAA2_FAFE_PSR_SIZE * 8, + FAF_LLC_SNAP_FRAM = 18 + DPAA2_FAFE_PSR_SIZE * 8, + FAF_VLAN_FRAM = 21 + DPAA2_FAFE_PSR_SIZE * 8, + FAF_PPPOE_PPP_FRAM = 25 + DPAA2_FAFE_PSR_SIZE * 8, + FAF_MPLS_FRAM = 27 + DPAA2_FAFE_PSR_SIZE * 8, + FAF_ARP_FRAM = 30 + DPAA2_FAFE_PSR_SIZE * 8, + FAF_IPV4_FRAM = 34 + DPAA2_FAFE_PSR_SIZE * 8, + FAF_IPV6_FRAM = 42 + DPAA2_FAFE_PSR_SIZE * 8, + FAF_IP_FRAM = 48 + DPAA2_FAFE_PSR_SIZE * 8, + FAF_IP_FRAG_FRAM = 50 + DPAA2_FAFE_PSR_SIZE * 8, + FAF_ICMP_FRAM = 57 + DPAA2_FAFE_PSR_SIZE * 8, + 
FAF_IGMP_FRAM = 58 + DPAA2_FAFE_PSR_SIZE * 8, + FAF_GRE_FRAM = 65 + DPAA2_FAFE_PSR_SIZE * 8, + FAF_UDP_FRAM = 70 + DPAA2_FAFE_PSR_SIZE * 8, + FAF_TCP_FRAM = 72 + DPAA2_FAFE_PSR_SIZE * 8, + FAF_IPSEC_FRAM = 77 + DPAA2_FAFE_PSR_SIZE * 8, + FAF_IPSEC_ESP_FRAM = 78 + DPAA2_FAFE_PSR_SIZE * 8, + FAF_IPSEC_AH_FRAM = 79 + DPAA2_FAFE_PSR_SIZE * 8, + FAF_SCTP_FRAM = 81 + DPAA2_FAFE_PSR_SIZE * 8, + FAF_DCCP_FRAM = 83 + DPAA2_FAFE_PSR_SIZE * 8, + FAF_GTP_FRAM = 87 + DPAA2_FAFE_PSR_SIZE * 8, + FAF_ESP_FRAM = 89 + DPAA2_FAFE_PSR_SIZE * 8, +}; + +enum dpaa2_ecpri_fafe_type { + ECPRI_FAFE_TYPE_0 = (8 - FAFE_ECPRI_FRAM), + ECPRI_FAFE_TYPE_1 = (8 - FAFE_ECPRI_FRAM) | (1 << 1), + ECPRI_FAFE_TYPE_2 = (8 - FAFE_ECPRI_FRAM) | (2 << 1), + ECPRI_FAFE_TYPE_3 = (8 - FAFE_ECPRI_FRAM) | (3 << 1), + ECPRI_FAFE_TYPE_4 = (8 - FAFE_ECPRI_FRAM) | (4 << 1), + ECPRI_FAFE_TYPE_5 = (8 - FAFE_ECPRI_FRAM) | (5 << 1), + ECPRI_FAFE_TYPE_6 = (8 - FAFE_ECPRI_FRAM) | (6 << 1), + ECPRI_FAFE_TYPE_7 = (8 - FAFE_ECPRI_FRAM) | (7 << 1) +}; + +#define DPAA2_PR_ETH_OFF_OFFSET 19 +#define DPAA2_PR_TCI_OFF_OFFSET 21 +#define DPAA2_PR_LAST_ETYPE_OFFSET 23 +#define DPAA2_PR_L3_OFF_OFFSET 27 +#define DPAA2_PR_L4_OFF_OFFSET 30 +#define DPAA2_PR_L5_OFF_OFFSET 31 +#define DPAA2_PR_NXTHDR_OFF_OFFSET 34 + +/* Set by SP for vxlan distribution start*/ +#define DPAA2_VXLAN_IN_TCI_OFFSET 16 + +#define DPAA2_VXLAN_IN_DADDR0_OFFSET 20 +#define DPAA2_VXLAN_IN_DADDR1_OFFSET 22 +#define DPAA2_VXLAN_IN_DADDR2_OFFSET 24 +#define DPAA2_VXLAN_IN_DADDR3_OFFSET 25 +#define DPAA2_VXLAN_IN_DADDR4_OFFSET 26 +#define DPAA2_VXLAN_IN_DADDR5_OFFSET 28 + +#define DPAA2_VXLAN_IN_SADDR0_OFFSET 29 +#define DPAA2_VXLAN_IN_SADDR1_OFFSET 32 +#define DPAA2_VXLAN_IN_SADDR2_OFFSET 33 +#define DPAA2_VXLAN_IN_SADDR3_OFFSET 35 +#define DPAA2_VXLAN_IN_SADDR4_OFFSET 41 +#define DPAA2_VXLAN_IN_SADDR5_OFFSET 42 + +#define DPAA2_VXLAN_VNI_OFFSET 43 +#define DPAA2_VXLAN_IN_TYPE_OFFSET 46 +/* Set by SP for vxlan distribution end*/ + +/* ECPRI shares SP context with VXLAN*/ +#define DPAA2_ECPRI_MSG_OFFSET DPAA2_VXLAN_VNI_OFFSET + +#define DPAA2_ECPRI_MAX_EXTRACT_NB 8 + +struct ipv4_sd_addr_extract_rule { + uint32_t ipv4_src; + uint32_t ipv4_dst; +}; + +struct ipv6_sd_addr_extract_rule { + uint8_t ipv6_src[NH_FLD_IPV6_ADDR_SIZE]; + uint8_t ipv6_dst[NH_FLD_IPV6_ADDR_SIZE]; +}; + +struct ipv4_ds_addr_extract_rule { + uint32_t ipv4_dst; + uint32_t ipv4_src; +}; + +struct ipv6_ds_addr_extract_rule { + uint8_t ipv6_dst[NH_FLD_IPV6_ADDR_SIZE]; + uint8_t ipv6_src[NH_FLD_IPV6_ADDR_SIZE]; +}; + +union ip_addr_extract_rule { + struct ipv4_sd_addr_extract_rule ipv4_sd_addr; + struct ipv6_sd_addr_extract_rule ipv6_sd_addr; + struct ipv4_ds_addr_extract_rule ipv4_ds_addr; + struct ipv6_ds_addr_extract_rule ipv6_ds_addr; +}; + +union ip_src_addr_extract_rule { + uint32_t ipv4_src; + uint8_t ipv6_src[NH_FLD_IPV6_ADDR_SIZE]; +}; + +union ip_dst_addr_extract_rule { + uint32_t ipv4_dst; + uint8_t ipv6_dst[NH_FLD_IPV6_ADDR_SIZE]; +}; + +enum ip_addr_extract_type { + IP_NONE_ADDR_EXTRACT, + IP_SRC_EXTRACT, + IP_DST_EXTRACT, + IP_SRC_DST_EXTRACT, + IP_DST_SRC_EXTRACT +}; + +enum key_prot_type { + /* HW extracts from standard protocol fields*/ + DPAA2_NET_PROT_KEY, + /* HW extracts from FAF of PR*/ + DPAA2_FAF_KEY, + /* HW extracts from PR other than FAF*/ + DPAA2_PR_KEY +}; -struct dpaa2_key_info { +struct key_prot_field { + enum key_prot_type type; + enum net_prot prot; + uint32_t key_field; +}; + +struct dpaa2_raw_region { + uint8_t raw_start; + uint8_t raw_size; +}; + +struct dpaa2_key_profile { + uint8_t num; 
uint8_t key_offset[DPKG_MAX_NUM_OF_EXTRACTS]; uint8_t key_size[DPKG_MAX_NUM_OF_EXTRACTS]; - /* Special for IP address. */ - int ipv4_src_offset; - int ipv4_dst_offset; - int ipv6_src_offset; - int ipv6_dst_offset; - uint8_t key_total_size; + + enum ip_addr_extract_type ip_addr_type; + uint8_t ip_addr_extract_pos; + uint8_t ip_addr_extract_off; + + uint8_t raw_extract_pos; + uint8_t raw_extract_off; + uint8_t raw_extract_num; + + uint8_t l4_src_port_present; + uint8_t l4_src_port_pos; + uint8_t l4_src_port_offset; + uint8_t l4_dst_port_present; + uint8_t l4_dst_port_pos; + uint8_t l4_dst_port_offset; + struct key_prot_field prot_field[DPKG_MAX_NUM_OF_EXTRACTS]; + uint16_t key_max_size; + struct dpaa2_raw_region raw_region; }; struct dpaa2_key_extract { struct dpkg_profile_cfg dpkg; - struct dpaa2_key_info key_info; + struct dpaa2_key_profile key_profile; }; struct extract_s { struct dpaa2_key_extract qos_key_extract; struct dpaa2_key_extract tc_key_extract[MAX_TCS]; - uint64_t qos_extract_param; - uint64_t tc_extract_param[MAX_TCS]; + uint8_t *qos_extract_param; + uint8_t *tc_extract_param[MAX_TCS]; }; struct dpaa2_dev_priv { @@ -212,6 +386,10 @@ struct dpaa2_dev_priv { uint8_t max_cgs; uint8_t cgid_in_use[MAX_RX_QUEUES]; + enum rte_dpaa2_dev_type ep_dev_type; /**< Endpoint Device Type */ + uint16_t ep_object_id; /**< Endpoint DPAA2 Object ID */ + char ep_name[RTE_DEV_NAME_MAX_LEN]; + struct extract_s extract; uint16_t ss_offset; @@ -230,7 +408,11 @@ struct dpaa2_dev_priv { rte_spinlock_t lpbk_qp_lock; uint8_t channel_inuse; - LIST_HEAD(, rte_flow) flows; /**< Configured flow rule handles. */ + /* Stores correction offset for one step timestamping */ + uint16_t ptp_correction_offset; + + struct dpaa2_dev_flow *curr; + LIST_HEAD(, dpaa2_dev_flow) flows; LIST_HEAD(nodes, dpaa2_tm_node) nodes; LIST_HEAD(shaper_profiles, dpaa2_tm_shaper_profile) shaper_profiles; }; @@ -289,7 +471,6 @@ uint16_t dpaa2_dev_tx_multi_txq_ordered(void **queue, void dpaa2_dev_free_eqresp_buf(uint16_t eqresp_ci, struct dpaa2_queue *dpaa2_q); void dpaa2_flow_clean(struct rte_eth_dev *dev); uint16_t dpaa2_dev_tx_conf(void *queue) __rte_unused; -int dpaa2_dev_is_dpaa2(struct rte_eth_dev *dev); int dpaa2_timesync_enable(struct rte_eth_dev *dev); int dpaa2_timesync_disable(struct rte_eth_dev *dev); @@ -306,6 +487,8 @@ int dpaa2_timesync_read_tx_timestamp(struct rte_eth_dev *dev, int dpaa2_dev_recycle_config(struct rte_eth_dev *eth_dev); int dpaa2_dev_recycle_deconfig(struct rte_eth_dev *eth_dev); +int dpaa2_soft_parser_loaded(void); + int dpaa2_dev_recycle_qp_setup(struct rte_dpaa2_device *dpaa2_dev, uint16_t qidx, uint64_t cntx, eth_rx_burst_t tx_lpbk, eth_tx_burst_t rx_lpbk, diff --git a/drivers/net/dpaa2/dpaa2_flow.c b/drivers/net/dpaa2/dpaa2_flow.c index 54b17e97c0..de850ae0cf 100644 --- a/drivers/net/dpaa2/dpaa2_flow.c +++ b/drivers/net/dpaa2/dpaa2_flow.c @@ -1,5 +1,5 @@ /* SPDX-License-Identifier: BSD-3-Clause - * Copyright 2018-2021 NXP + * Copyright 2018-2022 NXP */ #include @@ -9,6 +9,7 @@ #include #include #include +#include #include #include @@ -22,51 +23,50 @@ #include #include -/* Workaround to discriminate the UDP/TCP/SCTP - * with next protocol of l3. - * MC/WRIOP are not able to identify - * the l4 protocol with l4 ports. - */ -int mc_l4_port_identification; - static char *dpaa2_flow_control_log; -static uint16_t dpaa2_flow_miss_flow_id = - DPNI_FS_MISS_DROP; +static uint16_t dpaa2_flow_miss_flow_id; /* Default miss flow id is 0. 
*/ +static int dpaa2_sp_loaded = -1; -#define FIXED_ENTRY_SIZE DPNI_MAX_KEY_SIZE - -enum flow_rule_ipaddr_type { - FLOW_NONE_IPADDR, - FLOW_IPV4_ADDR, - FLOW_IPV6_ADDR +enum dpaa2_flow_entry_size { + DPAA2_FLOW_ENTRY_MIN_SIZE = (DPNI_MAX_KEY_SIZE / 2), + DPAA2_FLOW_ENTRY_MAX_SIZE = DPNI_MAX_KEY_SIZE }; -struct flow_rule_ipaddr { - enum flow_rule_ipaddr_type ipaddr_type; - int qos_ipsrc_offset; - int qos_ipdst_offset; - int fs_ipsrc_offset; - int fs_ipdst_offset; +enum dpaa2_flow_dist_type { + DPAA2_FLOW_QOS_TYPE = 1 << 0, + DPAA2_FLOW_FS_TYPE = 1 << 1 }; -struct rte_flow { - LIST_ENTRY(rte_flow) next; /**< Pointer to the next flow structure. */ +#define DPAA2_FLOW_RAW_OFFSET_FIELD_SHIFT 16 +#define DPAA2_FLOW_MAX_KEY_SIZE 16 +#define DPAA2_PROT_FIELD_STRING_SIZE 16 +#define VXLAN_HF_VNI 0x08 + +struct dpaa2_dev_flow { + LIST_ENTRY(dpaa2_dev_flow) next; struct dpni_rule_cfg qos_rule; + uint8_t *qos_key_addr; + uint8_t *qos_mask_addr; + uint16_t qos_rule_size; struct dpni_rule_cfg fs_rule; uint8_t qos_real_key_size; uint8_t fs_real_key_size; + uint8_t *fs_key_addr; + uint8_t *fs_mask_addr; + uint16_t fs_rule_size; uint8_t tc_id; /** Traffic Class ID. */ uint8_t tc_index; /** index within this Traffic Class. */ - enum rte_flow_action_type action; - /* Special for IP address to specify the offset - * in key/mask. - */ - struct flow_rule_ipaddr ipaddr_rule; - struct dpni_fs_action_cfg action_cfg; + enum rte_flow_action_type action_type; + struct dpni_fs_action_cfg fs_action_cfg; +}; + +struct rte_dpaa2_flow_item { + struct rte_flow_item generic_item; + int in_tunnel; }; static const -enum rte_flow_item_type dpaa2_supported_pattern_type[] = { +enum rte_flow_item_type dpaa2_hp_supported_pattern_type[] = { RTE_FLOW_ITEM_TYPE_END, RTE_FLOW_ITEM_TYPE_ETH, RTE_FLOW_ITEM_TYPE_VLAN, @@ -77,27 +77,27 @@ enum rte_flow_item_type dpaa2_supported_pattern_type[] = { RTE_FLOW_ITEM_TYPE_TCP, RTE_FLOW_ITEM_TYPE_SCTP, RTE_FLOW_ITEM_TYPE_GRE, + RTE_FLOW_ITEM_TYPE_GTP, + RTE_FLOW_ITEM_TYPE_ESP, + RTE_FLOW_ITEM_TYPE_AH, + RTE_FLOW_ITEM_TYPE_RAW }; static const -enum rte_flow_action_type dpaa2_supported_action_type[] = { - RTE_FLOW_ACTION_TYPE_END, - RTE_FLOW_ACTION_TYPE_QUEUE, - RTE_FLOW_ACTION_TYPE_PORT_ID, - RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT, - RTE_FLOW_ACTION_TYPE_RSS +enum rte_flow_item_type dpaa2_sp_supported_pattern_type[] = { + RTE_FLOW_ITEM_TYPE_VXLAN, + RTE_FLOW_ITEM_TYPE_ECPRI }; static const -enum rte_flow_action_type dpaa2_supported_fs_action_type[] = { +enum rte_flow_action_type dpaa2_supported_action_type[] = { + RTE_FLOW_ACTION_TYPE_END, RTE_FLOW_ACTION_TYPE_QUEUE, RTE_FLOW_ACTION_TYPE_PORT_ID, RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT, + RTE_FLOW_ACTION_TYPE_RSS }; -/* Max of enum rte_flow_item_type + 1, for both IPv4 and IPv6*/ -#define DPAA2_FLOW_ITEM_TYPE_GENERIC_IP (RTE_FLOW_ITEM_TYPE_META + 1) - #ifndef __cplusplus static const struct rte_flow_item_eth dpaa2_flow_item_eth_mask = { .hdr.dst_addr.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }, @@ -149,14 +149,43 @@ static const struct rte_flow_item_sctp dpaa2_flow_item_sctp_mask = { }, }; +static const struct rte_flow_item_esp dpaa2_flow_item_esp_mask = { + .hdr = { + .spi = RTE_BE32(0xffffffff), + .seq = RTE_BE32(0xffffffff), + }, +}; + +static const struct rte_flow_item_ah dpaa2_flow_item_ah_mask = { + .spi = RTE_BE32(0xffffffff), +}; + static const struct rte_flow_item_gre dpaa2_flow_item_gre_mask = { .protocol = RTE_BE16(0xffff), }; +static const struct rte_flow_item_vxlan dpaa2_flow_item_vxlan_mask = { + .flags = 0xff, + .vni = { 0xff, 
0xff, 0xff }, +}; + +static const struct rte_flow_item_ecpri dpaa2_flow_item_ecpri_mask = { + .hdr.common.type = 0xff, + .hdr.dummy[0] = RTE_BE32(0xffffffff), + .hdr.dummy[1] = RTE_BE32(0xffffffff), + .hdr.dummy[2] = RTE_BE32(0xffffffff), +}; + +static const struct rte_flow_item_gtp dpaa2_flow_item_gtp_mask = { + .teid = RTE_BE32(0xffffffff), +}; + #endif -static inline void dpaa2_prot_field_string( - enum net_prot prot, uint32_t field, +#define DPAA2_FLOW_DUMP printf + +static inline void +dpaa2_prot_field_string(uint32_t prot, uint32_t field, char *string) { if (!dpaa2_flow_control_log) @@ -226,65 +255,111 @@ static inline void dpaa2_prot_field_string( strcat(string, ".type"); else strcat(string, ".unknown field"); + } else if (prot == NET_PROT_GTP) { + rte_strscpy(string, "gtp", DPAA2_PROT_FIELD_STRING_SIZE); + if (field == NH_FLD_GTP_TEID) + strcat(string, ".teid"); + else + strcat(string, ".unknown field"); + } else if (prot == NET_PROT_IPSEC_ESP) { + rte_strscpy(string, "esp", DPAA2_PROT_FIELD_STRING_SIZE); + if (field == NH_FLD_IPSEC_ESP_SPI) + strcat(string, ".spi"); + else if (field == NH_FLD_IPSEC_ESP_SEQUENCE_NUM) + strcat(string, ".seq"); + else + strcat(string, ".unknown field"); } else { - strcpy(string, "unknown protocol"); + sprintf(string, "unknown protocol(%d)", prot); } } -static inline void dpaa2_flow_qos_table_extracts_log( - const struct dpaa2_dev_priv *priv, FILE *f) +static inline void +dpaa2_flow_qos_extracts_log(const struct dpaa2_dev_priv *priv) { int idx; char string[32]; + const struct dpkg_profile_cfg *dpkg = + &priv->extract.qos_key_extract.dpkg; + const struct dpkg_extract *extract; + enum dpkg_extract_type type; + enum net_prot prot; + uint32_t field; if (!dpaa2_flow_control_log) return; - fprintf(f, "Setup QoS table: number of extracts: %d\r\n", - priv->extract.qos_key_extract.dpkg.num_extracts); - for (idx = 0; idx < priv->extract.qos_key_extract.dpkg.num_extracts; - idx++) { - dpaa2_prot_field_string(priv->extract.qos_key_extract.dpkg - .extracts[idx].extract.from_hdr.prot, - priv->extract.qos_key_extract.dpkg.extracts[idx] - .extract.from_hdr.field, - string); - fprintf(f, "%s", string); - if ((idx + 1) < priv->extract.qos_key_extract.dpkg.num_extracts) - fprintf(f, " / "); - } - fprintf(f, "\r\n"); + DPAA2_FLOW_DUMP("QoS table: %d extracts\r\n", + dpkg->num_extracts); + for (idx = 0; idx < dpkg->num_extracts; idx++) { + extract = &dpkg->extracts[idx]; + type = extract->type; + if (type == DPKG_EXTRACT_FROM_HDR) { + prot = extract->extract.from_hdr.prot; + field = extract->extract.from_hdr.field; + dpaa2_prot_field_string(prot, field, + string); + } else if (type == DPKG_EXTRACT_FROM_DATA) { + sprintf(string, "raw offset/len: %d/%d", + extract->extract.from_data.offset, + extract->extract.from_data.size); + } else if (type == DPKG_EXTRACT_FROM_PARSE) { + sprintf(string, "parse offset/len: %d/%d", + extract->extract.from_parse.offset, + extract->extract.from_parse.size); + } + DPAA2_FLOW_DUMP("%s", string); + if ((idx + 1) < dpkg->num_extracts) + DPAA2_FLOW_DUMP(" / "); + } + DPAA2_FLOW_DUMP("\r\n"); } -static inline void dpaa2_flow_fs_table_extracts_log( - const struct dpaa2_dev_priv *priv, int tc_id, FILE *f) +static inline void +dpaa2_flow_fs_extracts_log(const struct dpaa2_dev_priv *priv, + int tc_id) { int idx; char string[32]; + const struct dpkg_profile_cfg *dpkg = + &priv->extract.tc_key_extract[tc_id].dpkg; + const struct dpkg_extract *extract; + enum dpkg_extract_type type; + enum net_prot prot; + uint32_t field; if (!dpaa2_flow_control_log) 
return; - fprintf(f, "Setup FS table: number of extracts of TC[%d]: %d\r\n", - tc_id, priv->extract.tc_key_extract[tc_id] - .dpkg.num_extracts); - for (idx = 0; idx < priv->extract.tc_key_extract[tc_id] - .dpkg.num_extracts; idx++) { - dpaa2_prot_field_string(priv->extract.tc_key_extract[tc_id] - .dpkg.extracts[idx].extract.from_hdr.prot, - priv->extract.tc_key_extract[tc_id].dpkg.extracts[idx] - .extract.from_hdr.field, - string); - fprintf(f, "%s", string); - if ((idx + 1) < priv->extract.tc_key_extract[tc_id] - .dpkg.num_extracts) - fprintf(f, " / "); - } - fprintf(f, "\r\n"); + DPAA2_FLOW_DUMP("FS table: %d extracts in TC[%d]\r\n", + dpkg->num_extracts, tc_id); + for (idx = 0; idx < dpkg->num_extracts; idx++) { + extract = &dpkg->extracts[idx]; + type = extract->type; + if (type == DPKG_EXTRACT_FROM_HDR) { + prot = extract->extract.from_hdr.prot; + field = extract->extract.from_hdr.field; + dpaa2_prot_field_string(prot, field, + string); + } else if (type == DPKG_EXTRACT_FROM_DATA) { + sprintf(string, "raw offset/len: %d/%d", + extract->extract.from_data.offset, + extract->extract.from_data.size); + } else if (type == DPKG_EXTRACT_FROM_PARSE) { + sprintf(string, "parse offset/len: %d/%d", + extract->extract.from_parse.offset, + extract->extract.from_parse.size); + } + DPAA2_FLOW_DUMP("%s", string); + if ((idx + 1) < dpkg->num_extracts) + DPAA2_FLOW_DUMP(" / "); + } + DPAA2_FLOW_DUMP("\r\n"); } -static inline void dpaa2_flow_qos_entry_log( - const char *log_info, const struct rte_flow *flow, int qos_index, FILE *f) +static inline void +dpaa2_flow_qos_entry_log(const char *log_info, + const struct dpaa2_dev_flow *flow, int qos_index) { int idx; uint8_t *key, *mask; @@ -292,27 +367,34 @@ static inline void dpaa2_flow_qos_entry_log( if (!dpaa2_flow_control_log) return; - fprintf(f, "\r\n%s QoS entry[%d] for TC[%d], extracts size is %d\r\n", - log_info, qos_index, flow->tc_id, flow->qos_real_key_size); - - key = (uint8_t *)(size_t)flow->qos_rule.key_iova; - mask = (uint8_t *)(size_t)flow->qos_rule.mask_iova; + if (qos_index >= 0) { + DPAA2_FLOW_DUMP("%s QoS entry[%d](size %d/%d) for TC[%d]\r\n", + log_info, qos_index, flow->qos_rule_size, + flow->qos_rule.key_size, + flow->tc_id); + } else { + DPAA2_FLOW_DUMP("%s QoS entry(size %d/%d) for TC[%d]\r\n", + log_info, flow->qos_rule_size, + flow->qos_rule.key_size, + flow->tc_id); + } - fprintf(f, "key:\r\n"); - for (idx = 0; idx < flow->qos_real_key_size; idx++) - fprintf(f, "%02x ", key[idx]); + key = flow->qos_key_addr; + mask = flow->qos_mask_addr; - fprintf(f, "\r\nmask:\r\n"); - for (idx = 0; idx < flow->qos_real_key_size; idx++) - fprintf(f, "%02x ", mask[idx]); + DPAA2_FLOW_DUMP("key:\r\n"); + for (idx = 0; idx < flow->qos_rule_size; idx++) + DPAA2_FLOW_DUMP("%02x ", key[idx]); - fprintf(f, "\r\n%s QoS ipsrc: %d, ipdst: %d\r\n", log_info, - flow->ipaddr_rule.qos_ipsrc_offset, - flow->ipaddr_rule.qos_ipdst_offset); + DPAA2_FLOW_DUMP("\r\nmask:\r\n"); + for (idx = 0; idx < flow->qos_rule_size; idx++) + DPAA2_FLOW_DUMP("%02x ", mask[idx]); + DPAA2_FLOW_DUMP("\r\n"); } -static inline void dpaa2_flow_fs_entry_log( - const char *log_info, const struct rte_flow *flow, FILE *f) +static inline void +dpaa2_flow_fs_entry_log(const char *log_info, + const struct dpaa2_dev_flow *flow) { int idx; uint8_t *key, *mask; @@ -320,1655 +402,2727 @@ static inline void dpaa2_flow_fs_entry_log( if (!dpaa2_flow_control_log) return; - fprintf(f, "\r\n%s FS/TC entry[%d] of TC[%d], extracts size is %d\r\n", - log_info, flow->tc_index, flow->tc_id, 
flow->fs_real_key_size); - - key = (uint8_t *)(size_t)flow->fs_rule.key_iova; - mask = (uint8_t *)(size_t)flow->fs_rule.mask_iova; + DPAA2_FLOW_DUMP("%s FS/TC entry[%d](size %d/%d) of TC[%d]\r\n", + log_info, flow->tc_index, + flow->fs_rule_size, flow->fs_rule.key_size, + flow->tc_id); - fprintf(f, "key:\r\n"); - for (idx = 0; idx < flow->fs_real_key_size; idx++) - fprintf(f, "%02x ", key[idx]); + key = flow->fs_key_addr; + mask = flow->fs_mask_addr; - fprintf(f, "\r\nmask:\r\n"); - for (idx = 0; idx < flow->fs_real_key_size; idx++) - fprintf(f, "%02x ", mask[idx]); + DPAA2_FLOW_DUMP("key:\r\n"); + for (idx = 0; idx < flow->fs_rule_size; idx++) + DPAA2_FLOW_DUMP("%02x ", key[idx]); - fprintf(f, "\r\n%s FS ipsrc: %d, ipdst: %d\r\n", log_info, - flow->ipaddr_rule.fs_ipsrc_offset, - flow->ipaddr_rule.fs_ipdst_offset); + DPAA2_FLOW_DUMP("\r\nmask:\r\n"); + for (idx = 0; idx < flow->fs_rule_size; idx++) + DPAA2_FLOW_DUMP("%02x ", mask[idx]); + DPAA2_FLOW_DUMP("\r\n"); } -static inline void dpaa2_flow_extract_key_set( - struct dpaa2_key_info *key_info, int index, uint8_t size) -{ - key_info->key_size[index] = size; - if (index > 0) { - key_info->key_offset[index] = - key_info->key_offset[index - 1] + - key_info->key_size[index - 1]; - } else { - key_info->key_offset[index] = 0; - } - key_info->key_total_size += size; -} +/** For LX2160A, LS2088A and LS1088A*/ +#define WRIOP_CCSR_BASE 0x8b80000 +#define WRIOP_CCSR_CTLU_OFFSET 0 +#define WRIOP_CCSR_CTLU_PARSER_OFFSET 0 +#define WRIOP_CCSR_CTLU_PARSER_INGRESS_OFFSET 0 + +#define WRIOP_INGRESS_PARSER_PHY \ + (WRIOP_CCSR_BASE + WRIOP_CCSR_CTLU_OFFSET + \ + WRIOP_CCSR_CTLU_PARSER_OFFSET + \ + WRIOP_CCSR_CTLU_PARSER_INGRESS_OFFSET) + +struct dpaa2_parser_ccsr { + uint32_t psr_cfg; + uint32_t psr_idle; + uint32_t psr_pclm; + uint8_t psr_ver_min; + uint8_t psr_ver_maj; + uint8_t psr_id1_l; + uint8_t psr_id1_h; + uint32_t psr_rev2; + uint8_t rsv[0x2c]; + uint8_t sp_ins[4032]; +}; -static int dpaa2_flow_extract_add( - struct dpaa2_key_extract *key_extract, - enum net_prot prot, - uint32_t field, uint8_t field_size) +int +dpaa2_soft_parser_loaded(void) { - int index, ip_src = -1, ip_dst = -1; - struct dpkg_profile_cfg *dpkg = &key_extract->dpkg; - struct dpaa2_key_info *key_info = &key_extract->key_info; - - if (dpkg->num_extracts >= - DPKG_MAX_NUM_OF_EXTRACTS) { - DPAA2_PMD_WARN("Number of extracts overflows"); - return -1; - } - /* Before reorder, the IP SRC and IP DST are already last - * extract(s). 
- */ - for (index = 0; index < dpkg->num_extracts; index++) { - if (dpkg->extracts[index].extract.from_hdr.prot == - NET_PROT_IP) { - if (dpkg->extracts[index].extract.from_hdr.field == - NH_FLD_IP_SRC) { - ip_src = index; - } - if (dpkg->extracts[index].extract.from_hdr.field == - NH_FLD_IP_DST) { - ip_dst = index; - } - } - } + int fd, i, ret = 0; + struct dpaa2_parser_ccsr *parser_ccsr = NULL; - if (ip_src >= 0) - RTE_ASSERT((ip_src + 2) >= dpkg->num_extracts); + dpaa2_flow_control_log = getenv("DPAA2_FLOW_CONTROL_LOG"); - if (ip_dst >= 0) - RTE_ASSERT((ip_dst + 2) >= dpkg->num_extracts); + if (dpaa2_sp_loaded >= 0) + return dpaa2_sp_loaded; - if (prot == NET_PROT_IP && - (field == NH_FLD_IP_SRC || - field == NH_FLD_IP_DST)) { - index = dpkg->num_extracts; - } else { - if (ip_src >= 0 && ip_dst >= 0) - index = dpkg->num_extracts - 2; - else if (ip_src >= 0 || ip_dst >= 0) - index = dpkg->num_extracts - 1; - else - index = dpkg->num_extracts; + fd = open("/dev/mem", O_RDWR | O_SYNC); + if (fd < 0) { + DPAA2_PMD_ERR("open \"/dev/mem\" ERROR(%d)", fd); + ret = fd; + goto exit; } - dpkg->extracts[index].type = DPKG_EXTRACT_FROM_HDR; - dpkg->extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD; - dpkg->extracts[index].extract.from_hdr.prot = prot; - dpkg->extracts[index].extract.from_hdr.field = field; - if (prot == NET_PROT_IP && - (field == NH_FLD_IP_SRC || - field == NH_FLD_IP_DST)) { - dpaa2_flow_extract_key_set(key_info, index, 0); - } else { - dpaa2_flow_extract_key_set(key_info, index, field_size); + parser_ccsr = mmap(NULL, sizeof(struct dpaa2_parser_ccsr), + PROT_READ | PROT_WRITE, MAP_SHARED, fd, + WRIOP_INGRESS_PARSER_PHY); + if (!parser_ccsr) { + DPAA2_PMD_ERR("Map 0x%" PRIx64 "(size=0x%x) failed", + (uint64_t)WRIOP_INGRESS_PARSER_PHY, + (uint32_t)sizeof(struct dpaa2_parser_ccsr)); + ret = -ENOBUFS; + goto exit; } - if (prot == NET_PROT_IP) { - if (field == NH_FLD_IP_SRC) { - if (key_info->ipv4_dst_offset >= 0) { - key_info->ipv4_src_offset = - key_info->ipv4_dst_offset + - NH_FLD_IPV4_ADDR_SIZE; - } else { - key_info->ipv4_src_offset = - key_info->key_offset[index - 1] + - key_info->key_size[index - 1]; - } - if (key_info->ipv6_dst_offset >= 0) { - key_info->ipv6_src_offset = - key_info->ipv6_dst_offset + - NH_FLD_IPV6_ADDR_SIZE; - } else { - key_info->ipv6_src_offset = - key_info->key_offset[index - 1] + - key_info->key_size[index - 1]; - } - } else if (field == NH_FLD_IP_DST) { - if (key_info->ipv4_src_offset >= 0) { - key_info->ipv4_dst_offset = - key_info->ipv4_src_offset + - NH_FLD_IPV4_ADDR_SIZE; - } else { - key_info->ipv4_dst_offset = - key_info->key_offset[index - 1] + - key_info->key_size[index - 1]; - } - if (key_info->ipv6_src_offset >= 0) { - key_info->ipv6_dst_offset = - key_info->ipv6_src_offset + - NH_FLD_IPV6_ADDR_SIZE; - } else { - key_info->ipv6_dst_offset = - key_info->key_offset[index - 1] + - key_info->key_size[index - 1]; - } + DPAA2_PMD_INFO("Parser ID:0x%02x%02x, Rev:major(%02x), minor(%02x)", + parser_ccsr->psr_id1_h, parser_ccsr->psr_id1_l, + parser_ccsr->psr_ver_maj, parser_ccsr->psr_ver_min); + + if (dpaa2_flow_control_log) { + for (i = 0; i < 64; i++) { + DPAA2_FLOW_DUMP("%02x ", + parser_ccsr->sp_ins[i]); + if (!((i + 1) % 16)) + DPAA2_FLOW_DUMP("\r\n"); } } - if (index == dpkg->num_extracts) { - dpkg->num_extracts++; - return 0; + for (i = 0; i < 16; i++) { + if (parser_ccsr->sp_ins[i]) { + dpaa2_sp_loaded = 1; + break; + } } + if (dpaa2_sp_loaded < 0) + dpaa2_sp_loaded = 0; - if (ip_src >= 0) { - ip_src++; - dpkg->extracts[ip_src].type = - 
DPKG_EXTRACT_FROM_HDR; - dpkg->extracts[ip_src].extract.from_hdr.type = - DPKG_FULL_FIELD; - dpkg->extracts[ip_src].extract.from_hdr.prot = - NET_PROT_IP; - dpkg->extracts[ip_src].extract.from_hdr.field = - NH_FLD_IP_SRC; - dpaa2_flow_extract_key_set(key_info, ip_src, 0); - key_info->ipv4_src_offset += field_size; - key_info->ipv6_src_offset += field_size; - } - if (ip_dst >= 0) { - ip_dst++; - dpkg->extracts[ip_dst].type = - DPKG_EXTRACT_FROM_HDR; - dpkg->extracts[ip_dst].extract.from_hdr.type = - DPKG_FULL_FIELD; - dpkg->extracts[ip_dst].extract.from_hdr.prot = - NET_PROT_IP; - dpkg->extracts[ip_dst].extract.from_hdr.field = - NH_FLD_IP_DST; - dpaa2_flow_extract_key_set(key_info, ip_dst, 0); - key_info->ipv4_dst_offset += field_size; - key_info->ipv6_dst_offset += field_size; - } + ret = dpaa2_sp_loaded; - dpkg->num_extracts++; +exit: + if (parser_ccsr) + munmap(parser_ccsr, sizeof(struct dpaa2_parser_ccsr)); + if (fd >= 0) + close(fd); - return 0; + return ret; } -static int dpaa2_flow_extract_add_raw(struct dpaa2_key_extract *key_extract, - int size) +static int +dpaa2_flow_ip_address_extract(enum net_prot prot, + uint32_t field) { - struct dpkg_profile_cfg *dpkg = &key_extract->dpkg; - struct dpaa2_key_info *key_info = &key_extract->key_info; - int last_extract_size, index; - - if (dpkg->num_extracts != 0 && dpkg->extracts[0].type != - DPKG_EXTRACT_FROM_DATA) { - DPAA2_PMD_WARN("RAW extract cannot be combined with others"); - return -1; - } - - last_extract_size = (size % DPAA2_FLOW_MAX_KEY_SIZE); - dpkg->num_extracts = (size / DPAA2_FLOW_MAX_KEY_SIZE); - if (last_extract_size) - dpkg->num_extracts++; - else - last_extract_size = DPAA2_FLOW_MAX_KEY_SIZE; + if (prot == NET_PROT_IPV4 && + (field == NH_FLD_IPV4_SRC_IP || + field == NH_FLD_IPV4_DST_IP)) + return true; + else if (prot == NET_PROT_IPV6 && + (field == NH_FLD_IPV6_SRC_IP || + field == NH_FLD_IPV6_DST_IP)) + return true; + else if (prot == NET_PROT_IP && + (field == NH_FLD_IP_SRC || + field == NH_FLD_IP_DST)) + return true; - for (index = 0; index < dpkg->num_extracts; index++) { - dpkg->extracts[index].type = DPKG_EXTRACT_FROM_DATA; - if (index == dpkg->num_extracts - 1) - dpkg->extracts[index].extract.from_data.size = - last_extract_size; - else - dpkg->extracts[index].extract.from_data.size = - DPAA2_FLOW_MAX_KEY_SIZE; - dpkg->extracts[index].extract.from_data.offset = - DPAA2_FLOW_MAX_KEY_SIZE * index; - } + return false; +} - key_info->key_total_size = size; - return 0; +static int +dpaa2_flow_l4_src_port_extract(enum net_prot prot, + uint32_t field) +{ + if (prot == NET_PROT_TCP && + field == NH_FLD_TCP_PORT_SRC) + return true; + else if (prot == NET_PROT_UDP && + field == NH_FLD_UDP_PORT_SRC) + return true; + else if (prot == NET_PROT_SCTP && + field == NH_FLD_SCTP_PORT_SRC) + return true; + + return false; } -/* Protocol discrimination. - * Discriminate IPv4/IPv6/vLan by Eth type. - * Discriminate UDP/TCP/ICMP by next proto of IP. 
- */ -static inline int -dpaa2_flow_proto_discrimination_extract( - struct dpaa2_key_extract *key_extract, - enum rte_flow_item_type type) +static int +dpaa2_flow_l4_dst_port_extract(enum net_prot prot, + uint32_t field) { - if (type == RTE_FLOW_ITEM_TYPE_ETH) { - return dpaa2_flow_extract_add( - key_extract, NET_PROT_ETH, - NH_FLD_ETH_TYPE, - sizeof(rte_be16_t)); - } else if (type == (enum rte_flow_item_type) - DPAA2_FLOW_ITEM_TYPE_GENERIC_IP) { - return dpaa2_flow_extract_add( - key_extract, NET_PROT_IP, - NH_FLD_IP_PROTO, - NH_FLD_IP_PROTO_SIZE); - } - - return -1; + if (prot == NET_PROT_TCP && + field == NH_FLD_TCP_PORT_DST) + return true; + else if (prot == NET_PROT_UDP && + field == NH_FLD_UDP_PORT_DST) + return true; + else if (prot == NET_PROT_SCTP && + field == NH_FLD_SCTP_PORT_DST) + return true; + + return false; } -static inline int dpaa2_flow_extract_search( - struct dpkg_profile_cfg *dpkg, - enum net_prot prot, uint32_t field) +static int +dpaa2_flow_add_qos_rule(struct dpaa2_dev_priv *priv, + struct dpaa2_dev_flow *flow) { - int i; + uint16_t qos_index; + int ret; + struct fsl_mc_io *dpni = priv->hw; - for (i = 0; i < dpkg->num_extracts; i++) { - if (dpkg->extracts[i].extract.from_hdr.prot == prot && - dpkg->extracts[i].extract.from_hdr.field == field) { - return i; - } + if (priv->num_rx_tc <= 1 && + flow->action_type != RTE_FLOW_ACTION_TYPE_RSS) { + DPAA2_PMD_WARN("No QoS Table for FS"); + return -EINVAL; } - return -1; -} + /* QoS entry added is only effective for multiple TCs.*/ + qos_index = flow->tc_id * priv->fs_entries + flow->tc_index; + if (qos_index >= priv->qos_entries) { + DPAA2_PMD_ERR("QoS table full(%d >= %d)", + qos_index, priv->qos_entries); + return -EINVAL; + } -static inline int dpaa2_flow_extract_key_offset( - struct dpaa2_key_extract *key_extract, - enum net_prot prot, uint32_t field) -{ - int i; - struct dpkg_profile_cfg *dpkg = &key_extract->dpkg; - struct dpaa2_key_info *key_info = &key_extract->key_info; + dpaa2_flow_qos_entry_log("Start add", flow, qos_index); - if (prot == NET_PROT_IPV4 || - prot == NET_PROT_IPV6) - i = dpaa2_flow_extract_search(dpkg, NET_PROT_IP, field); - else - i = dpaa2_flow_extract_search(dpkg, prot, field); - - if (i >= 0) { - if (prot == NET_PROT_IPV4 && field == NH_FLD_IP_SRC) - return key_info->ipv4_src_offset; - else if (prot == NET_PROT_IPV4 && field == NH_FLD_IP_DST) - return key_info->ipv4_dst_offset; - else if (prot == NET_PROT_IPV6 && field == NH_FLD_IP_SRC) - return key_info->ipv6_src_offset; - else if (prot == NET_PROT_IPV6 && field == NH_FLD_IP_DST) - return key_info->ipv6_dst_offset; - else - return key_info->key_offset[i]; - } else { - return -1; + ret = dpni_add_qos_entry(dpni, CMD_PRI_LOW, + priv->token, &flow->qos_rule, + flow->tc_id, qos_index, + 0, 0); + if (ret < 0) { + DPAA2_PMD_ERR("Add entry(%d) to table(%d) failed", + qos_index, flow->tc_id); + return ret; } -} -struct proto_discrimination { - enum rte_flow_item_type type; - union { - rte_be16_t eth_type; - uint8_t ip_proto; - }; -}; + return 0; +} static int -dpaa2_flow_proto_discrimination_rule( - struct dpaa2_dev_priv *priv, struct rte_flow *flow, - struct proto_discrimination proto, int group) +dpaa2_flow_add_fs_rule(struct dpaa2_dev_priv *priv, + struct dpaa2_dev_flow *flow) { - enum net_prot prot; - uint32_t field; - int offset; - size_t key_iova; - size_t mask_iova; - rte_be16_t eth_type; - uint8_t ip_proto; - - if (proto.type == RTE_FLOW_ITEM_TYPE_ETH) { - prot = NET_PROT_ETH; - field = NH_FLD_ETH_TYPE; - } else if (proto.type == 
DPAA2_FLOW_ITEM_TYPE_GENERIC_IP) { - prot = NET_PROT_IP; - field = NH_FLD_IP_PROTO; - } else { - DPAA2_PMD_ERR( - "Only Eth and IP support to discriminate next proto."); - return -1; - } + int ret; + struct fsl_mc_io *dpni = priv->hw; - offset = dpaa2_flow_extract_key_offset(&priv->extract.qos_key_extract, - prot, field); - if (offset < 0) { - DPAA2_PMD_ERR("QoS prot %d field %d extract failed", - prot, field); - return -1; - } - key_iova = flow->qos_rule.key_iova + offset; - mask_iova = flow->qos_rule.mask_iova + offset; - if (proto.type == RTE_FLOW_ITEM_TYPE_ETH) { - eth_type = proto.eth_type; - memcpy((void *)key_iova, (const void *)(ð_type), - sizeof(rte_be16_t)); - eth_type = 0xffff; - memcpy((void *)mask_iova, (const void *)(ð_type), - sizeof(rte_be16_t)); - } else { - ip_proto = proto.ip_proto; - memcpy((void *)key_iova, (const void *)(&ip_proto), - sizeof(uint8_t)); - ip_proto = 0xff; - memcpy((void *)mask_iova, (const void *)(&ip_proto), - sizeof(uint8_t)); + if (flow->tc_index >= priv->fs_entries) { + DPAA2_PMD_ERR("FS table full(%d >= %d)", + flow->tc_index, priv->fs_entries); + return -EINVAL; } - offset = dpaa2_flow_extract_key_offset( - &priv->extract.tc_key_extract[group], - prot, field); - if (offset < 0) { - DPAA2_PMD_ERR("FS prot %d field %d extract failed", - prot, field); - return -1; - } - key_iova = flow->fs_rule.key_iova + offset; - mask_iova = flow->fs_rule.mask_iova + offset; - - if (proto.type == RTE_FLOW_ITEM_TYPE_ETH) { - eth_type = proto.eth_type; - memcpy((void *)key_iova, (const void *)(ð_type), - sizeof(rte_be16_t)); - eth_type = 0xffff; - memcpy((void *)mask_iova, (const void *)(ð_type), - sizeof(rte_be16_t)); - } else { - ip_proto = proto.ip_proto; - memcpy((void *)key_iova, (const void *)(&ip_proto), - sizeof(uint8_t)); - ip_proto = 0xff; - memcpy((void *)mask_iova, (const void *)(&ip_proto), - sizeof(uint8_t)); + dpaa2_flow_fs_entry_log("Start add", flow); + + ret = dpni_add_fs_entry(dpni, CMD_PRI_LOW, + priv->token, flow->tc_id, + flow->tc_index, &flow->fs_rule, + &flow->fs_action_cfg); + if (ret < 0) { + DPAA2_PMD_ERR("Add rule(%d) to FS table(%d) failed", + flow->tc_index, flow->tc_id); + return ret; } return 0; } -static inline int -dpaa2_flow_rule_data_set( - struct dpaa2_key_extract *key_extract, - struct dpni_rule_cfg *rule, - enum net_prot prot, uint32_t field, - const void *key, const void *mask, int size) +static int +dpaa2_flow_rule_insert_hole(struct dpaa2_dev_flow *flow, + int offset, int size, + enum dpaa2_flow_dist_type dist_type) { - int offset = dpaa2_flow_extract_key_offset(key_extract, - prot, field); - - if (offset < 0) { - DPAA2_PMD_ERR("prot %d, field %d extract failed", - prot, field); - return -1; + int end; + + if (dist_type & DPAA2_FLOW_QOS_TYPE) { + end = flow->qos_rule_size; + if (end > offset) { + memmove(flow->qos_key_addr + offset + size, + flow->qos_key_addr + offset, + end - offset); + memset(flow->qos_key_addr + offset, + 0, size); + + memmove(flow->qos_mask_addr + offset + size, + flow->qos_mask_addr + offset, + end - offset); + memset(flow->qos_mask_addr + offset, + 0, size); + } + flow->qos_rule_size += size; } - memcpy((void *)(size_t)(rule->key_iova + offset), key, size); - memcpy((void *)(size_t)(rule->mask_iova + offset), mask, size); + if (dist_type & DPAA2_FLOW_FS_TYPE) { + end = flow->fs_rule_size; + if (end > offset) { + memmove(flow->fs_key_addr + offset + size, + flow->fs_key_addr + offset, + end - offset); + memset(flow->fs_key_addr + offset, + 0, size); + + memmove(flow->fs_mask_addr + offset + size, + 
flow->fs_mask_addr + offset, + end - offset); + memset(flow->fs_mask_addr + offset, + 0, size); + } + flow->fs_rule_size += size; + } return 0; } -static inline int -dpaa2_flow_rule_data_set_raw(struct dpni_rule_cfg *rule, - const void *key, const void *mask, int size) +static int +dpaa2_flow_rule_add_all(struct dpaa2_dev_priv *priv, + enum dpaa2_flow_dist_type dist_type, + uint16_t entry_size, uint8_t tc_id) { - int offset = 0; + struct dpaa2_dev_flow *curr = LIST_FIRST(&priv->flows); + int ret; - memcpy((void *)(size_t)(rule->key_iova + offset), key, size); - memcpy((void *)(size_t)(rule->mask_iova + offset), mask, size); + while (curr) { + if (dist_type & DPAA2_FLOW_QOS_TYPE) { + if (priv->num_rx_tc > 1 || + curr->action_type == + RTE_FLOW_ACTION_TYPE_RSS) { + curr->qos_rule.key_size = entry_size; + ret = dpaa2_flow_add_qos_rule(priv, curr); + if (ret) + return ret; + } + } + if (dist_type & DPAA2_FLOW_FS_TYPE && + curr->tc_id == tc_id) { + curr->fs_rule.key_size = entry_size; + ret = dpaa2_flow_add_fs_rule(priv, curr); + if (ret) + return ret; + } + curr = LIST_NEXT(curr, next); + } return 0; } -static inline int -_dpaa2_flow_rule_move_ipaddr_tail( - struct dpaa2_key_extract *key_extract, - struct dpni_rule_cfg *rule, int src_offset, - uint32_t field, bool ipv4) +static int +dpaa2_flow_qos_rule_insert_hole(struct dpaa2_dev_priv *priv, + int offset, int size) { - size_t key_src; - size_t mask_src; - size_t key_dst; - size_t mask_dst; - int dst_offset, len; - enum net_prot prot; - char tmp[NH_FLD_IPV6_ADDR_SIZE]; + struct dpaa2_dev_flow *curr; + int ret; - if (field != NH_FLD_IP_SRC && - field != NH_FLD_IP_DST) { - DPAA2_PMD_ERR("Field of IP addr reorder must be IP SRC/DST"); - return -1; + curr = priv->curr; + if (!curr) { + DPAA2_PMD_ERR("Current qos flow insert hole failed."); + return -EINVAL; + } else { + ret = dpaa2_flow_rule_insert_hole(curr, offset, size, + DPAA2_FLOW_QOS_TYPE); + if (ret) + return ret; } - if (ipv4) - prot = NET_PROT_IPV4; - else - prot = NET_PROT_IPV6; - dst_offset = dpaa2_flow_extract_key_offset(key_extract, - prot, field); - if (dst_offset < 0) { - DPAA2_PMD_ERR("Field %d reorder extract failed", field); - return -1; - } - key_src = rule->key_iova + src_offset; - mask_src = rule->mask_iova + src_offset; - key_dst = rule->key_iova + dst_offset; - mask_dst = rule->mask_iova + dst_offset; - if (ipv4) - len = sizeof(rte_be32_t); - else - len = NH_FLD_IPV6_ADDR_SIZE; - - memcpy(tmp, (char *)key_src, len); - memset((char *)key_src, 0, len); - memcpy((char *)key_dst, tmp, len); - memcpy(tmp, (char *)mask_src, len); - memset((char *)mask_src, 0, len); - memcpy((char *)mask_dst, tmp, len); + curr = LIST_FIRST(&priv->flows); + while (curr) { + ret = dpaa2_flow_rule_insert_hole(curr, offset, size, + DPAA2_FLOW_QOS_TYPE); + if (ret) + return ret; + curr = LIST_NEXT(curr, next); + } return 0; } -static inline int -dpaa2_flow_rule_move_ipaddr_tail( - struct rte_flow *flow, struct dpaa2_dev_priv *priv, - int fs_group) +static int +dpaa2_flow_fs_rule_insert_hole(struct dpaa2_dev_priv *priv, + int offset, int size, int tc_id) { + struct dpaa2_dev_flow *curr; int ret; - enum net_prot prot; - if (flow->ipaddr_rule.ipaddr_type == FLOW_NONE_IPADDR) - return 0; + curr = priv->curr; + if (!curr || curr->tc_id != tc_id) { + DPAA2_PMD_ERR("Current flow insert hole failed."); + return -EINVAL; + } else { + ret = dpaa2_flow_rule_insert_hole(curr, offset, size, + DPAA2_FLOW_FS_TYPE); + if (ret) + return ret; + } - if (flow->ipaddr_rule.ipaddr_type == FLOW_IPV4_ADDR) - prot = 
NET_PROT_IPV4; - else - prot = NET_PROT_IPV6; - - if (flow->ipaddr_rule.qos_ipsrc_offset >= 0) { - ret = _dpaa2_flow_rule_move_ipaddr_tail( - &priv->extract.qos_key_extract, - &flow->qos_rule, - flow->ipaddr_rule.qos_ipsrc_offset, - NH_FLD_IP_SRC, prot == NET_PROT_IPV4); - if (ret) { - DPAA2_PMD_ERR("QoS src address reorder failed"); - return -1; - } - flow->ipaddr_rule.qos_ipsrc_offset = - dpaa2_flow_extract_key_offset( - &priv->extract.qos_key_extract, - prot, NH_FLD_IP_SRC); - } - - if (flow->ipaddr_rule.qos_ipdst_offset >= 0) { - ret = _dpaa2_flow_rule_move_ipaddr_tail( - &priv->extract.qos_key_extract, - &flow->qos_rule, - flow->ipaddr_rule.qos_ipdst_offset, - NH_FLD_IP_DST, prot == NET_PROT_IPV4); - if (ret) { - DPAA2_PMD_ERR("QoS dst address reorder failed"); - return -1; - } - flow->ipaddr_rule.qos_ipdst_offset = - dpaa2_flow_extract_key_offset( - &priv->extract.qos_key_extract, - prot, NH_FLD_IP_DST); - } - - if (flow->ipaddr_rule.fs_ipsrc_offset >= 0) { - ret = _dpaa2_flow_rule_move_ipaddr_tail( - &priv->extract.tc_key_extract[fs_group], - &flow->fs_rule, - flow->ipaddr_rule.fs_ipsrc_offset, - NH_FLD_IP_SRC, prot == NET_PROT_IPV4); - if (ret) { - DPAA2_PMD_ERR("FS src address reorder failed"); - return -1; - } - flow->ipaddr_rule.fs_ipsrc_offset = - dpaa2_flow_extract_key_offset( - &priv->extract.tc_key_extract[fs_group], - prot, NH_FLD_IP_SRC); - } - if (flow->ipaddr_rule.fs_ipdst_offset >= 0) { - ret = _dpaa2_flow_rule_move_ipaddr_tail( - &priv->extract.tc_key_extract[fs_group], - &flow->fs_rule, - flow->ipaddr_rule.fs_ipdst_offset, - NH_FLD_IP_DST, prot == NET_PROT_IPV4); - if (ret) { - DPAA2_PMD_ERR("FS dst address reorder failed"); - return -1; + curr = LIST_FIRST(&priv->flows); + + while (curr) { + if (curr->tc_id != tc_id) { + curr = LIST_NEXT(curr, next); + continue; } - flow->ipaddr_rule.fs_ipdst_offset = - dpaa2_flow_extract_key_offset( - &priv->extract.tc_key_extract[fs_group], - prot, NH_FLD_IP_DST); + ret = dpaa2_flow_rule_insert_hole(curr, offset, size, + DPAA2_FLOW_FS_TYPE); + if (ret) + return ret; + curr = LIST_NEXT(curr, next); } return 0; } static int -dpaa2_flow_extract_support( - const uint8_t *mask_src, - enum rte_flow_item_type type) +dpaa2_flow_faf_advance(struct dpaa2_dev_priv *priv, + int faf_byte, enum dpaa2_flow_dist_type dist_type, int tc_id, + int *insert_offset) { - char mask[64]; - int i, size = 0; - const char *mask_support = 0; + int offset, ret; + struct dpaa2_key_profile *key_profile; + int num, pos; - switch (type) { - case RTE_FLOW_ITEM_TYPE_ETH: - mask_support = (const char *)&dpaa2_flow_item_eth_mask; - size = sizeof(struct rte_flow_item_eth); - break; - case RTE_FLOW_ITEM_TYPE_VLAN: - mask_support = (const char *)&dpaa2_flow_item_vlan_mask; - size = sizeof(struct rte_flow_item_vlan); - break; - case RTE_FLOW_ITEM_TYPE_IPV4: - mask_support = (const char *)&dpaa2_flow_item_ipv4_mask; - size = sizeof(struct rte_flow_item_ipv4); - break; - case RTE_FLOW_ITEM_TYPE_IPV6: - mask_support = (const char *)&dpaa2_flow_item_ipv6_mask; - size = sizeof(struct rte_flow_item_ipv6); - break; - case RTE_FLOW_ITEM_TYPE_ICMP: - mask_support = (const char *)&dpaa2_flow_item_icmp_mask; - size = sizeof(struct rte_flow_item_icmp); - break; - case RTE_FLOW_ITEM_TYPE_UDP: - mask_support = (const char *)&dpaa2_flow_item_udp_mask; - size = sizeof(struct rte_flow_item_udp); - break; - case RTE_FLOW_ITEM_TYPE_TCP: - mask_support = (const char *)&dpaa2_flow_item_tcp_mask; - size = sizeof(struct rte_flow_item_tcp); - break; - case RTE_FLOW_ITEM_TYPE_SCTP: - mask_support 
= (const char *)&dpaa2_flow_item_sctp_mask; - size = sizeof(struct rte_flow_item_sctp); - break; - case RTE_FLOW_ITEM_TYPE_GRE: - mask_support = (const char *)&dpaa2_flow_item_gre_mask; - size = sizeof(struct rte_flow_item_gre); - break; - default: - return -1; + if (dist_type == DPAA2_FLOW_QOS_TYPE) + key_profile = &priv->extract.qos_key_extract.key_profile; + else + key_profile = &priv->extract.tc_key_extract[tc_id].key_profile; + + num = key_profile->num; + + if (num >= DPKG_MAX_NUM_OF_EXTRACTS) { + DPAA2_PMD_ERR("Number of extracts overflows"); + return -EINVAL; } - memcpy(mask, mask_support, size); + if (key_profile->ip_addr_type != IP_NONE_ADDR_EXTRACT) { + offset = key_profile->ip_addr_extract_off; + pos = key_profile->ip_addr_extract_pos; + key_profile->ip_addr_extract_pos++; + key_profile->ip_addr_extract_off++; + if (dist_type == DPAA2_FLOW_QOS_TYPE) { + ret = dpaa2_flow_qos_rule_insert_hole(priv, + offset, 1); + } else { + ret = dpaa2_flow_fs_rule_insert_hole(priv, + offset, 1, tc_id); + } + if (ret) + return ret; + } else { + pos = num; + } - for (i = 0; i < size; i++) - mask[i] = (mask[i] | mask_src[i]); + if (pos > 0) { + key_profile->key_offset[pos] = + key_profile->key_offset[pos - 1] + + key_profile->key_size[pos - 1]; + } else { + key_profile->key_offset[pos] = 0; + } - if (memcmp(mask, mask_support, size)) - return -1; + key_profile->key_size[pos] = 1; + key_profile->prot_field[pos].type = DPAA2_FAF_KEY; + key_profile->prot_field[pos].key_field = faf_byte; + key_profile->num++; - return 0; + if (insert_offset) + *insert_offset = key_profile->key_offset[pos]; + + key_profile->key_max_size++; + + return pos; } static int -dpaa2_configure_flow_eth(struct rte_flow *flow, - struct rte_eth_dev *dev, - const struct rte_flow_attr *attr, - const struct rte_flow_item *pattern, - const struct rte_flow_action actions[] __rte_unused, - struct rte_flow_error *error __rte_unused, - int *device_configured) +dpaa2_flow_pr_advance(struct dpaa2_dev_priv *priv, + uint32_t pr_offset, uint32_t pr_size, + enum dpaa2_flow_dist_type dist_type, int tc_id, + int *insert_offset) { - int index, ret; - int local_cfg = 0; - uint32_t group; - const struct rte_flow_item_eth *spec, *mask; + int offset, ret; + struct dpaa2_key_profile *key_profile; + int num, pos; - /* TODO: Currently upper bound of range parameter is not implemented */ - const struct rte_flow_item_eth *last __rte_unused; - struct dpaa2_dev_priv *priv = dev->data->dev_private; - const char zero_cmp[RTE_ETHER_ADDR_LEN] = {0}; + if (dist_type == DPAA2_FLOW_QOS_TYPE) + key_profile = &priv->extract.qos_key_extract.key_profile; + else + key_profile = &priv->extract.tc_key_extract[tc_id].key_profile; - group = attr->group; + num = key_profile->num; - /* Parse pattern list to get the matching parameters */ - spec = (const struct rte_flow_item_eth *)pattern->spec; - last = (const struct rte_flow_item_eth *)pattern->last; - mask = (const struct rte_flow_item_eth *) - (pattern->mask ? pattern->mask : &dpaa2_flow_item_eth_mask); - if (!spec) { - /* Don't care any field of eth header, - * only care eth protocol. 
- */ - DPAA2_PMD_WARN("No pattern spec for Eth flow, just skip"); - return 0; + if (num >= DPKG_MAX_NUM_OF_EXTRACTS) { + DPAA2_PMD_ERR("Number of extracts overflows"); + return -EINVAL; } - /* Get traffic class index and flow id to be configured */ - flow->tc_id = group; - flow->tc_index = attr->priority; - - if (dpaa2_flow_extract_support((const uint8_t *)mask, - RTE_FLOW_ITEM_TYPE_ETH)) { - DPAA2_PMD_WARN("Extract field(s) of ethernet not support."); + if (key_profile->ip_addr_type != IP_NONE_ADDR_EXTRACT) { + offset = key_profile->ip_addr_extract_off; + pos = key_profile->ip_addr_extract_pos; + key_profile->ip_addr_extract_pos++; + key_profile->ip_addr_extract_off += pr_size; + if (dist_type == DPAA2_FLOW_QOS_TYPE) { + ret = dpaa2_flow_qos_rule_insert_hole(priv, + offset, pr_size); + } else { + ret = dpaa2_flow_fs_rule_insert_hole(priv, + offset, pr_size, tc_id); + } + if (ret) + return ret; + } else { + pos = num; + } - return -1; + if (pos > 0) { + key_profile->key_offset[pos] = + key_profile->key_offset[pos - 1] + + key_profile->key_size[pos - 1]; + } else { + key_profile->key_offset[pos] = 0; } - if (memcmp((const char *)&mask->hdr.src_addr, zero_cmp, RTE_ETHER_ADDR_LEN)) { - index = dpaa2_flow_extract_search( - &priv->extract.qos_key_extract.dpkg, - NET_PROT_ETH, NH_FLD_ETH_SA); - if (index < 0) { - ret = dpaa2_flow_extract_add( - &priv->extract.qos_key_extract, - NET_PROT_ETH, NH_FLD_ETH_SA, - RTE_ETHER_ADDR_LEN); - if (ret) { - DPAA2_PMD_ERR("QoS Extract add ETH_SA failed."); + key_profile->key_size[pos] = pr_size; + key_profile->prot_field[pos].type = DPAA2_PR_KEY; + key_profile->prot_field[pos].key_field = + (pr_offset << 16) | pr_size; + key_profile->num++; - return -1; - } - local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE; - } - index = dpaa2_flow_extract_search( - &priv->extract.tc_key_extract[group].dpkg, - NET_PROT_ETH, NH_FLD_ETH_SA); - if (index < 0) { - ret = dpaa2_flow_extract_add( - &priv->extract.tc_key_extract[group], - NET_PROT_ETH, NH_FLD_ETH_SA, - RTE_ETHER_ADDR_LEN); - if (ret) { - DPAA2_PMD_ERR("FS Extract add ETH_SA failed."); - return -1; - } - local_cfg |= DPAA2_FS_TABLE_RECONFIGURE; - } + if (insert_offset) + *insert_offset = key_profile->key_offset[pos]; - ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group); - if (ret) { - DPAA2_PMD_ERR( - "Move ipaddr before ETH_SA rule set failed"); - return -1; - } + key_profile->key_max_size += pr_size; - ret = dpaa2_flow_rule_data_set( - &priv->extract.qos_key_extract, - &flow->qos_rule, - NET_PROT_ETH, - NH_FLD_ETH_SA, - &spec->hdr.src_addr.addr_bytes, - &mask->hdr.src_addr.addr_bytes, - sizeof(struct rte_ether_addr)); - if (ret) { - DPAA2_PMD_ERR("QoS NH_FLD_ETH_SA rule data set failed"); - return -1; - } + return pos; +} - ret = dpaa2_flow_rule_data_set( - &priv->extract.tc_key_extract[group], - &flow->fs_rule, - NET_PROT_ETH, - NH_FLD_ETH_SA, - &spec->hdr.src_addr.addr_bytes, - &mask->hdr.src_addr.addr_bytes, - sizeof(struct rte_ether_addr)); - if (ret) { - DPAA2_PMD_ERR("FS NH_FLD_ETH_SA rule data set failed"); - return -1; - } - } +/* Move IPv4/IPv6 addresses to fill new extract previous IP address. + * Current MC/WRIOP only support generic IP extract but IP address + * is not fixed, so we have to put them at end of extracts, otherwise, + * the extracts position following them can't be identified. 
+ */ +static int +dpaa2_flow_key_profile_advance(enum net_prot prot, + uint32_t field, uint8_t field_size, + struct dpaa2_dev_priv *priv, + enum dpaa2_flow_dist_type dist_type, int tc_id, + int *insert_offset) +{ + int offset, ret; + struct dpaa2_key_profile *key_profile; + int num, pos; - if (memcmp((const char *)&mask->hdr.dst_addr, zero_cmp, RTE_ETHER_ADDR_LEN)) { - index = dpaa2_flow_extract_search( - &priv->extract.qos_key_extract.dpkg, - NET_PROT_ETH, NH_FLD_ETH_DA); - if (index < 0) { - ret = dpaa2_flow_extract_add( - &priv->extract.qos_key_extract, - NET_PROT_ETH, NH_FLD_ETH_DA, - RTE_ETHER_ADDR_LEN); - if (ret) { - DPAA2_PMD_ERR("QoS Extract add ETH_DA failed."); + if (dpaa2_flow_ip_address_extract(prot, field)) { + DPAA2_PMD_ERR("%s only for none IP address extract", + __func__); + return -EINVAL; + } - return -1; - } - local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE; - } + if (dist_type == DPAA2_FLOW_QOS_TYPE) + key_profile = &priv->extract.qos_key_extract.key_profile; + else + key_profile = &priv->extract.tc_key_extract[tc_id].key_profile; - index = dpaa2_flow_extract_search( - &priv->extract.tc_key_extract[group].dpkg, - NET_PROT_ETH, NH_FLD_ETH_DA); - if (index < 0) { - ret = dpaa2_flow_extract_add( - &priv->extract.tc_key_extract[group], - NET_PROT_ETH, NH_FLD_ETH_DA, - RTE_ETHER_ADDR_LEN); - if (ret) { - DPAA2_PMD_ERR("FS Extract add ETH_DA failed."); + num = key_profile->num; - return -1; - } - local_cfg |= DPAA2_FS_TABLE_RECONFIGURE; - } + if (num >= DPKG_MAX_NUM_OF_EXTRACTS) { + DPAA2_PMD_ERR("Number of extracts overflows"); + return -EINVAL; + } - ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group); - if (ret) { - DPAA2_PMD_ERR( - "Move ipaddr before ETH DA rule set failed"); - return -1; + if (key_profile->ip_addr_type != IP_NONE_ADDR_EXTRACT) { + offset = key_profile->ip_addr_extract_off; + pos = key_profile->ip_addr_extract_pos; + key_profile->ip_addr_extract_pos++; + key_profile->ip_addr_extract_off += field_size; + if (dist_type == DPAA2_FLOW_QOS_TYPE) { + ret = dpaa2_flow_qos_rule_insert_hole(priv, + offset, field_size); + } else { + ret = dpaa2_flow_fs_rule_insert_hole(priv, + offset, field_size, tc_id); } + if (ret) + return ret; + } else { + pos = num; + } - ret = dpaa2_flow_rule_data_set( - &priv->extract.qos_key_extract, - &flow->qos_rule, - NET_PROT_ETH, - NH_FLD_ETH_DA, - &spec->hdr.dst_addr.addr_bytes, - &mask->hdr.dst_addr.addr_bytes, - sizeof(struct rte_ether_addr)); - if (ret) { - DPAA2_PMD_ERR("QoS NH_FLD_ETH_DA rule data set failed"); - return -1; - } + if (pos > 0) { + key_profile->key_offset[pos] = + key_profile->key_offset[pos - 1] + + key_profile->key_size[pos - 1]; + } else { + key_profile->key_offset[pos] = 0; + } - ret = dpaa2_flow_rule_data_set( - &priv->extract.tc_key_extract[group], - &flow->fs_rule, - NET_PROT_ETH, - NH_FLD_ETH_DA, - &spec->hdr.dst_addr.addr_bytes, - &mask->hdr.dst_addr.addr_bytes, - sizeof(struct rte_ether_addr)); - if (ret) { - DPAA2_PMD_ERR("FS NH_FLD_ETH_DA rule data set failed"); - return -1; - } + key_profile->key_size[pos] = field_size; + key_profile->prot_field[pos].type = DPAA2_NET_PROT_KEY; + key_profile->prot_field[pos].prot = prot; + key_profile->prot_field[pos].key_field = field; + key_profile->num++; + + if (insert_offset) + *insert_offset = key_profile->key_offset[pos]; + + if (dpaa2_flow_l4_src_port_extract(prot, field)) { + key_profile->l4_src_port_present = 1; + key_profile->l4_src_port_pos = pos; + key_profile->l4_src_port_offset = + key_profile->key_offset[pos]; + } else if 
(dpaa2_flow_l4_dst_port_extract(prot, field)) { + key_profile->l4_dst_port_present = 1; + key_profile->l4_dst_port_pos = pos; + key_profile->l4_dst_port_offset = + key_profile->key_offset[pos]; } + key_profile->key_max_size += field_size; - if (memcmp((const char *)&mask->hdr.ether_type, zero_cmp, sizeof(rte_be16_t))) { - index = dpaa2_flow_extract_search( - &priv->extract.qos_key_extract.dpkg, - NET_PROT_ETH, NH_FLD_ETH_TYPE); - if (index < 0) { - ret = dpaa2_flow_extract_add( - &priv->extract.qos_key_extract, - NET_PROT_ETH, NH_FLD_ETH_TYPE, - RTE_ETHER_TYPE_LEN); - if (ret) { - DPAA2_PMD_ERR("QoS Extract add ETH_TYPE failed."); + return pos; +} - return -1; - } - local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE; - } - index = dpaa2_flow_extract_search( - &priv->extract.tc_key_extract[group].dpkg, - NET_PROT_ETH, NH_FLD_ETH_TYPE); - if (index < 0) { - ret = dpaa2_flow_extract_add( - &priv->extract.tc_key_extract[group], - NET_PROT_ETH, NH_FLD_ETH_TYPE, - RTE_ETHER_TYPE_LEN); - if (ret) { - DPAA2_PMD_ERR("FS Extract add ETH_TYPE failed."); +static int +dpaa2_flow_faf_add_hdr(int faf_byte, + struct dpaa2_dev_priv *priv, + enum dpaa2_flow_dist_type dist_type, int tc_id, + int *insert_offset) +{ + int pos, i, offset; + struct dpaa2_key_extract *key_extract; + struct dpkg_profile_cfg *dpkg; + struct dpkg_extract *extracts; - return -1; - } - local_cfg |= DPAA2_FS_TABLE_RECONFIGURE; - } + if (dist_type == DPAA2_FLOW_QOS_TYPE) + key_extract = &priv->extract.qos_key_extract; + else + key_extract = &priv->extract.tc_key_extract[tc_id]; - ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group); - if (ret) { - DPAA2_PMD_ERR( - "Move ipaddr before ETH TYPE rule set failed"); - return -1; - } + dpkg = &key_extract->dpkg; + extracts = dpkg->extracts; - ret = dpaa2_flow_rule_data_set( - &priv->extract.qos_key_extract, - &flow->qos_rule, - NET_PROT_ETH, - NH_FLD_ETH_TYPE, - &spec->hdr.ether_type, - &mask->hdr.ether_type, - sizeof(rte_be16_t)); - if (ret) { - DPAA2_PMD_ERR("QoS NH_FLD_ETH_TYPE rule data set failed"); - return -1; - } + if (dpkg->num_extracts >= DPKG_MAX_NUM_OF_EXTRACTS) { + DPAA2_PMD_ERR("Number of extracts overflows"); + return -EINVAL; + } - ret = dpaa2_flow_rule_data_set( - &priv->extract.tc_key_extract[group], - &flow->fs_rule, - NET_PROT_ETH, - NH_FLD_ETH_TYPE, - &spec->hdr.ether_type, - &mask->hdr.ether_type, - sizeof(rte_be16_t)); - if (ret) { - DPAA2_PMD_ERR("FS NH_FLD_ETH_TYPE rule data set failed"); - return -1; + pos = dpaa2_flow_faf_advance(priv, + faf_byte, dist_type, tc_id, + insert_offset); + if (pos < 0) + return pos; + + if (pos != dpkg->num_extracts) { + /* Not the last pos, must have IP address extract.*/ + for (i = dpkg->num_extracts - 1; i >= pos; i--) { + memcpy(&extracts[i + 1], + &extracts[i], sizeof(struct dpkg_extract)); } } - (*device_configured) |= local_cfg; + offset = DPAA2_FAFE_PSR_OFFSET + faf_byte; + + extracts[pos].type = DPKG_EXTRACT_FROM_PARSE; + extracts[pos].extract.from_parse.offset = offset; + extracts[pos].extract.from_parse.size = 1; + + dpkg->num_extracts++; return 0; } static int -dpaa2_configure_flow_vlan(struct rte_flow *flow, - struct rte_eth_dev *dev, - const struct rte_flow_attr *attr, - const struct rte_flow_item *pattern, - const struct rte_flow_action actions[] __rte_unused, - struct rte_flow_error *error __rte_unused, - int *device_configured) +dpaa2_flow_pr_add_hdr(uint32_t pr_offset, + uint32_t pr_size, struct dpaa2_dev_priv *priv, + enum dpaa2_flow_dist_type dist_type, int tc_id, + int *insert_offset) { - int index, ret; - int local_cfg = 
0; - uint32_t group; - const struct rte_flow_item_vlan *spec, *mask; - - const struct rte_flow_item_vlan *last __rte_unused; - struct dpaa2_dev_priv *priv = dev->data->dev_private; - - group = attr->group; + int pos, i; + struct dpaa2_key_extract *key_extract; + struct dpkg_profile_cfg *dpkg; + struct dpkg_extract *extracts; + + if ((pr_offset + pr_size) > DPAA2_FAPR_SIZE) { + DPAA2_PMD_ERR("PR extracts(%d:%d) overflow", + pr_offset, pr_size); + return -EINVAL; + } - /* Parse pattern list to get the matching parameters */ - spec = (const struct rte_flow_item_vlan *)pattern->spec; - last = (const struct rte_flow_item_vlan *)pattern->last; - mask = (const struct rte_flow_item_vlan *) - (pattern->mask ? pattern->mask : &dpaa2_flow_item_vlan_mask); + if (dist_type == DPAA2_FLOW_QOS_TYPE) + key_extract = &priv->extract.qos_key_extract; + else + key_extract = &priv->extract.tc_key_extract[tc_id]; - /* Get traffic class index and flow id to be configured */ - flow->tc_id = group; - flow->tc_index = attr->priority; + dpkg = &key_extract->dpkg; + extracts = dpkg->extracts; - if (!spec) { - /* Don't care any field of vlan header, - * only care vlan protocol. - */ - /* Eth type is actually used for vLan classification. - */ - struct proto_discrimination proto; + if (dpkg->num_extracts >= DPKG_MAX_NUM_OF_EXTRACTS) { + DPAA2_PMD_ERR("Number of extracts overflows"); + return -EINVAL; + } - index = dpaa2_flow_extract_search( - &priv->extract.qos_key_extract.dpkg, - NET_PROT_ETH, NH_FLD_ETH_TYPE); - if (index < 0) { - ret = dpaa2_flow_proto_discrimination_extract( - &priv->extract.qos_key_extract, - RTE_FLOW_ITEM_TYPE_ETH); - if (ret) { - DPAA2_PMD_ERR( - "QoS Ext ETH_TYPE to discriminate vLan failed"); + pos = dpaa2_flow_pr_advance(priv, + pr_offset, pr_size, dist_type, tc_id, + insert_offset); + if (pos < 0) + return pos; - return -1; - } - local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE; + if (pos != dpkg->num_extracts) { + /* Not the last pos, must have IP address extract.*/ + for (i = dpkg->num_extracts - 1; i >= pos; i--) { + memcpy(&extracts[i + 1], + &extracts[i], sizeof(struct dpkg_extract)); } + } - index = dpaa2_flow_extract_search( - &priv->extract.tc_key_extract[group].dpkg, - NET_PROT_ETH, NH_FLD_ETH_TYPE); - if (index < 0) { - ret = dpaa2_flow_proto_discrimination_extract( - &priv->extract.tc_key_extract[group], - RTE_FLOW_ITEM_TYPE_ETH); - if (ret) { - DPAA2_PMD_ERR( - "FS Ext ETH_TYPE to discriminate vLan failed."); - - return -1; - } - local_cfg |= DPAA2_FS_TABLE_RECONFIGURE; - } + extracts[pos].type = DPKG_EXTRACT_FROM_PARSE; + extracts[pos].extract.from_parse.offset = pr_offset; + extracts[pos].extract.from_parse.size = pr_size; - ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group); - if (ret) { - DPAA2_PMD_ERR( - "Move ipaddr before vLan discrimination set failed"); - return -1; - } + dpkg->num_extracts++; - proto.type = RTE_FLOW_ITEM_TYPE_ETH; - proto.eth_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN); - ret = dpaa2_flow_proto_discrimination_rule(priv, flow, - proto, group); - if (ret) { - DPAA2_PMD_ERR("vLan discrimination rule set failed"); - return -1; - } + return 0; +} - (*device_configured) |= local_cfg; +static int +dpaa2_flow_extract_add_hdr(enum net_prot prot, + uint32_t field, uint8_t field_size, + struct dpaa2_dev_priv *priv, + enum dpaa2_flow_dist_type dist_type, int tc_id, + int *insert_offset) +{ + int pos, i; + struct dpaa2_key_extract *key_extract; + struct dpkg_profile_cfg *dpkg; + struct dpkg_extract *extracts; - return 0; - } + if (dist_type == DPAA2_FLOW_QOS_TYPE) + 
key_extract = &priv->extract.qos_key_extract; + else + key_extract = &priv->extract.tc_key_extract[tc_id]; - if (dpaa2_flow_extract_support((const uint8_t *)mask, - RTE_FLOW_ITEM_TYPE_VLAN)) { - DPAA2_PMD_WARN("Extract field(s) of vlan not support."); + dpkg = &key_extract->dpkg; + extracts = dpkg->extracts; - return -1; + if (dpaa2_flow_ip_address_extract(prot, field)) { + DPAA2_PMD_ERR("%s only for none IP address extract", + __func__); + return -EINVAL; } - if (!mask->hdr.vlan_tci) - return 0; - - index = dpaa2_flow_extract_search( - &priv->extract.qos_key_extract.dpkg, - NET_PROT_VLAN, NH_FLD_VLAN_TCI); - if (index < 0) { - ret = dpaa2_flow_extract_add( - &priv->extract.qos_key_extract, - NET_PROT_VLAN, - NH_FLD_VLAN_TCI, - sizeof(rte_be16_t)); - if (ret) { - DPAA2_PMD_ERR("QoS Extract add VLAN_TCI failed."); - - return -1; - } - local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE; + if (dpkg->num_extracts >= DPKG_MAX_NUM_OF_EXTRACTS) { + DPAA2_PMD_ERR("Number of extracts overflows"); + return -EINVAL; } - index = dpaa2_flow_extract_search( - &priv->extract.tc_key_extract[group].dpkg, - NET_PROT_VLAN, NH_FLD_VLAN_TCI); - if (index < 0) { - ret = dpaa2_flow_extract_add( - &priv->extract.tc_key_extract[group], - NET_PROT_VLAN, - NH_FLD_VLAN_TCI, - sizeof(rte_be16_t)); - if (ret) { - DPAA2_PMD_ERR("FS Extract add VLAN_TCI failed."); + pos = dpaa2_flow_key_profile_advance(prot, + field, field_size, priv, + dist_type, tc_id, + insert_offset); + if (pos < 0) + return pos; - return -1; + if (pos != dpkg->num_extracts) { + /* Not the last pos, must have IP address extract.*/ + for (i = dpkg->num_extracts - 1; i >= pos; i--) { + memcpy(&extracts[i + 1], + &extracts[i], sizeof(struct dpkg_extract)); } - local_cfg |= DPAA2_FS_TABLE_RECONFIGURE; } - ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group); - if (ret) { - DPAA2_PMD_ERR( - "Move ipaddr before VLAN TCI rule set failed"); - return -1; - } - - ret = dpaa2_flow_rule_data_set(&priv->extract.qos_key_extract, - &flow->qos_rule, - NET_PROT_VLAN, - NH_FLD_VLAN_TCI, - &spec->hdr.vlan_tci, - &mask->hdr.vlan_tci, - sizeof(rte_be16_t)); - if (ret) { - DPAA2_PMD_ERR("QoS NH_FLD_VLAN_TCI rule data set failed"); - return -1; - } - - ret = dpaa2_flow_rule_data_set( - &priv->extract.tc_key_extract[group], - &flow->fs_rule, - NET_PROT_VLAN, - NH_FLD_VLAN_TCI, - &spec->hdr.vlan_tci, - &mask->hdr.vlan_tci, - sizeof(rte_be16_t)); - if (ret) { - DPAA2_PMD_ERR("FS NH_FLD_VLAN_TCI rule data set failed"); - return -1; - } + extracts[pos].type = DPKG_EXTRACT_FROM_HDR; + extracts[pos].extract.from_hdr.prot = prot; + extracts[pos].extract.from_hdr.type = DPKG_FULL_FIELD; + extracts[pos].extract.from_hdr.field = field; - (*device_configured) |= local_cfg; + dpkg->num_extracts++; return 0; } static int -dpaa2_configure_flow_ip_discrimation( - struct dpaa2_dev_priv *priv, struct rte_flow *flow, - const struct rte_flow_item *pattern, - int *local_cfg, int *device_configured, - uint32_t group) +dpaa2_flow_extract_new_raw(struct dpaa2_dev_priv *priv, + int offset, int size, + enum dpaa2_flow_dist_type dist_type, int tc_id) { - int index, ret; - struct proto_discrimination proto; + struct dpaa2_key_extract *key_extract; + struct dpkg_profile_cfg *dpkg; + struct dpaa2_key_profile *key_profile; + int last_extract_size, index, pos, item_size; + uint8_t num_extracts; + uint32_t field; - index = dpaa2_flow_extract_search( - &priv->extract.qos_key_extract.dpkg, - NET_PROT_ETH, NH_FLD_ETH_TYPE); - if (index < 0) { - ret = dpaa2_flow_proto_discrimination_extract( - 
&priv->extract.qos_key_extract, - RTE_FLOW_ITEM_TYPE_ETH); - if (ret) { - DPAA2_PMD_ERR( - "QoS Extract ETH_TYPE to discriminate IP failed."); - return -1; - } - (*local_cfg) |= DPAA2_QOS_TABLE_RECONFIGURE; - } + if (dist_type == DPAA2_FLOW_QOS_TYPE) + key_extract = &priv->extract.qos_key_extract; + else + key_extract = &priv->extract.tc_key_extract[tc_id]; - index = dpaa2_flow_extract_search( - &priv->extract.tc_key_extract[group].dpkg, - NET_PROT_ETH, NH_FLD_ETH_TYPE); - if (index < 0) { - ret = dpaa2_flow_proto_discrimination_extract( - &priv->extract.tc_key_extract[group], - RTE_FLOW_ITEM_TYPE_ETH); - if (ret) { - DPAA2_PMD_ERR( - "FS Extract ETH_TYPE to discriminate IP failed."); - return -1; - } - (*local_cfg) |= DPAA2_FS_TABLE_RECONFIGURE; - } + dpkg = &key_extract->dpkg; + key_profile = &key_extract->key_profile; - ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group); - if (ret) { - DPAA2_PMD_ERR( - "Move ipaddr before IP discrimination set failed"); - return -1; - } + key_profile->raw_region.raw_start = 0; + key_profile->raw_region.raw_size = 0; - proto.type = RTE_FLOW_ITEM_TYPE_ETH; - if (pattern->type == RTE_FLOW_ITEM_TYPE_IPV4) - proto.eth_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4); + last_extract_size = (size % DPAA2_FLOW_MAX_KEY_SIZE); + num_extracts = (size / DPAA2_FLOW_MAX_KEY_SIZE); + if (last_extract_size) + num_extracts++; else - proto.eth_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6); - ret = dpaa2_flow_proto_discrimination_rule(priv, flow, proto, group); - if (ret) { - DPAA2_PMD_ERR("IP discrimination rule set failed"); - return -1; - } + last_extract_size = DPAA2_FLOW_MAX_KEY_SIZE; - (*device_configured) |= (*local_cfg); + for (index = 0; index < num_extracts; index++) { + if (index == num_extracts - 1) + item_size = last_extract_size; + else + item_size = DPAA2_FLOW_MAX_KEY_SIZE; + field = offset << DPAA2_FLOW_RAW_OFFSET_FIELD_SHIFT; + field |= item_size; + + pos = dpaa2_flow_key_profile_advance(NET_PROT_PAYLOAD, + field, item_size, priv, dist_type, + tc_id, NULL); + if (pos < 0) + return pos; + + dpkg->extracts[pos].type = DPKG_EXTRACT_FROM_DATA; + dpkg->extracts[pos].extract.from_data.size = item_size; + dpkg->extracts[pos].extract.from_data.offset = offset; + + if (index == 0) { + key_profile->raw_extract_pos = pos; + key_profile->raw_extract_off = + key_profile->key_offset[pos]; + key_profile->raw_region.raw_start = offset; + } + key_profile->raw_extract_num++; + key_profile->raw_region.raw_size += + key_profile->key_size[pos]; + + offset += item_size; + dpkg->num_extracts++; + } return 0; } - static int -dpaa2_configure_flow_generic_ip( - struct rte_flow *flow, - struct rte_eth_dev *dev, - const struct rte_flow_attr *attr, - const struct rte_flow_item *pattern, - const struct rte_flow_action actions[] __rte_unused, - struct rte_flow_error *error __rte_unused, - int *device_configured) +dpaa2_flow_extract_add_raw(struct dpaa2_dev_priv *priv, + int offset, int size, enum dpaa2_flow_dist_type dist_type, + int tc_id, int *recfg) { - int index, ret; - int local_cfg = 0; - uint32_t group; - const struct rte_flow_item_ipv4 *spec_ipv4 = 0, - *mask_ipv4 = 0; - const struct rte_flow_item_ipv6 *spec_ipv6 = 0, - *mask_ipv6 = 0; - const void *key, *mask; + struct dpaa2_key_profile *key_profile; + struct dpaa2_raw_region *raw_region; + int end = offset + size, ret = 0, extract_extended, sz_extend; + int start_cmp, end_cmp, new_size, index, pos, end_pos; + int last_extract_size, item_size, num_extracts, bk_num = 0; + struct dpkg_extract 
extract_bk[DPKG_MAX_NUM_OF_EXTRACTS]; + uint8_t key_offset_bk[DPKG_MAX_NUM_OF_EXTRACTS]; + uint8_t key_size_bk[DPKG_MAX_NUM_OF_EXTRACTS]; + struct key_prot_field prot_field_bk[DPKG_MAX_NUM_OF_EXTRACTS]; + struct dpaa2_raw_region raw_hole; + struct dpkg_profile_cfg *dpkg; enum net_prot prot; + uint32_t field; - struct dpaa2_dev_priv *priv = dev->data->dev_private; - const char zero_cmp[NH_FLD_IPV6_ADDR_SIZE] = {0}; - int size; - - group = attr->group; - - /* Parse pattern list to get the matching parameters */ - if (pattern->type == RTE_FLOW_ITEM_TYPE_IPV4) { - spec_ipv4 = (const struct rte_flow_item_ipv4 *)pattern->spec; - mask_ipv4 = (const struct rte_flow_item_ipv4 *) - (pattern->mask ? pattern->mask : - &dpaa2_flow_item_ipv4_mask); + if (dist_type == DPAA2_FLOW_QOS_TYPE) { + key_profile = &priv->extract.qos_key_extract.key_profile; + dpkg = &priv->extract.qos_key_extract.dpkg; } else { - spec_ipv6 = (const struct rte_flow_item_ipv6 *)pattern->spec; - mask_ipv6 = (const struct rte_flow_item_ipv6 *) - (pattern->mask ? pattern->mask : - &dpaa2_flow_item_ipv6_mask); + key_profile = &priv->extract.tc_key_extract[tc_id].key_profile; + dpkg = &priv->extract.tc_key_extract[tc_id].dpkg; } - /* Get traffic class index and flow id to be configured */ - flow->tc_id = group; - flow->tc_index = attr->priority; + raw_region = &key_profile->raw_region; + if (!raw_region->raw_size) { + /* New RAW region*/ + ret = dpaa2_flow_extract_new_raw(priv, offset, size, + dist_type, tc_id); + if (!ret && recfg) + (*recfg) |= dist_type; - ret = dpaa2_configure_flow_ip_discrimation(priv, - flow, pattern, &local_cfg, - device_configured, group); - if (ret) { - DPAA2_PMD_ERR("IP discrimination failed!"); - return -1; + return ret; } + start_cmp = raw_region->raw_start; + end_cmp = raw_region->raw_start + raw_region->raw_size; - if (!spec_ipv4 && !spec_ipv6) + if (offset >= start_cmp && end <= end_cmp) return 0; - if (mask_ipv4) { - if (dpaa2_flow_extract_support((const uint8_t *)mask_ipv4, - RTE_FLOW_ITEM_TYPE_IPV4)) { - DPAA2_PMD_WARN("Extract field(s) of IPv4 not support."); - - return -1; - } + sz_extend = 0; + new_size = raw_region->raw_size; + if (offset < start_cmp) { + sz_extend += start_cmp - offset; + new_size += (start_cmp - offset); } - - if (mask_ipv6) { - if (dpaa2_flow_extract_support((const uint8_t *)mask_ipv6, - RTE_FLOW_ITEM_TYPE_IPV6)) { - DPAA2_PMD_WARN("Extract field(s) of IPv6 not support."); - - return -1; - } + if (end > end_cmp) { + sz_extend += end - end_cmp; + new_size += (end - end_cmp); } - if (mask_ipv4 && (mask_ipv4->hdr.src_addr || - mask_ipv4->hdr.dst_addr)) { - flow->ipaddr_rule.ipaddr_type = FLOW_IPV4_ADDR; - } else if (mask_ipv6 && - (memcmp(&mask_ipv6->hdr.src_addr, - zero_cmp, NH_FLD_IPV6_ADDR_SIZE) || - memcmp(&mask_ipv6->hdr.dst_addr, - zero_cmp, NH_FLD_IPV6_ADDR_SIZE))) { - flow->ipaddr_rule.ipaddr_type = FLOW_IPV6_ADDR; - } - - if ((mask_ipv4 && mask_ipv4->hdr.src_addr) || - (mask_ipv6 && - memcmp(&mask_ipv6->hdr.src_addr, - zero_cmp, NH_FLD_IPV6_ADDR_SIZE))) { - index = dpaa2_flow_extract_search( - &priv->extract.qos_key_extract.dpkg, - NET_PROT_IP, NH_FLD_IP_SRC); - if (index < 0) { - ret = dpaa2_flow_extract_add( - &priv->extract.qos_key_extract, - NET_PROT_IP, - NH_FLD_IP_SRC, - 0); - if (ret) { - DPAA2_PMD_ERR("QoS Extract add IP_SRC failed."); - - return -1; - } - local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE; - } + last_extract_size = (new_size % DPAA2_FLOW_MAX_KEY_SIZE); + num_extracts = (new_size / DPAA2_FLOW_MAX_KEY_SIZE); + if (last_extract_size) + num_extracts++; + else 
+ last_extract_size = DPAA2_FLOW_MAX_KEY_SIZE; - index = dpaa2_flow_extract_search( - &priv->extract.tc_key_extract[group].dpkg, - NET_PROT_IP, NH_FLD_IP_SRC); - if (index < 0) { - ret = dpaa2_flow_extract_add( - &priv->extract.tc_key_extract[group], - NET_PROT_IP, - NH_FLD_IP_SRC, - 0); - if (ret) { - DPAA2_PMD_ERR("FS Extract add IP_SRC failed."); + if ((key_profile->num + num_extracts - + key_profile->raw_extract_num) >= + DPKG_MAX_NUM_OF_EXTRACTS) { + DPAA2_PMD_ERR("%s Failed to expand raw extracts", + __func__); + return -EINVAL; + } - return -1; - } - local_cfg |= DPAA2_FS_TABLE_RECONFIGURE; + if (offset < start_cmp) { + raw_hole.raw_start = key_profile->raw_extract_off; + raw_hole.raw_size = start_cmp - offset; + raw_region->raw_start = offset; + raw_region->raw_size += start_cmp - offset; + + if (dist_type & DPAA2_FLOW_QOS_TYPE) { + ret = dpaa2_flow_qos_rule_insert_hole(priv, + raw_hole.raw_start, + raw_hole.raw_size); + if (ret) + return ret; } - - if (spec_ipv4) - key = &spec_ipv4->hdr.src_addr; - else - key = &spec_ipv6->hdr.src_addr; - if (mask_ipv4) { - mask = &mask_ipv4->hdr.src_addr; - size = NH_FLD_IPV4_ADDR_SIZE; - prot = NET_PROT_IPV4; - } else { - mask = &mask_ipv6->hdr.src_addr; - size = NH_FLD_IPV6_ADDR_SIZE; - prot = NET_PROT_IPV6; + if (dist_type & DPAA2_FLOW_FS_TYPE) { + ret = dpaa2_flow_fs_rule_insert_hole(priv, + raw_hole.raw_start, + raw_hole.raw_size, tc_id); + if (ret) + return ret; } + } - ret = dpaa2_flow_rule_data_set( - &priv->extract.qos_key_extract, - &flow->qos_rule, - prot, NH_FLD_IP_SRC, - key, mask, size); - if (ret) { - DPAA2_PMD_ERR("QoS NH_FLD_IP_SRC rule data set failed"); - return -1; + if (end > end_cmp) { + raw_hole.raw_start = + key_profile->raw_extract_off + + raw_region->raw_size; + raw_hole.raw_size = end - end_cmp; + raw_region->raw_size += end - end_cmp; + + if (dist_type & DPAA2_FLOW_QOS_TYPE) { + ret = dpaa2_flow_qos_rule_insert_hole(priv, + raw_hole.raw_start, + raw_hole.raw_size); + if (ret) + return ret; } - - ret = dpaa2_flow_rule_data_set( - &priv->extract.tc_key_extract[group], - &flow->fs_rule, - prot, NH_FLD_IP_SRC, - key, mask, size); - if (ret) { - DPAA2_PMD_ERR("FS NH_FLD_IP_SRC rule data set failed"); - return -1; + if (dist_type & DPAA2_FLOW_FS_TYPE) { + ret = dpaa2_flow_fs_rule_insert_hole(priv, + raw_hole.raw_start, + raw_hole.raw_size, tc_id); + if (ret) + return ret; } + } - flow->ipaddr_rule.qos_ipsrc_offset = - dpaa2_flow_extract_key_offset( - &priv->extract.qos_key_extract, - prot, NH_FLD_IP_SRC); - flow->ipaddr_rule.fs_ipsrc_offset = - dpaa2_flow_extract_key_offset( - &priv->extract.tc_key_extract[group], - prot, NH_FLD_IP_SRC); - } - - if ((mask_ipv4 && mask_ipv4->hdr.dst_addr) || - (mask_ipv6 && - memcmp(&mask_ipv6->hdr.dst_addr, - zero_cmp, NH_FLD_IPV6_ADDR_SIZE))) { - index = dpaa2_flow_extract_search( - &priv->extract.qos_key_extract.dpkg, - NET_PROT_IP, NH_FLD_IP_DST); - if (index < 0) { - if (mask_ipv4) - size = NH_FLD_IPV4_ADDR_SIZE; - else - size = NH_FLD_IPV6_ADDR_SIZE; - ret = dpaa2_flow_extract_add( - &priv->extract.qos_key_extract, - NET_PROT_IP, - NH_FLD_IP_DST, - size); - if (ret) { - DPAA2_PMD_ERR("QoS Extract add IP_DST failed."); - - return -1; + end_pos = key_profile->raw_extract_pos + + key_profile->raw_extract_num; + if (key_profile->num > end_pos) { + bk_num = key_profile->num - end_pos; + memcpy(extract_bk, &dpkg->extracts[end_pos], + bk_num * sizeof(struct dpkg_extract)); + memcpy(key_offset_bk, &key_profile->key_offset[end_pos], + bk_num * sizeof(uint8_t)); + memcpy(key_size_bk, 
&key_profile->key_size[end_pos], + bk_num * sizeof(uint8_t)); + memcpy(prot_field_bk, &key_profile->prot_field[end_pos], + bk_num * sizeof(struct key_prot_field)); + + for (index = 0; index < bk_num; index++) { + key_offset_bk[index] += sz_extend; + prot = prot_field_bk[index].prot; + field = prot_field_bk[index].key_field; + if (dpaa2_flow_l4_src_port_extract(prot, + field)) { + key_profile->l4_src_port_present = 1; + key_profile->l4_src_port_pos = end_pos + index; + key_profile->l4_src_port_offset = + key_offset_bk[index]; + } else if (dpaa2_flow_l4_dst_port_extract(prot, + field)) { + key_profile->l4_dst_port_present = 1; + key_profile->l4_dst_port_pos = end_pos + index; + key_profile->l4_dst_port_offset = + key_offset_bk[index]; } - local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE; } + } - index = dpaa2_flow_extract_search( - &priv->extract.tc_key_extract[group].dpkg, - NET_PROT_IP, NH_FLD_IP_DST); - if (index < 0) { - if (mask_ipv4) - size = NH_FLD_IPV4_ADDR_SIZE; - else - size = NH_FLD_IPV6_ADDR_SIZE; - ret = dpaa2_flow_extract_add( - &priv->extract.tc_key_extract[group], - NET_PROT_IP, - NH_FLD_IP_DST, - size); - if (ret) { - DPAA2_PMD_ERR("FS Extract add IP_DST failed."); - - return -1; - } - local_cfg |= DPAA2_FS_TABLE_RECONFIGURE; - } + pos = key_profile->raw_extract_pos; - if (spec_ipv4) - key = &spec_ipv4->hdr.dst_addr; + for (index = 0; index < num_extracts; index++) { + if (index == num_extracts - 1) + item_size = last_extract_size; else - key = &spec_ipv6->hdr.dst_addr; - if (mask_ipv4) { - mask = &mask_ipv4->hdr.dst_addr; - size = NH_FLD_IPV4_ADDR_SIZE; - prot = NET_PROT_IPV4; + item_size = DPAA2_FLOW_MAX_KEY_SIZE; + field = offset << DPAA2_FLOW_RAW_OFFSET_FIELD_SHIFT; + field |= item_size; + + if (pos > 0) { + key_profile->key_offset[pos] = + key_profile->key_offset[pos - 1] + + key_profile->key_size[pos - 1]; } else { - mask = &mask_ipv6->hdr.dst_addr; - size = NH_FLD_IPV6_ADDR_SIZE; - prot = NET_PROT_IPV6; - } + key_profile->key_offset[pos] = 0; + } + key_profile->key_size[pos] = item_size; + key_profile->prot_field[pos].type = DPAA2_NET_PROT_KEY; + key_profile->prot_field[pos].prot = NET_PROT_PAYLOAD; + key_profile->prot_field[pos].key_field = field; + + dpkg->extracts[pos].type = DPKG_EXTRACT_FROM_DATA; + dpkg->extracts[pos].extract.from_data.size = item_size; + dpkg->extracts[pos].extract.from_data.offset = offset; + offset += item_size; + pos++; + } - ret = dpaa2_flow_rule_data_set( - &priv->extract.qos_key_extract, - &flow->qos_rule, - prot, NH_FLD_IP_DST, - key, mask, size); - if (ret) { - DPAA2_PMD_ERR("QoS NH_FLD_IP_DST rule data set failed"); - return -1; - } + if (bk_num) { + memcpy(&dpkg->extracts[pos], extract_bk, + bk_num * sizeof(struct dpkg_extract)); + memcpy(&key_profile->key_offset[end_pos], + key_offset_bk, bk_num * sizeof(uint8_t)); + memcpy(&key_profile->key_size[end_pos], + key_size_bk, bk_num * sizeof(uint8_t)); + memcpy(&key_profile->prot_field[end_pos], + prot_field_bk, bk_num * sizeof(struct key_prot_field)); + } - ret = dpaa2_flow_rule_data_set( - &priv->extract.tc_key_extract[group], - &flow->fs_rule, - prot, NH_FLD_IP_DST, - key, mask, size); - if (ret) { - DPAA2_PMD_ERR("FS NH_FLD_IP_DST rule data set failed"); - return -1; - } - flow->ipaddr_rule.qos_ipdst_offset = - dpaa2_flow_extract_key_offset( - &priv->extract.qos_key_extract, - prot, NH_FLD_IP_DST); - flow->ipaddr_rule.fs_ipdst_offset = - dpaa2_flow_extract_key_offset( - &priv->extract.tc_key_extract[group], - prot, NH_FLD_IP_DST); - } - - if ((mask_ipv4 && mask_ipv4->hdr.next_proto_id) || - 
(mask_ipv6 && mask_ipv6->hdr.proto)) { - index = dpaa2_flow_extract_search( - &priv->extract.qos_key_extract.dpkg, - NET_PROT_IP, NH_FLD_IP_PROTO); - if (index < 0) { - ret = dpaa2_flow_extract_add( - &priv->extract.qos_key_extract, - NET_PROT_IP, - NH_FLD_IP_PROTO, - NH_FLD_IP_PROTO_SIZE); - if (ret) { - DPAA2_PMD_ERR("QoS Extract add IP_DST failed."); + extract_extended = num_extracts - key_profile->raw_extract_num; + if (key_profile->ip_addr_type != IP_NONE_ADDR_EXTRACT) { + key_profile->ip_addr_extract_pos += extract_extended; + key_profile->ip_addr_extract_off += sz_extend; + } + key_profile->raw_extract_num = num_extracts; + key_profile->num += extract_extended; + key_profile->key_max_size += sz_extend; - return -1; - } - local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE; - } + dpkg->num_extracts += extract_extended; + if (!ret && recfg) + (*recfg) |= dist_type; - index = dpaa2_flow_extract_search( - &priv->extract.tc_key_extract[group].dpkg, - NET_PROT_IP, NH_FLD_IP_PROTO); - if (index < 0) { - ret = dpaa2_flow_extract_add( - &priv->extract.tc_key_extract[group], - NET_PROT_IP, - NH_FLD_IP_PROTO, - NH_FLD_IP_PROTO_SIZE); - if (ret) { - DPAA2_PMD_ERR("FS Extract add IP_DST failed."); + return ret; +} - return -1; - } - local_cfg |= DPAA2_FS_TABLE_RECONFIGURE; - } +static inline int +dpaa2_flow_extract_search(struct dpaa2_key_profile *key_profile, + enum key_prot_type type, enum net_prot prot, uint32_t key_field) +{ + int pos; + struct key_prot_field *prot_field; - ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group); - if (ret) { - DPAA2_PMD_ERR( - "Move ipaddr after NH_FLD_IP_PROTO rule set failed"); - return -1; - } + if (dpaa2_flow_ip_address_extract(prot, key_field)) { + DPAA2_PMD_ERR("%s only for none IP address extract", + __func__); + return -EINVAL; + } - if (spec_ipv4) - key = &spec_ipv4->hdr.next_proto_id; - else - key = &spec_ipv6->hdr.proto; - if (mask_ipv4) - mask = &mask_ipv4->hdr.next_proto_id; - else - mask = &mask_ipv6->hdr.proto; - - ret = dpaa2_flow_rule_data_set( - &priv->extract.qos_key_extract, - &flow->qos_rule, - NET_PROT_IP, - NH_FLD_IP_PROTO, - key, mask, NH_FLD_IP_PROTO_SIZE); + prot_field = key_profile->prot_field; + for (pos = 0; pos < key_profile->num; pos++) { + if (type == DPAA2_NET_PROT_KEY && + prot_field[pos].prot == prot && + prot_field[pos].key_field == key_field && + prot_field[pos].type == type) + return pos; + else if (type == DPAA2_FAF_KEY && + prot_field[pos].key_field == key_field && + prot_field[pos].type == type) + return pos; + else if (type == DPAA2_PR_KEY && + prot_field[pos].key_field == key_field && + prot_field[pos].type == type) + return pos; + } + + if (type == DPAA2_NET_PROT_KEY && + dpaa2_flow_l4_src_port_extract(prot, key_field)) { + if (key_profile->l4_src_port_present) + return key_profile->l4_src_port_pos; + } else if (type == DPAA2_NET_PROT_KEY && + dpaa2_flow_l4_dst_port_extract(prot, key_field)) { + if (key_profile->l4_dst_port_present) + return key_profile->l4_dst_port_pos; + } + + return -ENXIO; +} + +static inline int +dpaa2_flow_extract_key_offset(struct dpaa2_key_profile *key_profile, + enum key_prot_type type, enum net_prot prot, uint32_t key_field) +{ + int i; + + i = dpaa2_flow_extract_search(key_profile, type, prot, key_field); + if (i >= 0) + return key_profile->key_offset[i]; + else + return i; +} + +static int +dpaa2_flow_faf_add_rule(struct dpaa2_dev_priv *priv, + struct dpaa2_dev_flow *flow, + enum dpaa2_rx_faf_offset faf_bit_off, + int group, + enum dpaa2_flow_dist_type dist_type) +{ + int offset; + uint8_t 
*key_addr; + uint8_t *mask_addr; + struct dpaa2_key_extract *key_extract; + struct dpaa2_key_profile *key_profile; + uint8_t faf_byte = faf_bit_off / 8; + uint8_t faf_bit_in_byte = faf_bit_off % 8; + + faf_bit_in_byte = 7 - faf_bit_in_byte; + + if (dist_type & DPAA2_FLOW_QOS_TYPE) { + key_extract = &priv->extract.qos_key_extract; + key_profile = &key_extract->key_profile; + + offset = dpaa2_flow_extract_key_offset(key_profile, + DPAA2_FAF_KEY, NET_PROT_NONE, faf_byte); + if (offset < 0) { + DPAA2_PMD_ERR("%s QoS key extract failed", __func__); + return -EINVAL; + } + key_addr = flow->qos_key_addr + offset; + mask_addr = flow->qos_mask_addr + offset; + + if (!(*key_addr) && + key_profile->ip_addr_type == IP_NONE_ADDR_EXTRACT) + flow->qos_rule_size++; + + *key_addr |= (1 << faf_bit_in_byte); + *mask_addr |= (1 << faf_bit_in_byte); + } + + if (dist_type & DPAA2_FLOW_FS_TYPE) { + key_extract = &priv->extract.tc_key_extract[group]; + key_profile = &key_extract->key_profile; + + offset = dpaa2_flow_extract_key_offset(key_profile, + DPAA2_FAF_KEY, NET_PROT_NONE, faf_byte); + if (offset < 0) { + DPAA2_PMD_ERR("%s TC[%d] key extract failed", + __func__, group); + return -EINVAL; + } + key_addr = flow->fs_key_addr + offset; + mask_addr = flow->fs_mask_addr + offset; + + if (!(*key_addr) && + key_profile->ip_addr_type == IP_NONE_ADDR_EXTRACT) + flow->fs_rule_size++; + + *key_addr |= (1 << faf_bit_in_byte); + *mask_addr |= (1 << faf_bit_in_byte); + } + + return 0; +} + +static inline int +dpaa2_flow_pr_rule_data_set(struct dpaa2_dev_flow *flow, + struct dpaa2_key_profile *key_profile, + uint32_t pr_offset, uint32_t pr_size, + const void *key, const void *mask, + enum dpaa2_flow_dist_type dist_type) +{ + int offset; + uint32_t pr_field = pr_offset << 16 | pr_size; + + offset = dpaa2_flow_extract_key_offset(key_profile, + DPAA2_PR_KEY, NET_PROT_NONE, pr_field); + if (offset < 0) { + DPAA2_PMD_ERR("PR off(%d)/size(%d) does not exist!", + pr_offset, pr_size); + return -EINVAL; + } + + if (dist_type & DPAA2_FLOW_QOS_TYPE) { + memcpy((flow->qos_key_addr + offset), key, pr_size); + memcpy((flow->qos_mask_addr + offset), mask, pr_size); + if (key_profile->ip_addr_type == IP_NONE_ADDR_EXTRACT) + flow->qos_rule_size = offset + pr_size; + } + + if (dist_type & DPAA2_FLOW_FS_TYPE) { + memcpy((flow->fs_key_addr + offset), key, pr_size); + memcpy((flow->fs_mask_addr + offset), mask, pr_size); + if (key_profile->ip_addr_type == IP_NONE_ADDR_EXTRACT) + flow->fs_rule_size = offset + pr_size; + } + + return 0; +} + +static inline int +dpaa2_flow_hdr_rule_data_set(struct dpaa2_dev_flow *flow, + struct dpaa2_key_profile *key_profile, + enum net_prot prot, uint32_t field, int size, + const void *key, const void *mask, + enum dpaa2_flow_dist_type dist_type) +{ + int offset; + + if (dpaa2_flow_ip_address_extract(prot, field)) { + DPAA2_PMD_ERR("%s only for none IP address extract", + __func__); + return -EINVAL; + } + + offset = dpaa2_flow_extract_key_offset(key_profile, + DPAA2_NET_PROT_KEY, prot, field); + if (offset < 0) { + DPAA2_PMD_ERR("P(%d)/F(%d) does not exist!", + prot, field); + return -EINVAL; + } + + if (dist_type & DPAA2_FLOW_QOS_TYPE) { + memcpy((flow->qos_key_addr + offset), key, size); + memcpy((flow->qos_mask_addr + offset), mask, size); + if (key_profile->ip_addr_type == IP_NONE_ADDR_EXTRACT) + flow->qos_rule_size = offset + size; + } + + if (dist_type & DPAA2_FLOW_FS_TYPE) { + memcpy((flow->fs_key_addr + offset), key, size); + memcpy((flow->fs_mask_addr + offset), mask, size); + if 
(key_profile->ip_addr_type == IP_NONE_ADDR_EXTRACT) + flow->fs_rule_size = offset + size; + } + + return 0; +} + +static inline int +dpaa2_flow_raw_rule_data_set(struct dpaa2_dev_flow *flow, + struct dpaa2_key_profile *key_profile, + uint32_t extract_offset, int size, + const void *key, const void *mask, + enum dpaa2_flow_dist_type dist_type) +{ + int extract_size = size > DPAA2_FLOW_MAX_KEY_SIZE ? + DPAA2_FLOW_MAX_KEY_SIZE : size; + int offset, field; + + field = extract_offset << DPAA2_FLOW_RAW_OFFSET_FIELD_SHIFT; + field |= extract_size; + offset = dpaa2_flow_extract_key_offset(key_profile, + DPAA2_NET_PROT_KEY, NET_PROT_PAYLOAD, field); + if (offset < 0) { + DPAA2_PMD_ERR("offset(%d)/size(%d) raw extract failed", + extract_offset, size); + return -EINVAL; + } + + if (dist_type & DPAA2_FLOW_QOS_TYPE) { + memcpy((flow->qos_key_addr + offset), key, size); + memcpy((flow->qos_mask_addr + offset), mask, size); + flow->qos_rule_size = offset + size; + } + + if (dist_type & DPAA2_FLOW_FS_TYPE) { + memcpy((flow->fs_key_addr + offset), key, size); + memcpy((flow->fs_mask_addr + offset), mask, size); + flow->fs_rule_size = offset + size; + } + + return 0; +} + +static int +dpaa2_flow_extract_support(const uint8_t *mask_src, + enum rte_flow_item_type type) +{ + char mask[64]; + int i, size = 0; + const char *mask_support = 0; + + switch (type) { + case RTE_FLOW_ITEM_TYPE_ETH: + mask_support = (const char *)&dpaa2_flow_item_eth_mask; + size = sizeof(struct rte_flow_item_eth); + break; + case RTE_FLOW_ITEM_TYPE_VLAN: + mask_support = (const char *)&dpaa2_flow_item_vlan_mask; + size = sizeof(struct rte_flow_item_vlan); + break; + case RTE_FLOW_ITEM_TYPE_IPV4: + mask_support = (const char *)&dpaa2_flow_item_ipv4_mask; + size = sizeof(struct rte_flow_item_ipv4); + break; + case RTE_FLOW_ITEM_TYPE_IPV6: + mask_support = (const char *)&dpaa2_flow_item_ipv6_mask; + size = sizeof(struct rte_flow_item_ipv6); + break; + case RTE_FLOW_ITEM_TYPE_ICMP: + mask_support = (const char *)&dpaa2_flow_item_icmp_mask; + size = sizeof(struct rte_flow_item_icmp); + break; + case RTE_FLOW_ITEM_TYPE_UDP: + mask_support = (const char *)&dpaa2_flow_item_udp_mask; + size = sizeof(struct rte_flow_item_udp); + break; + case RTE_FLOW_ITEM_TYPE_TCP: + mask_support = (const char *)&dpaa2_flow_item_tcp_mask; + size = sizeof(struct rte_flow_item_tcp); + break; + case RTE_FLOW_ITEM_TYPE_ESP: + mask_support = (const char *)&dpaa2_flow_item_esp_mask; + size = sizeof(struct rte_flow_item_esp); + break; + case RTE_FLOW_ITEM_TYPE_AH: + mask_support = (const char *)&dpaa2_flow_item_ah_mask; + size = sizeof(struct rte_flow_item_ah); + break; + case RTE_FLOW_ITEM_TYPE_SCTP: + mask_support = (const char *)&dpaa2_flow_item_sctp_mask; + size = sizeof(struct rte_flow_item_sctp); + break; + case RTE_FLOW_ITEM_TYPE_GRE: + mask_support = (const char *)&dpaa2_flow_item_gre_mask; + size = sizeof(struct rte_flow_item_gre); + break; + case RTE_FLOW_ITEM_TYPE_VXLAN: + mask_support = (const char *)&dpaa2_flow_item_vxlan_mask; + size = sizeof(struct rte_flow_item_vxlan); + break; + case RTE_FLOW_ITEM_TYPE_ECPRI: + mask_support = (const char *)&dpaa2_flow_item_ecpri_mask; + size = sizeof(struct rte_flow_item_ecpri); + break; + case RTE_FLOW_ITEM_TYPE_GTP: + mask_support = (const char *)&dpaa2_flow_item_gtp_mask; + size = sizeof(struct rte_flow_item_gtp); + break; + default: + return -EINVAL; + } + + memcpy(mask, mask_support, size); + + for (i = 0; i < size; i++) + mask[i] = (mask[i] | mask_src[i]); + + if (memcmp(mask, mask_support, size)) + return 
-ENOTSUP; + + return 0; +} + +static int +dpaa2_flow_identify_by_faf(struct dpaa2_dev_priv *priv, + struct dpaa2_dev_flow *flow, + enum dpaa2_rx_faf_offset faf_off, + enum dpaa2_flow_dist_type dist_type, + int group, int *recfg) +{ + int ret, index, local_cfg = 0; + struct dpaa2_key_extract *extract; + struct dpaa2_key_profile *key_profile; + uint8_t faf_byte = faf_off / 8; + + if (dist_type & DPAA2_FLOW_QOS_TYPE) { + extract = &priv->extract.qos_key_extract; + key_profile = &extract->key_profile; + + index = dpaa2_flow_extract_search(key_profile, + DPAA2_FAF_KEY, NET_PROT_NONE, faf_byte); + if (index < 0) { + ret = dpaa2_flow_faf_add_hdr(faf_byte, + priv, DPAA2_FLOW_QOS_TYPE, group, + NULL); + if (ret) { + DPAA2_PMD_ERR("QOS faf extract add failed"); + + return -EINVAL; + } + local_cfg |= DPAA2_FLOW_QOS_TYPE; + } + + ret = dpaa2_flow_faf_add_rule(priv, flow, faf_off, group, + DPAA2_FLOW_QOS_TYPE); + if (ret) { + DPAA2_PMD_ERR("QoS faf rule set failed"); + return -EINVAL; + } + } + + if (dist_type & DPAA2_FLOW_FS_TYPE) { + extract = &priv->extract.tc_key_extract[group]; + key_profile = &extract->key_profile; + + index = dpaa2_flow_extract_search(key_profile, + DPAA2_FAF_KEY, NET_PROT_NONE, faf_byte); + if (index < 0) { + ret = dpaa2_flow_faf_add_hdr(faf_byte, + priv, DPAA2_FLOW_FS_TYPE, group, + NULL); + if (ret) { + DPAA2_PMD_ERR("FS[%d] faf extract add failed", + group); + + return -EINVAL; + } + local_cfg |= DPAA2_FLOW_FS_TYPE; + } + + ret = dpaa2_flow_faf_add_rule(priv, flow, faf_off, group, + DPAA2_FLOW_FS_TYPE); + if (ret) { + DPAA2_PMD_ERR("FS[%d] faf rule set failed", + group); + return -EINVAL; + } + } + + if (recfg) + *recfg |= local_cfg; + + return 0; +} + +static int +dpaa2_flow_add_pr_extract_rule(struct dpaa2_dev_flow *flow, + uint32_t pr_offset, uint32_t pr_size, + const void *key, const void *mask, + struct dpaa2_dev_priv *priv, int tc_id, int *recfg, + enum dpaa2_flow_dist_type dist_type) +{ + int index, ret, local_cfg = 0; + struct dpaa2_key_extract *key_extract; + struct dpaa2_key_profile *key_profile; + uint32_t pr_field = pr_offset << 16 | pr_size; + + if (dist_type == DPAA2_FLOW_QOS_TYPE) + key_extract = &priv->extract.qos_key_extract; + else + key_extract = &priv->extract.tc_key_extract[tc_id]; + + key_profile = &key_extract->key_profile; + + index = dpaa2_flow_extract_search(key_profile, + DPAA2_PR_KEY, NET_PROT_NONE, pr_field); + if (index < 0) { + ret = dpaa2_flow_pr_add_hdr(pr_offset, + pr_size, priv, + dist_type, tc_id, NULL); + if (ret) { + DPAA2_PMD_ERR("PR add off(%d)/size(%d) failed", + pr_offset, pr_size); + + return ret; + } + local_cfg |= dist_type; + } + + ret = dpaa2_flow_pr_rule_data_set(flow, key_profile, + pr_offset, pr_size, key, mask, dist_type); + if (ret) { + DPAA2_PMD_ERR("PR off(%d)/size(%d) rule data set failed", + pr_offset, pr_size); + + return ret; + } + + if (recfg) + *recfg |= local_cfg; + + return 0; +} + +static int +dpaa2_flow_add_hdr_extract_rule(struct dpaa2_dev_flow *flow, + enum net_prot prot, uint32_t field, + const void *key, const void *mask, int size, + struct dpaa2_dev_priv *priv, int tc_id, int *recfg, + enum dpaa2_flow_dist_type dist_type) +{ + int index, ret, local_cfg = 0; + struct dpaa2_key_extract *key_extract; + struct dpaa2_key_profile *key_profile; + + if (dpaa2_flow_ip_address_extract(prot, field)) + return -EINVAL; + + if (dist_type == DPAA2_FLOW_QOS_TYPE) + key_extract = &priv->extract.qos_key_extract; + else + key_extract = &priv->extract.tc_key_extract[tc_id]; + + key_profile = &key_extract->key_profile; + + 
index = dpaa2_flow_extract_search(key_profile, + DPAA2_NET_PROT_KEY, prot, field); + if (index < 0) { + ret = dpaa2_flow_extract_add_hdr(prot, + field, size, priv, + dist_type, tc_id, NULL); if (ret) { - DPAA2_PMD_ERR("QoS NH_FLD_IP_PROTO rule data set failed"); - return -1; + DPAA2_PMD_ERR("QoS Extract P(%d)/F(%d) failed", + prot, field); + + return ret; + } + local_cfg |= dist_type; + } + + ret = dpaa2_flow_hdr_rule_data_set(flow, key_profile, + prot, field, size, key, mask, dist_type); + if (ret) { + DPAA2_PMD_ERR("QoS P(%d)/F(%d) rule data set failed", + prot, field); + + return ret; + } + + if (recfg) + *recfg |= local_cfg; + + return 0; +} + +static int +dpaa2_flow_add_ipaddr_extract_rule(struct dpaa2_dev_flow *flow, + enum net_prot prot, uint32_t field, + const void *key, const void *mask, int size, + struct dpaa2_dev_priv *priv, int tc_id, int *recfg, + enum dpaa2_flow_dist_type dist_type) +{ + int local_cfg = 0, num, ipaddr_extract_len = 0; + struct dpaa2_key_extract *key_extract; + struct dpaa2_key_profile *key_profile; + struct dpkg_profile_cfg *dpkg; + uint8_t *key_addr, *mask_addr; + union ip_addr_extract_rule *ip_addr_data; + union ip_addr_extract_rule *ip_addr_mask; + enum net_prot orig_prot; + uint32_t orig_field; + + if (prot != NET_PROT_IPV4 && prot != NET_PROT_IPV6) + return -EINVAL; + + if (prot == NET_PROT_IPV4 && field != NH_FLD_IPV4_SRC_IP && + field != NH_FLD_IPV4_DST_IP) { + return -EINVAL; + } + + if (prot == NET_PROT_IPV6 && field != NH_FLD_IPV6_SRC_IP && + field != NH_FLD_IPV6_DST_IP) { + return -EINVAL; + } + + orig_prot = prot; + orig_field = field; + + if (prot == NET_PROT_IPV4 && + field == NH_FLD_IPV4_SRC_IP) { + prot = NET_PROT_IP; + field = NH_FLD_IP_SRC; + } else if (prot == NET_PROT_IPV4 && + field == NH_FLD_IPV4_DST_IP) { + prot = NET_PROT_IP; + field = NH_FLD_IP_DST; + } else if (prot == NET_PROT_IPV6 && + field == NH_FLD_IPV6_SRC_IP) { + prot = NET_PROT_IP; + field = NH_FLD_IP_SRC; + } else if (prot == NET_PROT_IPV6 && + field == NH_FLD_IPV6_DST_IP) { + prot = NET_PROT_IP; + field = NH_FLD_IP_DST; + } else { + DPAA2_PMD_ERR("Inval P(%d)/F(%d) to extract ip address", + prot, field); + return -EINVAL; + } + + if (dist_type == DPAA2_FLOW_QOS_TYPE) { + key_extract = &priv->extract.qos_key_extract; + key_profile = &key_extract->key_profile; + dpkg = &key_extract->dpkg; + num = key_profile->num; + key_addr = flow->qos_key_addr; + mask_addr = flow->qos_mask_addr; + } else { + key_extract = &priv->extract.tc_key_extract[tc_id]; + key_profile = &key_extract->key_profile; + dpkg = &key_extract->dpkg; + num = key_profile->num; + key_addr = flow->fs_key_addr; + mask_addr = flow->fs_mask_addr; + } + + if (num >= DPKG_MAX_NUM_OF_EXTRACTS) { + DPAA2_PMD_ERR("Number of extracts overflows"); + return -EINVAL; + } + + if (key_profile->ip_addr_type == IP_NONE_ADDR_EXTRACT) { + if (field == NH_FLD_IP_SRC) + key_profile->ip_addr_type = IP_SRC_EXTRACT; + else + key_profile->ip_addr_type = IP_DST_EXTRACT; + ipaddr_extract_len = size; + + key_profile->ip_addr_extract_pos = num; + if (num > 0) { + key_profile->ip_addr_extract_off = + key_profile->key_offset[num - 1] + + key_profile->key_size[num - 1]; + } else { + key_profile->ip_addr_extract_off = 0; + } + key_profile->key_max_size += NH_FLD_IPV6_ADDR_SIZE; + } else if (key_profile->ip_addr_type == IP_SRC_EXTRACT) { + if (field == NH_FLD_IP_SRC) { + ipaddr_extract_len = size; + goto rule_configure; + } + key_profile->ip_addr_type = IP_SRC_DST_EXTRACT; + ipaddr_extract_len = size * 2; + key_profile->key_max_size += 
NH_FLD_IPV6_ADDR_SIZE; + } else if (key_profile->ip_addr_type == IP_DST_EXTRACT) { + if (field == NH_FLD_IP_DST) { + ipaddr_extract_len = size; + goto rule_configure; + } + key_profile->ip_addr_type = IP_DST_SRC_EXTRACT; + ipaddr_extract_len = size * 2; + key_profile->key_max_size += NH_FLD_IPV6_ADDR_SIZE; + } + key_profile->num++; + key_profile->prot_field[num].type = DPAA2_NET_PROT_KEY; + + dpkg->extracts[num].extract.from_hdr.prot = prot; + dpkg->extracts[num].extract.from_hdr.field = field; + dpkg->extracts[num].extract.from_hdr.type = DPKG_FULL_FIELD; + dpkg->num_extracts++; + + if (dist_type == DPAA2_FLOW_QOS_TYPE) + local_cfg = DPAA2_FLOW_QOS_TYPE; + else + local_cfg = DPAA2_FLOW_FS_TYPE; + +rule_configure: + key_addr += key_profile->ip_addr_extract_off; + ip_addr_data = (union ip_addr_extract_rule *)key_addr; + mask_addr += key_profile->ip_addr_extract_off; + ip_addr_mask = (union ip_addr_extract_rule *)mask_addr; + + if (orig_prot == NET_PROT_IPV4 && + orig_field == NH_FLD_IPV4_SRC_IP) { + if (key_profile->ip_addr_type == IP_SRC_EXTRACT || + key_profile->ip_addr_type == IP_SRC_DST_EXTRACT) { + memcpy(&ip_addr_data->ipv4_sd_addr.ipv4_src, + key, size); + memcpy(&ip_addr_mask->ipv4_sd_addr.ipv4_src, + mask, size); + } else { + memcpy(&ip_addr_data->ipv4_ds_addr.ipv4_src, + key, size); + memcpy(&ip_addr_mask->ipv4_ds_addr.ipv4_src, + mask, size); + } + } else if (orig_prot == NET_PROT_IPV4 && + orig_field == NH_FLD_IPV4_DST_IP) { + if (key_profile->ip_addr_type == IP_DST_EXTRACT || + key_profile->ip_addr_type == IP_DST_SRC_EXTRACT) { + memcpy(&ip_addr_data->ipv4_ds_addr.ipv4_dst, + key, size); + memcpy(&ip_addr_mask->ipv4_ds_addr.ipv4_dst, + mask, size); + } else { + memcpy(&ip_addr_data->ipv4_sd_addr.ipv4_dst, + key, size); + memcpy(&ip_addr_mask->ipv4_sd_addr.ipv4_dst, + mask, size); + } + } else if (orig_prot == NET_PROT_IPV6 && + orig_field == NH_FLD_IPV6_SRC_IP) { + if (key_profile->ip_addr_type == IP_SRC_EXTRACT || + key_profile->ip_addr_type == IP_SRC_DST_EXTRACT) { + memcpy(ip_addr_data->ipv6_sd_addr.ipv6_src, + key, size); + memcpy(ip_addr_mask->ipv6_sd_addr.ipv6_src, + mask, size); + } else { + memcpy(ip_addr_data->ipv6_ds_addr.ipv6_src, + key, size); + memcpy(ip_addr_mask->ipv6_ds_addr.ipv6_src, + mask, size); + } + } else if (orig_prot == NET_PROT_IPV6 && + orig_field == NH_FLD_IPV6_DST_IP) { + if (key_profile->ip_addr_type == IP_DST_EXTRACT || + key_profile->ip_addr_type == IP_DST_SRC_EXTRACT) { + memcpy(ip_addr_data->ipv6_ds_addr.ipv6_dst, + key, size); + memcpy(ip_addr_mask->ipv6_ds_addr.ipv6_dst, + mask, size); + } else { + memcpy(ip_addr_data->ipv6_sd_addr.ipv6_dst, + key, size); + memcpy(ip_addr_mask->ipv6_sd_addr.ipv6_dst, + mask, size); } + } + + if (dist_type == DPAA2_FLOW_QOS_TYPE) { + flow->qos_rule_size = + key_profile->ip_addr_extract_off + ipaddr_extract_len; + } else { + flow->fs_rule_size = + key_profile->ip_addr_extract_off + ipaddr_extract_len; + } + + if (recfg) + *recfg |= local_cfg; + + return 0; +} + +static int +dpaa2_configure_flow_tunnel_eth(struct dpaa2_dev_flow *flow, + struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, + const struct rte_flow_item *pattern, + int *device_configured) +{ + int ret, local_cfg = 0; + uint32_t group; + const struct rte_flow_item_eth *spec, *mask; + struct dpaa2_dev_priv *priv = dev->data->dev_private; + const char zero_cmp[RTE_ETHER_ADDR_LEN] = {0}; + + group = attr->group; + + /* Parse pattern list to get the matching parameters */ + spec = pattern->spec; + mask = pattern->mask ? 
+ pattern->mask : &dpaa2_flow_item_eth_mask; + + /* Get traffic class index and flow id to be configured */ + flow->tc_id = group; + flow->tc_index = attr->priority; + + if (!spec) + return 0; + + ret = dpaa2_flow_extract_support((const uint8_t *)mask, + RTE_FLOW_ITEM_TYPE_ETH); + if (ret) { + DPAA2_PMD_WARN("Extract field(s) of ethernet failed"); + + return ret; + } + + if (memcmp((const char *)&mask->src, + zero_cmp, RTE_ETHER_ADDR_LEN)) { + /*SRC[0:1]*/ + ret = dpaa2_flow_add_pr_extract_rule(flow, + DPAA2_VXLAN_IN_SADDR0_OFFSET, + 1, &spec->src.addr_bytes[0], + &mask->src.addr_bytes[0], + priv, group, &local_cfg, DPAA2_FLOW_QOS_TYPE); + if (ret) + return ret; + /*SRC[1:2]*/ + ret = dpaa2_flow_add_pr_extract_rule(flow, + DPAA2_VXLAN_IN_SADDR1_OFFSET, + 2, &spec->src.addr_bytes[1], + &mask->src.addr_bytes[1], + priv, group, &local_cfg, DPAA2_FLOW_QOS_TYPE); + if (ret) + return ret; + /*SRC[3:1]*/ + ret = dpaa2_flow_add_pr_extract_rule(flow, + DPAA2_VXLAN_IN_SADDR3_OFFSET, + 1, &spec->src.addr_bytes[3], + &mask->src.addr_bytes[3], + priv, group, &local_cfg, DPAA2_FLOW_QOS_TYPE); + if (ret) + return ret; + /*SRC[4:2]*/ + ret = dpaa2_flow_add_pr_extract_rule(flow, + DPAA2_VXLAN_IN_SADDR4_OFFSET, + 2, &spec->src.addr_bytes[4], + &mask->src.addr_bytes[4], + priv, group, &local_cfg, DPAA2_FLOW_QOS_TYPE); + if (ret) + return ret; + + /*SRC[0:1]*/ + ret = dpaa2_flow_add_pr_extract_rule(flow, + DPAA2_VXLAN_IN_SADDR0_OFFSET, + 1, &spec->src.addr_bytes[0], + &mask->src.addr_bytes[0], + priv, group, &local_cfg, DPAA2_FLOW_FS_TYPE); + if (ret) + return ret; + /*SRC[1:2]*/ + ret = dpaa2_flow_add_pr_extract_rule(flow, + DPAA2_VXLAN_IN_SADDR1_OFFSET, + 2, &spec->src.addr_bytes[1], + &mask->src.addr_bytes[1], + priv, group, &local_cfg, DPAA2_FLOW_FS_TYPE); + if (ret) + return ret; + /*SRC[3:1]*/ + ret = dpaa2_flow_add_pr_extract_rule(flow, + DPAA2_VXLAN_IN_SADDR3_OFFSET, + 1, &spec->src.addr_bytes[3], + &mask->src.addr_bytes[3], + priv, group, &local_cfg, DPAA2_FLOW_FS_TYPE); + if (ret) + return ret; + /*SRC[4:2]*/ + ret = dpaa2_flow_add_pr_extract_rule(flow, + DPAA2_VXLAN_IN_SADDR4_OFFSET, + 2, &spec->src.addr_bytes[4], + &mask->src.addr_bytes[4], + priv, group, &local_cfg, DPAA2_FLOW_FS_TYPE); + if (ret) + return ret; + } + + if (memcmp((const char *)&mask->dst, + zero_cmp, RTE_ETHER_ADDR_LEN)) { + /*DST[0:1]*/ + ret = dpaa2_flow_add_pr_extract_rule(flow, + DPAA2_VXLAN_IN_DADDR0_OFFSET, + 1, &spec->dst.addr_bytes[0], + &mask->dst.addr_bytes[0], + priv, group, &local_cfg, DPAA2_FLOW_QOS_TYPE); + if (ret) + return ret; + /*DST[1:1]*/ + ret = dpaa2_flow_add_pr_extract_rule(flow, + DPAA2_VXLAN_IN_DADDR1_OFFSET, + 1, &spec->dst.addr_bytes[1], + &mask->dst.addr_bytes[1], + priv, group, &local_cfg, DPAA2_FLOW_QOS_TYPE); + if (ret) + return ret; + /*DST[2:3]*/ + ret = dpaa2_flow_add_pr_extract_rule(flow, + DPAA2_VXLAN_IN_DADDR2_OFFSET, + 3, &spec->dst.addr_bytes[2], + &mask->dst.addr_bytes[2], + priv, group, &local_cfg, DPAA2_FLOW_QOS_TYPE); + if (ret) + return ret; + /*DST[5:1]*/ + ret = dpaa2_flow_add_pr_extract_rule(flow, + DPAA2_VXLAN_IN_DADDR5_OFFSET, + 1, &spec->dst.addr_bytes[5], + &mask->dst.addr_bytes[5], + priv, group, &local_cfg, DPAA2_FLOW_QOS_TYPE); + if (ret) + return ret; + + /*DST[0:1]*/ + ret = dpaa2_flow_add_pr_extract_rule(flow, + DPAA2_VXLAN_IN_DADDR0_OFFSET, + 1, &spec->dst.addr_bytes[0], + &mask->dst.addr_bytes[0], + priv, group, &local_cfg, DPAA2_FLOW_FS_TYPE); + if (ret) + return ret; + /*DST[1:1]*/ + ret = dpaa2_flow_add_pr_extract_rule(flow, + DPAA2_VXLAN_IN_DADDR1_OFFSET, + 1, 
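[Editor's illustrative sketch, not part of the patch: for VXLAN inner Ethernet the six source-MAC bytes are not one contiguous extract; they are programmed as four parse-result extracts at the fixed offsets used above (byte 0, bytes 1-2, byte 3, bytes 4-5), each installed once per lookup table. A loop over that segmentation, with a hypothetical callback standing in for dpaa2_flow_add_pr_extract_rule(), could look like:]

#include <stdint.h>
#include <stddef.h>

/* One parse-result segment of the inner source MAC. */
struct mac_segment {
    uint8_t addr_off;   /* offset into the 6-byte address */
    uint8_t len;        /* number of bytes in this segment */
};

/* Mirrors the SADDR0/SADDR1/SADDR3/SADDR4 split used above. */
static const struct mac_segment inner_smac_segs[] = {
    { 0, 1 }, { 1, 2 }, { 3, 1 }, { 4, 2 },
};

/* Hypothetical add callback (the real driver helper takes more context). */
typedef int (*add_seg_t)(const uint8_t *key, const uint8_t *mask,
                         uint8_t addr_off, uint8_t len, void *ctx);

static int
add_inner_smac(const uint8_t spec[6], const uint8_t mask[6],
               add_seg_t add, void *ctx)
{
    size_t i;
    int ret;

    for (i = 0; i < sizeof(inner_smac_segs) / sizeof(inner_smac_segs[0]); i++) {
        const struct mac_segment *s = &inner_smac_segs[i];

        ret = add(&spec[s->addr_off], &mask[s->addr_off],
                  s->addr_off, s->len, ctx);
        if (ret)
            return ret;
    }
    return 0;
}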
&spec->dst.addr_bytes[1], + &mask->dst.addr_bytes[1], + priv, group, &local_cfg, DPAA2_FLOW_FS_TYPE); + if (ret) + return ret; + /*DST[2:3]*/ + ret = dpaa2_flow_add_pr_extract_rule(flow, + DPAA2_VXLAN_IN_DADDR2_OFFSET, + 3, &spec->dst.addr_bytes[2], + &mask->dst.addr_bytes[2], + priv, group, &local_cfg, DPAA2_FLOW_FS_TYPE); + if (ret) + return ret; + /*DST[5:1]*/ + ret = dpaa2_flow_add_pr_extract_rule(flow, + DPAA2_VXLAN_IN_DADDR5_OFFSET, + 1, &spec->dst.addr_bytes[5], + &mask->dst.addr_bytes[5], + priv, group, &local_cfg, DPAA2_FLOW_FS_TYPE); + if (ret) + return ret; + } + + if (memcmp((const char *)&mask->type, + zero_cmp, sizeof(rte_be16_t))) { + ret = dpaa2_flow_add_pr_extract_rule(flow, + DPAA2_VXLAN_IN_TYPE_OFFSET, + sizeof(rte_be16_t), &spec->type, &mask->type, + priv, group, &local_cfg, DPAA2_FLOW_QOS_TYPE); + if (ret) + return ret; + ret = dpaa2_flow_add_pr_extract_rule(flow, + DPAA2_VXLAN_IN_TYPE_OFFSET, + sizeof(rte_be16_t), &spec->type, &mask->type, + priv, group, &local_cfg, DPAA2_FLOW_FS_TYPE); + if (ret) + return ret; + } + + (*device_configured) |= local_cfg; + + return 0; +} + +static int +dpaa2_configure_flow_eth(struct dpaa2_dev_flow *flow, + struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, + const struct rte_dpaa2_flow_item *dpaa2_pattern, + const struct rte_flow_action actions[] __rte_unused, + struct rte_flow_error *error __rte_unused, + int *device_configured) +{ + int ret, local_cfg = 0; + uint32_t group; + const struct rte_flow_item_eth *spec, *mask; + struct dpaa2_dev_priv *priv = dev->data->dev_private; + const char zero_cmp[RTE_ETHER_ADDR_LEN] = {0}; + const struct rte_flow_item *pattern = + &dpaa2_pattern->generic_item; + + if (dpaa2_pattern->in_tunnel) { + return dpaa2_configure_flow_tunnel_eth(flow, + dev, attr, pattern, device_configured); + } + + group = attr->group; + + /* Parse pattern list to get the matching parameters */ + spec = pattern->spec; + mask = pattern->mask ? 
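[Editor's illustrative sketch, not part of the patch: every matched field in the handlers above is installed twice, once into the QoS key (traffic-class selection) and once into the FS key (queue selection within the class), which is why each add-rule call appears back to back with DPAA2_FLOW_QOS_TYPE and DPAA2_FLOW_FS_TYPE. A small wrapper that makes the duplication explicit, assuming a generic add callback:]

/* Hypothetical distribution-table flags mirroring the driver's
 * DPAA2_FLOW_QOS_TYPE / DPAA2_FLOW_FS_TYPE values.
 */
enum dist_table {
    DIST_QOS = 1 << 0,  /* selects the traffic class */
    DIST_FS  = 1 << 1,  /* selects the queue within the class */
};

typedef int (*add_rule_t)(enum dist_table table, const void *key,
                          const void *mask, int size, void *ctx);

/* Install one matched field into both lookup stages. */
static int
add_rule_both_tables(add_rule_t add, const void *key, const void *mask,
                     int size, void *ctx)
{
    int ret;

    ret = add(DIST_QOS, key, mask, size, ctx);
    if (ret)
        return ret;
    return add(DIST_FS, key, mask, size, ctx);
}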
+ pattern->mask : &dpaa2_flow_item_eth_mask; + + /* Get traffic class index and flow id to be configured */ + flow->tc_id = group; + flow->tc_index = attr->priority; + + if (!spec) { + ret = dpaa2_flow_identify_by_faf(priv, flow, + FAF_ETH_FRAM, DPAA2_FLOW_QOS_TYPE, + group, &local_cfg); + if (ret) + return ret; + + ret = dpaa2_flow_identify_by_faf(priv, flow, + FAF_ETH_FRAM, DPAA2_FLOW_FS_TYPE, + group, &local_cfg); + if (ret) + return ret; + + (*device_configured) |= local_cfg; + return 0; + } + + ret = dpaa2_flow_extract_support((const uint8_t *)mask, + RTE_FLOW_ITEM_TYPE_ETH); + if (ret) { + DPAA2_PMD_WARN("Extract field(s) of ethernet failed"); + + return ret; + } + + if (memcmp((const char *)&mask->src, + zero_cmp, RTE_ETHER_ADDR_LEN)) { + ret = dpaa2_flow_add_hdr_extract_rule(flow, NET_PROT_ETH, + NH_FLD_ETH_SA, &spec->src.addr_bytes, + &mask->src.addr_bytes, RTE_ETHER_ADDR_LEN, + priv, group, &local_cfg, DPAA2_FLOW_QOS_TYPE); + if (ret) + return ret; + + ret = dpaa2_flow_add_hdr_extract_rule(flow, NET_PROT_ETH, + NH_FLD_ETH_SA, &spec->src.addr_bytes, + &mask->src.addr_bytes, RTE_ETHER_ADDR_LEN, + priv, group, &local_cfg, DPAA2_FLOW_FS_TYPE); + if (ret) + return ret; + } + + if (memcmp((const char *)&mask->dst, + zero_cmp, RTE_ETHER_ADDR_LEN)) { + ret = dpaa2_flow_add_hdr_extract_rule(flow, NET_PROT_ETH, + NH_FLD_ETH_DA, &spec->dst.addr_bytes, + &mask->dst.addr_bytes, RTE_ETHER_ADDR_LEN, + priv, group, &local_cfg, DPAA2_FLOW_QOS_TYPE); + if (ret) + return ret; + + ret = dpaa2_flow_add_hdr_extract_rule(flow, NET_PROT_ETH, + NH_FLD_ETH_DA, &spec->dst.addr_bytes, + &mask->dst.addr_bytes, RTE_ETHER_ADDR_LEN, + priv, group, &local_cfg, DPAA2_FLOW_FS_TYPE); + if (ret) + return ret; + } + + if (memcmp((const char *)&mask->type, + zero_cmp, sizeof(rte_be16_t))) { + ret = dpaa2_flow_add_hdr_extract_rule(flow, NET_PROT_ETH, + NH_FLD_ETH_TYPE, &spec->type, + &mask->type, sizeof(rte_be16_t), + priv, group, &local_cfg, DPAA2_FLOW_QOS_TYPE); + if (ret) + return ret; + + ret = dpaa2_flow_add_hdr_extract_rule(flow, NET_PROT_ETH, + NH_FLD_ETH_TYPE, &spec->type, + &mask->type, sizeof(rte_be16_t), + priv, group, &local_cfg, DPAA2_FLOW_FS_TYPE); + if (ret) + return ret; + } + + (*device_configured) |= local_cfg; + + return 0; +} + +static int +dpaa2_configure_flow_tunnel_vlan(struct dpaa2_dev_flow *flow, + struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, + const struct rte_flow_item *pattern, + int *device_configured) +{ + int ret, local_cfg = 0; + uint32_t group; + const struct rte_flow_item_vlan *spec, *mask; + struct dpaa2_dev_priv *priv = dev->data->dev_private; + + group = attr->group; + + /* Parse pattern list to get the matching parameters */ + spec = pattern->spec; + mask = pattern->mask ? 
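[Editor's illustrative sketch, not part of the patch: when a flow item carries no spec, the Ethernet handler above matches only on the parser's frame-annotation flag for that protocol; when a spec is present, each sub-field is programmed only if its mask is non-zero. The decision logic alone, with hypothetical names:]

#include <string.h>
#include <stdint.h>

enum eth_match_plan {
    MATCH_PRESENCE_ONLY, /* no spec: match only the "ethernet frame" flag */
    MATCH_FIELDS,        /* spec given and at least one mask byte set */
    MATCH_NOTHING,       /* spec given but the mask is all-zero */
};

/* Decide how an ETH item should be programmed, mirroring the
 * spec/mask checks in the handler above.
 */
static enum eth_match_plan
plan_eth_item(const uint8_t *spec_mac, const uint8_t *mask_mac)
{
    static const uint8_t zero[6];

    if (!spec_mac)
        return MATCH_PRESENCE_ONLY;
    if (memcmp(mask_mac, zero, sizeof(zero)) != 0)
        return MATCH_FIELDS;
    return MATCH_NOTHING;
}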
+ pattern->mask : &dpaa2_flow_item_vlan_mask; + + /* Get traffic class index and flow id to be configured */ + flow->tc_id = group; + flow->tc_index = attr->priority; + + if (!spec) { + ret = dpaa2_flow_identify_by_faf(priv, flow, + FAFE_VXLAN_IN_VLAN_FRAM, + DPAA2_FLOW_QOS_TYPE, + group, &local_cfg); + if (ret) + return ret; + + ret = dpaa2_flow_identify_by_faf(priv, flow, + FAFE_VXLAN_IN_VLAN_FRAM, + DPAA2_FLOW_FS_TYPE, + group, &local_cfg); + if (ret) + return ret; + + (*device_configured) |= local_cfg; + return 0; + } - ret = dpaa2_flow_rule_data_set( - &priv->extract.tc_key_extract[group], - &flow->fs_rule, - NET_PROT_IP, - NH_FLD_IP_PROTO, - key, mask, NH_FLD_IP_PROTO_SIZE); - if (ret) { - DPAA2_PMD_ERR("FS NH_FLD_IP_PROTO rule data set failed"); - return -1; - } + ret = dpaa2_flow_extract_support((const uint8_t *)mask, + RTE_FLOW_ITEM_TYPE_VLAN); + if (ret) { + DPAA2_PMD_WARN("Extract field(s) of vlan not support."); + + return ret; } + if (!mask->tci) + return 0; + + ret = dpaa2_flow_add_pr_extract_rule(flow, + DPAA2_VXLAN_IN_TCI_OFFSET, + sizeof(rte_be16_t), &spec->tci, &mask->tci, + priv, group, &local_cfg, DPAA2_FLOW_QOS_TYPE); + if (ret) + return ret; + + ret = dpaa2_flow_add_pr_extract_rule(flow, + DPAA2_VXLAN_IN_TCI_OFFSET, + sizeof(rte_be16_t), &spec->tci, &mask->tci, + priv, group, &local_cfg, DPAA2_FLOW_FS_TYPE); + if (ret) + return ret; + (*device_configured) |= local_cfg; return 0; } static int -dpaa2_configure_flow_icmp(struct rte_flow *flow, - struct rte_eth_dev *dev, - const struct rte_flow_attr *attr, - const struct rte_flow_item *pattern, - const struct rte_flow_action actions[] __rte_unused, - struct rte_flow_error *error __rte_unused, - int *device_configured) +dpaa2_configure_flow_vlan(struct dpaa2_dev_flow *flow, + struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, + const struct rte_dpaa2_flow_item *dpaa2_pattern, + const struct rte_flow_action actions[] __rte_unused, + struct rte_flow_error *error __rte_unused, + int *device_configured) { - int index, ret; - int local_cfg = 0; + int ret, local_cfg = 0; uint32_t group; - const struct rte_flow_item_icmp *spec, *mask; - - const struct rte_flow_item_icmp *last __rte_unused; + const struct rte_flow_item_vlan *spec, *mask; struct dpaa2_dev_priv *priv = dev->data->dev_private; + const struct rte_flow_item *pattern = + &dpaa2_pattern->generic_item; + + if (dpaa2_pattern->in_tunnel) { + return dpaa2_configure_flow_tunnel_vlan(flow, + dev, attr, pattern, device_configured); + } group = attr->group; /* Parse pattern list to get the matching parameters */ - spec = (const struct rte_flow_item_icmp *)pattern->spec; - last = (const struct rte_flow_item_icmp *)pattern->last; - mask = (const struct rte_flow_item_icmp *) - (pattern->mask ? pattern->mask : &dpaa2_flow_item_icmp_mask); + spec = pattern->spec; + mask = pattern->mask ? pattern->mask : &dpaa2_flow_item_vlan_mask; /* Get traffic class index and flow id to be configured */ flow->tc_id = group; flow->tc_index = attr->priority; if (!spec) { - /* Don't care any field of ICMP header, - * only care ICMP protocol. - * Example: flow create 0 ingress pattern icmp / - */ - /* Next proto of Generical IP is actually used - * for ICMP identification. 
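[Editor's illustrative sketch, not part of the patch: the inner-VLAN handler above keys only on the 16-bit TCI taken from the re-parsed inner header at a fixed parse-result offset, and it is the caller-supplied mask that decides which TCI sub-fields (PCP/DEI/VID per 802.1Q) actually take part in the match. A small helper building a VID-only spec/mask pair, with constants defined locally for illustration:]

#include <stdint.h>

#define VLAN_VID_MASK  0x0FFF
#define VLAN_DEI_MASK  0x1000
#define VLAN_PCP_MASK  0xE000

/* Build a host-order TCI spec/mask pair that matches only the VLAN ID;
 * a caller would convert both to big-endian before placing them in the
 * rte_flow item (hypothetical usage, not the driver API).
 */
static void
tci_match_vid(uint16_t vid, uint16_t *tci_spec, uint16_t *tci_mask)
{
    *tci_spec = vid & VLAN_VID_MASK;
    *tci_mask = VLAN_VID_MASK; /* PCP and DEI bits left unmasked */
}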
- */ - struct proto_discrimination proto; + ret = dpaa2_flow_identify_by_faf(priv, flow, FAF_VLAN_FRAM, + DPAA2_FLOW_QOS_TYPE, group, + &local_cfg); + if (ret) + return ret; + + ret = dpaa2_flow_identify_by_faf(priv, flow, FAF_VLAN_FRAM, + DPAA2_FLOW_FS_TYPE, group, + &local_cfg); + if (ret) + return ret; - index = dpaa2_flow_extract_search( - &priv->extract.qos_key_extract.dpkg, - NET_PROT_IP, NH_FLD_IP_PROTO); - if (index < 0) { - ret = dpaa2_flow_proto_discrimination_extract( - &priv->extract.qos_key_extract, - DPAA2_FLOW_ITEM_TYPE_GENERIC_IP); - if (ret) { - DPAA2_PMD_ERR( - "QoS Extract IP protocol to discriminate ICMP failed."); + (*device_configured) |= local_cfg; + return 0; + } - return -1; - } - local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE; - } + ret = dpaa2_flow_extract_support((const uint8_t *)mask, + RTE_FLOW_ITEM_TYPE_VLAN); + if (ret) { + DPAA2_PMD_WARN("Extract field(s) of vlan not support."); + return ret; + } - index = dpaa2_flow_extract_search( - &priv->extract.tc_key_extract[group].dpkg, - NET_PROT_IP, NH_FLD_IP_PROTO); - if (index < 0) { - ret = dpaa2_flow_proto_discrimination_extract( - &priv->extract.tc_key_extract[group], - DPAA2_FLOW_ITEM_TYPE_GENERIC_IP); - if (ret) { - DPAA2_PMD_ERR( - "FS Extract IP protocol to discriminate ICMP failed."); + if (!mask->tci) + return 0; - return -1; - } - local_cfg |= DPAA2_FS_TABLE_RECONFIGURE; - } + ret = dpaa2_flow_add_hdr_extract_rule(flow, NET_PROT_VLAN, + NH_FLD_VLAN_TCI, &spec->tci, + &mask->tci, sizeof(rte_be16_t), + priv, group, &local_cfg, DPAA2_FLOW_QOS_TYPE); + if (ret) + return ret; - ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group); - if (ret) { - DPAA2_PMD_ERR( - "Move IP addr before ICMP discrimination set failed"); - return -1; + ret = dpaa2_flow_add_hdr_extract_rule(flow, NET_PROT_VLAN, + NH_FLD_VLAN_TCI, &spec->tci, + &mask->tci, sizeof(rte_be16_t), + priv, group, &local_cfg, + DPAA2_FLOW_FS_TYPE); + if (ret) + return ret; + + (*device_configured) |= local_cfg; + return 0; +} + +static int +dpaa2_configure_flow_ipv4(struct dpaa2_dev_flow *flow, + struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, + const struct rte_dpaa2_flow_item *dpaa2_pattern, + const struct rte_flow_action actions[] __rte_unused, + struct rte_flow_error *error __rte_unused, + int *device_configured) +{ + int ret, local_cfg = 0; + uint32_t group; + const struct rte_flow_item_ipv4 *spec_ipv4 = 0, *mask_ipv4 = 0; + const void *key, *mask; + struct dpaa2_dev_priv *priv = dev->data->dev_private; + int size; + const struct rte_flow_item *pattern = &dpaa2_pattern->generic_item; + + group = attr->group; + + /* Parse pattern list to get the matching parameters */ + spec_ipv4 = pattern->spec; + mask_ipv4 = pattern->mask ? 
+ pattern->mask : &dpaa2_flow_item_ipv4_mask; + + if (dpaa2_pattern->in_tunnel) { + if (spec_ipv4) { + DPAA2_PMD_ERR("Tunnel-IPv4 distribution not support"); + return -ENOTSUP; } - proto.type = DPAA2_FLOW_ITEM_TYPE_GENERIC_IP; - proto.ip_proto = IPPROTO_ICMP; - ret = dpaa2_flow_proto_discrimination_rule(priv, flow, - proto, group); - if (ret) { - DPAA2_PMD_ERR("ICMP discrimination rule set failed"); - return -1; + ret = dpaa2_flow_identify_by_faf(priv, flow, + FAFE_VXLAN_IN_IPV4_FRAM, + DPAA2_FLOW_QOS_TYPE, group, + &local_cfg); + if (ret) + return ret; + + ret = dpaa2_flow_identify_by_faf(priv, flow, + FAFE_VXLAN_IN_IPV4_FRAM, + DPAA2_FLOW_FS_TYPE, group, + &local_cfg); + return ret; + } + + /* Get traffic class index and flow id to be configured */ + flow->tc_id = group; + flow->tc_index = attr->priority; + + ret = dpaa2_flow_identify_by_faf(priv, flow, FAF_IPV4_FRAM, + DPAA2_FLOW_QOS_TYPE, group, + &local_cfg); + if (ret) + return ret; + + ret = dpaa2_flow_identify_by_faf(priv, flow, FAF_IPV4_FRAM, + DPAA2_FLOW_FS_TYPE, group, &local_cfg); + if (ret) + return ret; + + if (!spec_ipv4) { + (*device_configured) |= local_cfg; + return 0; + } + + ret = dpaa2_flow_extract_support((const uint8_t *)mask_ipv4, + RTE_FLOW_ITEM_TYPE_IPV4); + if (ret) { + DPAA2_PMD_WARN("Extract field(s) of IPv4 not support."); + return ret; + } + + if (mask_ipv4->hdr.src_addr) { + key = &spec_ipv4->hdr.src_addr; + mask = &mask_ipv4->hdr.src_addr; + size = sizeof(rte_be32_t); + + ret = dpaa2_flow_add_ipaddr_extract_rule(flow, NET_PROT_IPV4, + NH_FLD_IPV4_SRC_IP, + key, mask, size, priv, + group, &local_cfg, + DPAA2_FLOW_QOS_TYPE); + if (ret) + return ret; + + ret = dpaa2_flow_add_ipaddr_extract_rule(flow, NET_PROT_IPV4, + NH_FLD_IPV4_SRC_IP, + key, mask, size, priv, + group, &local_cfg, + DPAA2_FLOW_FS_TYPE); + if (ret) + return ret; + } + + if (mask_ipv4->hdr.dst_addr) { + key = &spec_ipv4->hdr.dst_addr; + mask = &mask_ipv4->hdr.dst_addr; + size = sizeof(rte_be32_t); + + ret = dpaa2_flow_add_ipaddr_extract_rule(flow, NET_PROT_IPV4, + NH_FLD_IPV4_DST_IP, + key, mask, size, priv, + group, &local_cfg, + DPAA2_FLOW_QOS_TYPE); + if (ret) + return ret; + ret = dpaa2_flow_add_ipaddr_extract_rule(flow, NET_PROT_IPV4, + NH_FLD_IPV4_DST_IP, + key, mask, size, priv, + group, &local_cfg, + DPAA2_FLOW_FS_TYPE); + if (ret) + return ret; + } + + if (mask_ipv4->hdr.next_proto_id) { + key = &spec_ipv4->hdr.next_proto_id; + mask = &mask_ipv4->hdr.next_proto_id; + size = sizeof(uint8_t); + + ret = dpaa2_flow_add_hdr_extract_rule(flow, NET_PROT_IP, + NH_FLD_IP_PROTO, key, + mask, size, priv, group, + &local_cfg, + DPAA2_FLOW_QOS_TYPE); + if (ret) + return ret; + + ret = dpaa2_flow_add_hdr_extract_rule(flow, NET_PROT_IP, + NH_FLD_IP_PROTO, key, + mask, size, priv, group, + &local_cfg, + DPAA2_FLOW_FS_TYPE); + if (ret) + return ret; + } + + (*device_configured) |= local_cfg; + return 0; +} + +static int +dpaa2_configure_flow_ipv6(struct dpaa2_dev_flow *flow, + struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, + const struct rte_dpaa2_flow_item *dpaa2_pattern, + const struct rte_flow_action actions[] __rte_unused, + struct rte_flow_error *error __rte_unused, + int *device_configured) +{ + int ret, local_cfg = 0; + uint32_t group; + const struct rte_flow_item_ipv6 *spec_ipv6 = 0, *mask_ipv6 = 0; + const void *key, *mask; + struct dpaa2_dev_priv *priv = dev->data->dev_private; + const char zero_cmp[NH_FLD_IPV6_ADDR_SIZE] = {0}; + int size; + const struct rte_flow_item *pattern = &dpaa2_pattern->generic_item; + + group = 
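[Editor's illustrative sketch, not part of the patch: the IPv4 handler above walks the mask field by field and only adds an extract for sub-fields whose mask is non-zero, in the order source address, destination address, protocol. A standalone collector over a simplified stand-in struct shows that shape:]

#include <stdint.h>

/* Minimal stand-in for the IPv4 fields the handler above can match. */
struct ipv4_match {
    uint32_t src_addr;       /* big-endian */
    uint32_t dst_addr;       /* big-endian */
    uint8_t  next_proto_id;
};

struct field_ref {
    const void *key;
    const void *mask;
    int size;
};

/* Collect only the sub-fields whose mask is non-zero, in the same order
 * the handler above programs them.
 */
static int
collect_ipv4_fields(const struct ipv4_match *spec,
                    const struct ipv4_match *mask,
                    struct field_ref refs[3])
{
    int n = 0;

    if (mask->src_addr)
        refs[n++] = (struct field_ref){ &spec->src_addr,
                                        &mask->src_addr, sizeof(uint32_t) };
    if (mask->dst_addr)
        refs[n++] = (struct field_ref){ &spec->dst_addr,
                                        &mask->dst_addr, sizeof(uint32_t) };
    if (mask->next_proto_id)
        refs[n++] = (struct field_ref){ &spec->next_proto_id,
                                        &mask->next_proto_id, sizeof(uint8_t) };
    return n; /* number of extracts this flow will need */
}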
attr->group; + + /* Parse pattern list to get the matching parameters */ + spec_ipv6 = pattern->spec; + mask_ipv6 = pattern->mask ? pattern->mask : &dpaa2_flow_item_ipv6_mask; + + /* Get traffic class index and flow id to be configured */ + flow->tc_id = group; + flow->tc_index = attr->priority; + + if (dpaa2_pattern->in_tunnel) { + if (spec_ipv6) { + DPAA2_PMD_ERR("Tunnel-IPv6 distribution not support"); + return -ENOTSUP; } + ret = dpaa2_flow_identify_by_faf(priv, flow, + FAFE_VXLAN_IN_IPV6_FRAM, + DPAA2_FLOW_QOS_TYPE, group, + &local_cfg); + if (ret) + return ret; + + ret = dpaa2_flow_identify_by_faf(priv, flow, + FAFE_VXLAN_IN_IPV6_FRAM, + DPAA2_FLOW_FS_TYPE, group, + &local_cfg); + return ret; + } + + ret = dpaa2_flow_identify_by_faf(priv, flow, FAF_IPV6_FRAM, + DPAA2_FLOW_QOS_TYPE, group, + &local_cfg); + if (ret) + return ret; + + ret = dpaa2_flow_identify_by_faf(priv, flow, FAF_IPV6_FRAM, + DPAA2_FLOW_FS_TYPE, group, &local_cfg); + if (ret) + return ret; + + if (!spec_ipv6) { (*device_configured) |= local_cfg; + return 0; + } + + ret = dpaa2_flow_extract_support((const uint8_t *)mask_ipv6, + RTE_FLOW_ITEM_TYPE_IPV6); + if (ret) { + DPAA2_PMD_WARN("Extract field(s) of IPv6 not support."); + return ret; + } + + if (memcmp((const char *)&mask_ipv6->hdr.src_addr, zero_cmp, NH_FLD_IPV6_ADDR_SIZE)) { + key = &spec_ipv6->hdr.src_addr; + mask = &mask_ipv6->hdr.src_addr; + size = NH_FLD_IPV6_ADDR_SIZE; + + ret = dpaa2_flow_add_ipaddr_extract_rule(flow, NET_PROT_IPV6, + NH_FLD_IPV6_SRC_IP, + key, mask, size, priv, + group, &local_cfg, + DPAA2_FLOW_QOS_TYPE); + if (ret) + return ret; + + ret = dpaa2_flow_add_ipaddr_extract_rule(flow, NET_PROT_IPV6, + NH_FLD_IPV6_SRC_IP, + key, mask, size, priv, + group, &local_cfg, + DPAA2_FLOW_FS_TYPE); + if (ret) + return ret; + } + + if (memcmp((const char *)&mask_ipv6->hdr.dst_addr, zero_cmp, NH_FLD_IPV6_ADDR_SIZE)) { + key = &spec_ipv6->hdr.dst_addr; + mask = &mask_ipv6->hdr.dst_addr; + size = NH_FLD_IPV6_ADDR_SIZE; + + ret = dpaa2_flow_add_ipaddr_extract_rule(flow, NET_PROT_IPV6, + NH_FLD_IPV6_DST_IP, + key, mask, size, priv, + group, &local_cfg, + DPAA2_FLOW_QOS_TYPE); + if (ret) + return ret; + + ret = dpaa2_flow_add_ipaddr_extract_rule(flow, NET_PROT_IPV6, + NH_FLD_IPV6_DST_IP, + key, mask, size, priv, + group, &local_cfg, + DPAA2_FLOW_FS_TYPE); + if (ret) + return ret; + } + + if (mask_ipv6->hdr.proto) { + key = &spec_ipv6->hdr.proto; + mask = &mask_ipv6->hdr.proto; + size = sizeof(uint8_t); + + ret = dpaa2_flow_add_hdr_extract_rule(flow, NET_PROT_IP, + NH_FLD_IP_PROTO, key, + mask, size, priv, group, + &local_cfg, + DPAA2_FLOW_QOS_TYPE); + if (ret) + return ret; + + ret = dpaa2_flow_add_hdr_extract_rule(flow, NET_PROT_IP, + NH_FLD_IP_PROTO, key, + mask, size, priv, group, + &local_cfg, + DPAA2_FLOW_FS_TYPE); + if (ret) + return ret; + } + + (*device_configured) |= local_cfg; + return 0; +} + +static int +dpaa2_configure_flow_icmp(struct dpaa2_dev_flow *flow, + struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, + const struct rte_dpaa2_flow_item *dpaa2_pattern, + const struct rte_flow_action actions[] __rte_unused, + struct rte_flow_error *error __rte_unused, + int *device_configured) +{ + int ret, local_cfg = 0; + uint32_t group; + const struct rte_flow_item_icmp *spec, *mask; + struct dpaa2_dev_priv *priv = dev->data->dev_private; + const struct rte_flow_item *pattern = &dpaa2_pattern->generic_item; + + group = attr->group; + + /* Parse pattern list to get the matching parameters */ + spec = pattern->spec; + mask = pattern->mask ? 
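[Editor's illustrative sketch, not part of the patch: unlike the scalar IPv4 fields, the 128-bit IPv6 address masks cannot be tested with a plain `if (mask)`, so the IPv6 handler above compares them against a zeroed buffer. The test in isolation:]

#include <string.h>
#include <stdint.h>

#define IPV6_ADDR_SIZE 16

/* Return non-zero when at least one bit of a 128-bit mask is set,
 * the same check the IPv6 handler performs with memcmp() above.
 */
static int
ipv6_mask_is_set(const uint8_t mask[IPV6_ADDR_SIZE])
{
    static const uint8_t zero[IPV6_ADDR_SIZE];

    return memcmp(mask, zero, IPV6_ADDR_SIZE) != 0;
}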
+ pattern->mask : &dpaa2_flow_item_icmp_mask; + + /* Get traffic class index and flow id to be configured */ + flow->tc_id = group; + flow->tc_index = attr->priority; + + if (dpaa2_pattern->in_tunnel) { + DPAA2_PMD_ERR("Tunnel-ICMP distribution not support"); + return -ENOTSUP; + } + + if (!spec) { + ret = dpaa2_flow_identify_by_faf(priv, flow, + FAF_ICMP_FRAM, DPAA2_FLOW_QOS_TYPE, + group, &local_cfg); + if (ret) + return ret; + + ret = dpaa2_flow_identify_by_faf(priv, flow, + FAF_ICMP_FRAM, DPAA2_FLOW_FS_TYPE, + group, &local_cfg); + if (ret) + return ret; + (*device_configured) |= local_cfg; return 0; } - if (dpaa2_flow_extract_support((const uint8_t *)mask, - RTE_FLOW_ITEM_TYPE_ICMP)) { + ret = dpaa2_flow_extract_support((const uint8_t *)mask, + RTE_FLOW_ITEM_TYPE_ICMP); + if (ret) { DPAA2_PMD_WARN("Extract field(s) of ICMP not support."); - return -1; + return ret; } if (mask->hdr.icmp_type) { - index = dpaa2_flow_extract_search( - &priv->extract.qos_key_extract.dpkg, - NET_PROT_ICMP, NH_FLD_ICMP_TYPE); - if (index < 0) { - ret = dpaa2_flow_extract_add( - &priv->extract.qos_key_extract, - NET_PROT_ICMP, - NH_FLD_ICMP_TYPE, - NH_FLD_ICMP_TYPE_SIZE); - if (ret) { - DPAA2_PMD_ERR("QoS Extract add ICMP_TYPE failed."); + ret = dpaa2_flow_add_hdr_extract_rule(flow, NET_PROT_ICMP, + NH_FLD_ICMP_TYPE, &spec->hdr.icmp_type, + &mask->hdr.icmp_type, sizeof(uint8_t), + priv, group, &local_cfg, DPAA2_FLOW_QOS_TYPE); + if (ret) + return ret; + + ret = dpaa2_flow_add_hdr_extract_rule(flow, NET_PROT_ICMP, + NH_FLD_ICMP_TYPE, &spec->hdr.icmp_type, + &mask->hdr.icmp_type, sizeof(uint8_t), + priv, group, &local_cfg, DPAA2_FLOW_FS_TYPE); + if (ret) + return ret; + } - return -1; - } - local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE; + if (mask->hdr.icmp_code) { + ret = dpaa2_flow_add_hdr_extract_rule(flow, NET_PROT_ICMP, + NH_FLD_ICMP_CODE, &spec->hdr.icmp_code, + &mask->hdr.icmp_code, sizeof(uint8_t), + priv, group, &local_cfg, DPAA2_FLOW_QOS_TYPE); + if (ret) + return ret; + + ret = dpaa2_flow_add_hdr_extract_rule(flow, NET_PROT_ICMP, + NH_FLD_ICMP_CODE, &spec->hdr.icmp_code, + &mask->hdr.icmp_code, sizeof(uint8_t), + priv, group, &local_cfg, DPAA2_FLOW_FS_TYPE); + if (ret) + return ret; + } + + (*device_configured) |= local_cfg; + + return 0; +} + +static int +dpaa2_configure_flow_udp(struct dpaa2_dev_flow *flow, + struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, + const struct rte_dpaa2_flow_item *dpaa2_pattern, + const struct rte_flow_action actions[] __rte_unused, + struct rte_flow_error *error __rte_unused, + int *device_configured) +{ + int ret, local_cfg = 0; + uint32_t group; + const struct rte_flow_item_udp *spec, *mask; + struct dpaa2_dev_priv *priv = dev->data->dev_private; + const struct rte_flow_item *pattern = &dpaa2_pattern->generic_item; + + group = attr->group; + + /* Parse pattern list to get the matching parameters */ + spec = pattern->spec; + mask = pattern->mask ? 
+ pattern->mask : &dpaa2_flow_item_udp_mask; + + /* Get traffic class index and flow id to be configured */ + flow->tc_id = group; + flow->tc_index = attr->priority; + + if (dpaa2_pattern->in_tunnel) { + if (spec) { + DPAA2_PMD_ERR("Tunnel-UDP distribution not support"); + return -ENOTSUP; } - index = dpaa2_flow_extract_search( - &priv->extract.tc_key_extract[group].dpkg, - NET_PROT_ICMP, NH_FLD_ICMP_TYPE); - if (index < 0) { - ret = dpaa2_flow_extract_add( - &priv->extract.tc_key_extract[group], - NET_PROT_ICMP, - NH_FLD_ICMP_TYPE, - NH_FLD_ICMP_TYPE_SIZE); - if (ret) { - DPAA2_PMD_ERR("FS Extract add ICMP_TYPE failed."); + ret = dpaa2_flow_identify_by_faf(priv, flow, + FAFE_VXLAN_IN_UDP_FRAM, + DPAA2_FLOW_QOS_TYPE, group, + &local_cfg); + if (ret) + return ret; + + ret = dpaa2_flow_identify_by_faf(priv, flow, + FAFE_VXLAN_IN_UDP_FRAM, + DPAA2_FLOW_FS_TYPE, group, + &local_cfg); + return ret; + } + + ret = dpaa2_flow_identify_by_faf(priv, flow, + FAF_UDP_FRAM, DPAA2_FLOW_QOS_TYPE, + group, &local_cfg); + if (ret) + return ret; + + ret = dpaa2_flow_identify_by_faf(priv, flow, + FAF_UDP_FRAM, DPAA2_FLOW_FS_TYPE, + group, &local_cfg); + if (ret) + return ret; + + if (!spec) { + (*device_configured) |= local_cfg; + return 0; + } + + ret = dpaa2_flow_extract_support((const uint8_t *)mask, + RTE_FLOW_ITEM_TYPE_UDP); + if (ret) { + DPAA2_PMD_WARN("Extract field(s) of UDP not support."); + + return ret; + } + + if (mask->hdr.src_port) { + ret = dpaa2_flow_add_hdr_extract_rule(flow, NET_PROT_UDP, + NH_FLD_UDP_PORT_SRC, &spec->hdr.src_port, + &mask->hdr.src_port, sizeof(rte_be16_t), + priv, group, &local_cfg, DPAA2_FLOW_QOS_TYPE); + if (ret) + return ret; + + ret = dpaa2_flow_add_hdr_extract_rule(flow, NET_PROT_UDP, + NH_FLD_UDP_PORT_SRC, &spec->hdr.src_port, + &mask->hdr.src_port, sizeof(rte_be16_t), + priv, group, &local_cfg, DPAA2_FLOW_FS_TYPE); + if (ret) + return ret; + } + + if (mask->hdr.dst_port) { + ret = dpaa2_flow_add_hdr_extract_rule(flow, NET_PROT_UDP, + NH_FLD_UDP_PORT_DST, &spec->hdr.dst_port, + &mask->hdr.dst_port, sizeof(rte_be16_t), + priv, group, &local_cfg, DPAA2_FLOW_QOS_TYPE); + if (ret) + return ret; + + ret = dpaa2_flow_add_hdr_extract_rule(flow, NET_PROT_UDP, + NH_FLD_UDP_PORT_DST, &spec->hdr.dst_port, + &mask->hdr.dst_port, sizeof(rte_be16_t), + priv, group, &local_cfg, DPAA2_FLOW_FS_TYPE); + if (ret) + return ret; + } + + (*device_configured) |= local_cfg; + + return 0; +} + +static int +dpaa2_configure_flow_tcp(struct dpaa2_dev_flow *flow, + struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, + const struct rte_dpaa2_flow_item *dpaa2_pattern, + const struct rte_flow_action actions[] __rte_unused, + struct rte_flow_error *error __rte_unused, + int *device_configured) +{ + int ret, local_cfg = 0; + uint32_t group; + const struct rte_flow_item_tcp *spec, *mask; + struct dpaa2_dev_priv *priv = dev->data->dev_private; + const struct rte_flow_item *pattern = &dpaa2_pattern->generic_item; + + group = attr->group; + + /* Parse pattern list to get the matching parameters */ + spec = pattern->spec; + mask = pattern->mask ? 
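[Editor's illustrative sketch, not part of the patch: the UDP/TCP port spec and mask arrive from rte_flow in network byte order (rte_be16_t) and appear to be copied into the key untouched, so the two-byte extract compares directly against the on-wire bytes. A tiny helper making that copy explicit, with hypothetical key/mask buffers rather than the driver's rule layout:]

#include <stdint.h>
#include <string.h>

/* Copy a big-endian L4 port straight into key/mask storage; no byte
 * swap, since the parser extract delivers the on-wire bytes.
 */
static void
set_port_key(uint8_t *key_at, uint8_t *mask_at,
             uint16_t port_be_spec, uint16_t port_be_mask)
{
    memcpy(key_at, &port_be_spec, sizeof(uint16_t));
    memcpy(mask_at, &port_be_mask, sizeof(uint16_t));
}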
+ pattern->mask : &dpaa2_flow_item_tcp_mask; - return -1; - } - local_cfg |= DPAA2_FS_TABLE_RECONFIGURE; - } + /* Get traffic class index and flow id to be configured */ + flow->tc_id = group; + flow->tc_index = attr->priority; - ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group); - if (ret) { - DPAA2_PMD_ERR( - "Move ipaddr before ICMP TYPE set failed"); - return -1; + if (dpaa2_pattern->in_tunnel) { + if (spec) { + DPAA2_PMD_ERR("Tunnel-TCP distribution not support"); + return -ENOTSUP; } - ret = dpaa2_flow_rule_data_set( - &priv->extract.qos_key_extract, - &flow->qos_rule, - NET_PROT_ICMP, - NH_FLD_ICMP_TYPE, - &spec->hdr.icmp_type, - &mask->hdr.icmp_type, - NH_FLD_ICMP_TYPE_SIZE); - if (ret) { - DPAA2_PMD_ERR("QoS NH_FLD_ICMP_TYPE rule data set failed"); - return -1; - } + ret = dpaa2_flow_identify_by_faf(priv, flow, + FAFE_VXLAN_IN_TCP_FRAM, + DPAA2_FLOW_QOS_TYPE, group, + &local_cfg); + if (ret) + return ret; - ret = dpaa2_flow_rule_data_set( - &priv->extract.tc_key_extract[group], - &flow->fs_rule, - NET_PROT_ICMP, - NH_FLD_ICMP_TYPE, - &spec->hdr.icmp_type, - &mask->hdr.icmp_type, - NH_FLD_ICMP_TYPE_SIZE); - if (ret) { - DPAA2_PMD_ERR("FS NH_FLD_ICMP_TYPE rule data set failed"); - return -1; - } + ret = dpaa2_flow_identify_by_faf(priv, flow, + FAFE_VXLAN_IN_TCP_FRAM, + DPAA2_FLOW_FS_TYPE, group, + &local_cfg); + return ret; } - if (mask->hdr.icmp_code) { - index = dpaa2_flow_extract_search( - &priv->extract.qos_key_extract.dpkg, - NET_PROT_ICMP, NH_FLD_ICMP_CODE); - if (index < 0) { - ret = dpaa2_flow_extract_add( - &priv->extract.qos_key_extract, - NET_PROT_ICMP, - NH_FLD_ICMP_CODE, - NH_FLD_ICMP_CODE_SIZE); - if (ret) { - DPAA2_PMD_ERR("QoS Extract add ICMP_CODE failed."); + ret = dpaa2_flow_identify_by_faf(priv, flow, + FAF_TCP_FRAM, DPAA2_FLOW_QOS_TYPE, + group, &local_cfg); + if (ret) + return ret; - return -1; - } - local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE; - } + ret = dpaa2_flow_identify_by_faf(priv, flow, + FAF_TCP_FRAM, DPAA2_FLOW_FS_TYPE, + group, &local_cfg); + if (ret) + return ret; - index = dpaa2_flow_extract_search( - &priv->extract.tc_key_extract[group].dpkg, - NET_PROT_ICMP, NH_FLD_ICMP_CODE); - if (index < 0) { - ret = dpaa2_flow_extract_add( - &priv->extract.tc_key_extract[group], - NET_PROT_ICMP, - NH_FLD_ICMP_CODE, - NH_FLD_ICMP_CODE_SIZE); - if (ret) { - DPAA2_PMD_ERR("FS Extract add ICMP_CODE failed."); + if (!spec) { + (*device_configured) |= local_cfg; + return 0; + } - return -1; - } - local_cfg |= DPAA2_FS_TABLE_RECONFIGURE; - } + ret = dpaa2_flow_extract_support((const uint8_t *)mask, + RTE_FLOW_ITEM_TYPE_TCP); + if (ret) { + DPAA2_PMD_WARN("Extract field(s) of TCP not support."); - ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group); - if (ret) { - DPAA2_PMD_ERR( - "Move ipaddr after ICMP CODE set failed"); - return -1; - } + return ret; + } - ret = dpaa2_flow_rule_data_set( - &priv->extract.qos_key_extract, - &flow->qos_rule, - NET_PROT_ICMP, - NH_FLD_ICMP_CODE, - &spec->hdr.icmp_code, - &mask->hdr.icmp_code, - NH_FLD_ICMP_CODE_SIZE); - if (ret) { - DPAA2_PMD_ERR("QoS NH_FLD_ICMP_CODE rule data set failed"); - return -1; - } + if (mask->hdr.src_port) { + ret = dpaa2_flow_add_hdr_extract_rule(flow, NET_PROT_TCP, + NH_FLD_TCP_PORT_SRC, &spec->hdr.src_port, + &mask->hdr.src_port, sizeof(rte_be16_t), + priv, group, &local_cfg, DPAA2_FLOW_QOS_TYPE); + if (ret) + return ret; + + ret = dpaa2_flow_add_hdr_extract_rule(flow, NET_PROT_TCP, + NH_FLD_TCP_PORT_SRC, &spec->hdr.src_port, + &mask->hdr.src_port, sizeof(rte_be16_t), + priv, group, 
&local_cfg, DPAA2_FLOW_FS_TYPE); + if (ret) + return ret; + } - ret = dpaa2_flow_rule_data_set( - &priv->extract.tc_key_extract[group], - &flow->fs_rule, - NET_PROT_ICMP, - NH_FLD_ICMP_CODE, - &spec->hdr.icmp_code, - &mask->hdr.icmp_code, - NH_FLD_ICMP_CODE_SIZE); - if (ret) { - DPAA2_PMD_ERR("FS NH_FLD_ICMP_CODE rule data set failed"); - return -1; - } + if (mask->hdr.dst_port) { + ret = dpaa2_flow_add_hdr_extract_rule(flow, NET_PROT_TCP, + NH_FLD_TCP_PORT_DST, &spec->hdr.dst_port, + &mask->hdr.dst_port, sizeof(rte_be16_t), + priv, group, &local_cfg, DPAA2_FLOW_QOS_TYPE); + if (ret) + return ret; + + ret = dpaa2_flow_add_hdr_extract_rule(flow, NET_PROT_TCP, + NH_FLD_TCP_PORT_DST, &spec->hdr.dst_port, + &mask->hdr.dst_port, sizeof(rte_be16_t), + priv, group, &local_cfg, DPAA2_FLOW_FS_TYPE); + if (ret) + return ret; } (*device_configured) |= local_cfg; @@ -1977,237 +3131,175 @@ dpaa2_configure_flow_icmp(struct rte_flow *flow, } static int -dpaa2_configure_flow_udp(struct rte_flow *flow, - struct rte_eth_dev *dev, - const struct rte_flow_attr *attr, - const struct rte_flow_item *pattern, - const struct rte_flow_action actions[] __rte_unused, - struct rte_flow_error *error __rte_unused, - int *device_configured) +dpaa2_configure_flow_esp(struct dpaa2_dev_flow *flow, + struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, + const struct rte_dpaa2_flow_item *dpaa2_pattern, + const struct rte_flow_action actions[] __rte_unused, + struct rte_flow_error *error __rte_unused, + int *device_configured) { - int index, ret; - int local_cfg = 0; + int ret, local_cfg = 0; uint32_t group; - const struct rte_flow_item_udp *spec, *mask; - - const struct rte_flow_item_udp *last __rte_unused; + const struct rte_flow_item_esp *spec, *mask; struct dpaa2_dev_priv *priv = dev->data->dev_private; + const struct rte_flow_item *pattern = + &dpaa2_pattern->generic_item; group = attr->group; /* Parse pattern list to get the matching parameters */ - spec = (const struct rte_flow_item_udp *)pattern->spec; - last = (const struct rte_flow_item_udp *)pattern->last; - mask = (const struct rte_flow_item_udp *) - (pattern->mask ? pattern->mask : &dpaa2_flow_item_udp_mask); + spec = pattern->spec; + mask = pattern->mask ? 
+ pattern->mask : &dpaa2_flow_item_esp_mask; /* Get traffic class index and flow id to be configured */ flow->tc_id = group; flow->tc_index = attr->priority; - if (!spec || !mc_l4_port_identification) { - struct proto_discrimination proto; - - index = dpaa2_flow_extract_search( - &priv->extract.qos_key_extract.dpkg, - NET_PROT_IP, NH_FLD_IP_PROTO); - if (index < 0) { - ret = dpaa2_flow_proto_discrimination_extract( - &priv->extract.qos_key_extract, - DPAA2_FLOW_ITEM_TYPE_GENERIC_IP); - if (ret) { - DPAA2_PMD_ERR( - "QoS Extract IP protocol to discriminate UDP failed."); - - return -1; - } - local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE; - } - - index = dpaa2_flow_extract_search( - &priv->extract.tc_key_extract[group].dpkg, - NET_PROT_IP, NH_FLD_IP_PROTO); - if (index < 0) { - ret = dpaa2_flow_proto_discrimination_extract( - &priv->extract.tc_key_extract[group], - DPAA2_FLOW_ITEM_TYPE_GENERIC_IP); - if (ret) { - DPAA2_PMD_ERR( - "FS Extract IP protocol to discriminate UDP failed."); - - return -1; - } - local_cfg |= DPAA2_FS_TABLE_RECONFIGURE; - } + if (dpaa2_pattern->in_tunnel) { + DPAA2_PMD_ERR("Tunnel-ESP distribution not support"); + return -ENOTSUP; + } - ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group); - if (ret) { - DPAA2_PMD_ERR( - "Move IP addr before UDP discrimination set failed"); - return -1; - } + ret = dpaa2_flow_identify_by_faf(priv, flow, + FAF_IPSEC_ESP_FRAM, DPAA2_FLOW_QOS_TYPE, + group, &local_cfg); + if (ret) + return ret; - proto.type = DPAA2_FLOW_ITEM_TYPE_GENERIC_IP; - proto.ip_proto = IPPROTO_UDP; - ret = dpaa2_flow_proto_discrimination_rule(priv, flow, - proto, group); - if (ret) { - DPAA2_PMD_ERR("UDP discrimination rule set failed"); - return -1; - } + ret = dpaa2_flow_identify_by_faf(priv, flow, + FAF_IPSEC_ESP_FRAM, DPAA2_FLOW_FS_TYPE, + group, &local_cfg); + if (ret) + return ret; + if (!spec) { (*device_configured) |= local_cfg; + return 0; + } + + ret = dpaa2_flow_extract_support((const uint8_t *)mask, + RTE_FLOW_ITEM_TYPE_ESP); + if (ret) { + DPAA2_PMD_WARN("Extract field(s) of ESP not support."); - if (!spec) - return 0; + return ret; } - if (dpaa2_flow_extract_support((const uint8_t *)mask, - RTE_FLOW_ITEM_TYPE_UDP)) { - DPAA2_PMD_WARN("Extract field(s) of UDP not support."); + if (mask->hdr.spi) { + ret = dpaa2_flow_add_hdr_extract_rule(flow, NET_PROT_IPSEC_ESP, + NH_FLD_IPSEC_ESP_SPI, &spec->hdr.spi, + &mask->hdr.spi, sizeof(rte_be32_t), + priv, group, &local_cfg, DPAA2_FLOW_QOS_TYPE); + if (ret) + return ret; + + ret = dpaa2_flow_add_hdr_extract_rule(flow, NET_PROT_IPSEC_ESP, + NH_FLD_IPSEC_ESP_SPI, &spec->hdr.spi, + &mask->hdr.spi, sizeof(rte_be32_t), + priv, group, &local_cfg, DPAA2_FLOW_FS_TYPE); + if (ret) + return ret; + } - return -1; + if (mask->hdr.seq) { + ret = dpaa2_flow_add_hdr_extract_rule(flow, NET_PROT_IPSEC_ESP, + NH_FLD_IPSEC_ESP_SEQUENCE_NUM, &spec->hdr.seq, + &mask->hdr.seq, sizeof(rte_be32_t), + priv, group, &local_cfg, DPAA2_FLOW_QOS_TYPE); + if (ret) + return ret; + + ret = dpaa2_flow_add_hdr_extract_rule(flow, NET_PROT_IPSEC_ESP, + NH_FLD_IPSEC_ESP_SEQUENCE_NUM, &spec->hdr.seq, + &mask->hdr.seq, sizeof(rte_be32_t), + priv, group, &local_cfg, DPAA2_FLOW_FS_TYPE); + if (ret) + return ret; } - if (mask->hdr.src_port) { - index = dpaa2_flow_extract_search( - &priv->extract.qos_key_extract.dpkg, - NET_PROT_UDP, NH_FLD_UDP_PORT_SRC); - if (index < 0) { - ret = dpaa2_flow_extract_add( - &priv->extract.qos_key_extract, - NET_PROT_UDP, - NH_FLD_UDP_PORT_SRC, - NH_FLD_UDP_PORT_SIZE); - if (ret) { - DPAA2_PMD_ERR("QoS Extract add 
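[Editor's illustrative sketch, not part of the patch: the ESP handler above can distribute on the SPI and the sequence number, which per RFC 4303 are the first two 32-bit big-endian words of the ESP header. A local layout definition, for illustration only, makes the two candidate key offsets explicit:]

#include <stdint.h>
#include <stddef.h>

/* On-wire ESP header prefix (RFC 4303): SPI then sequence number,
 * both 32-bit big-endian. Defined locally for illustration.
 */
struct esp_hdr_sketch {
    uint32_t spi;
    uint32_t seq;
};

/* Offsets of the two fields the handler above may place in the key. */
static const size_t esp_spi_off = offsetof(struct esp_hdr_sketch, spi);
static const size_t esp_seq_off = offsetof(struct esp_hdr_sketch, seq);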
UDP_SRC failed."); + (*device_configured) |= local_cfg; - return -1; - } - local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE; - } + return 0; +} - index = dpaa2_flow_extract_search( - &priv->extract.tc_key_extract[group].dpkg, - NET_PROT_UDP, NH_FLD_UDP_PORT_SRC); - if (index < 0) { - ret = dpaa2_flow_extract_add( - &priv->extract.tc_key_extract[group], - NET_PROT_UDP, - NH_FLD_UDP_PORT_SRC, - NH_FLD_UDP_PORT_SIZE); - if (ret) { - DPAA2_PMD_ERR("FS Extract add UDP_SRC failed."); +static int +dpaa2_configure_flow_ah(struct dpaa2_dev_flow *flow, + struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, + const struct rte_dpaa2_flow_item *dpaa2_pattern, + const struct rte_flow_action actions[] __rte_unused, + struct rte_flow_error *error __rte_unused, + int *device_configured) +{ + int ret, local_cfg = 0; + uint32_t group; + const struct rte_flow_item_ah *spec, *mask; + struct dpaa2_dev_priv *priv = dev->data->dev_private; + const struct rte_flow_item *pattern = + &dpaa2_pattern->generic_item; - return -1; - } - local_cfg |= DPAA2_FS_TABLE_RECONFIGURE; - } + group = attr->group; - ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group); - if (ret) { - DPAA2_PMD_ERR( - "Move ipaddr before UDP_PORT_SRC set failed"); - return -1; - } + /* Parse pattern list to get the matching parameters */ + spec = pattern->spec; + mask = pattern->mask ? + pattern->mask : &dpaa2_flow_item_ah_mask; - ret = dpaa2_flow_rule_data_set(&priv->extract.qos_key_extract, - &flow->qos_rule, - NET_PROT_UDP, - NH_FLD_UDP_PORT_SRC, - &spec->hdr.src_port, - &mask->hdr.src_port, - NH_FLD_UDP_PORT_SIZE); - if (ret) { - DPAA2_PMD_ERR( - "QoS NH_FLD_UDP_PORT_SRC rule data set failed"); - return -1; - } + /* Get traffic class index and flow id to be configured */ + flow->tc_id = group; + flow->tc_index = attr->priority; - ret = dpaa2_flow_rule_data_set( - &priv->extract.tc_key_extract[group], - &flow->fs_rule, - NET_PROT_UDP, - NH_FLD_UDP_PORT_SRC, - &spec->hdr.src_port, - &mask->hdr.src_port, - NH_FLD_UDP_PORT_SIZE); - if (ret) { - DPAA2_PMD_ERR( - "FS NH_FLD_UDP_PORT_SRC rule data set failed"); - return -1; - } + if (dpaa2_pattern->in_tunnel) { + DPAA2_PMD_ERR("Tunnel-AH distribution not support"); + return -ENOTSUP; } - if (mask->hdr.dst_port) { - index = dpaa2_flow_extract_search( - &priv->extract.qos_key_extract.dpkg, - NET_PROT_UDP, NH_FLD_UDP_PORT_DST); - if (index < 0) { - ret = dpaa2_flow_extract_add( - &priv->extract.qos_key_extract, - NET_PROT_UDP, - NH_FLD_UDP_PORT_DST, - NH_FLD_UDP_PORT_SIZE); - if (ret) { - DPAA2_PMD_ERR("QoS Extract add UDP_DST failed."); + ret = dpaa2_flow_identify_by_faf(priv, flow, + FAF_IPSEC_AH_FRAM, DPAA2_FLOW_QOS_TYPE, + group, &local_cfg); + if (ret) + return ret; - return -1; - } - local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE; - } + ret = dpaa2_flow_identify_by_faf(priv, flow, + FAF_IPSEC_AH_FRAM, DPAA2_FLOW_FS_TYPE, + group, &local_cfg); + if (ret) + return ret; - index = dpaa2_flow_extract_search( - &priv->extract.tc_key_extract[group].dpkg, - NET_PROT_UDP, NH_FLD_UDP_PORT_DST); - if (index < 0) { - ret = dpaa2_flow_extract_add( - &priv->extract.tc_key_extract[group], - NET_PROT_UDP, - NH_FLD_UDP_PORT_DST, - NH_FLD_UDP_PORT_SIZE); - if (ret) { - DPAA2_PMD_ERR("FS Extract add UDP_DST failed."); + if (!spec) { + (*device_configured) |= local_cfg; + return 0; + } - return -1; - } - local_cfg |= DPAA2_FS_TABLE_RECONFIGURE; - } + ret = dpaa2_flow_extract_support((const uint8_t *)mask, + RTE_FLOW_ITEM_TYPE_AH); + if (ret) { + DPAA2_PMD_WARN("Extract field(s) of AH not support."); - ret = 
dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group); - if (ret) { - DPAA2_PMD_ERR( - "Move ipaddr before UDP_PORT_DST set failed"); - return -1; - } + return ret; + } - ret = dpaa2_flow_rule_data_set( - &priv->extract.qos_key_extract, - &flow->qos_rule, - NET_PROT_UDP, - NH_FLD_UDP_PORT_DST, - &spec->hdr.dst_port, - &mask->hdr.dst_port, - NH_FLD_UDP_PORT_SIZE); - if (ret) { - DPAA2_PMD_ERR( - "QoS NH_FLD_UDP_PORT_DST rule data set failed"); - return -1; - } + if (mask->spi) { + ret = dpaa2_flow_add_hdr_extract_rule(flow, NET_PROT_IPSEC_AH, + NH_FLD_IPSEC_AH_SPI, &spec->spi, + &mask->spi, sizeof(rte_be32_t), + priv, group, &local_cfg, DPAA2_FLOW_QOS_TYPE); + if (ret) + return ret; + + ret = dpaa2_flow_add_hdr_extract_rule(flow, NET_PROT_IPSEC_AH, + NH_FLD_IPSEC_AH_SPI, &spec->spi, + &mask->spi, sizeof(rte_be32_t), + priv, group, &local_cfg, DPAA2_FLOW_FS_TYPE); + if (ret) + return ret; + } - ret = dpaa2_flow_rule_data_set( - &priv->extract.tc_key_extract[group], - &flow->fs_rule, - NET_PROT_UDP, - NH_FLD_UDP_PORT_DST, - &spec->hdr.dst_port, - &mask->hdr.dst_port, - NH_FLD_UDP_PORT_SIZE); - if (ret) { - DPAA2_PMD_ERR( - "FS NH_FLD_UDP_PORT_DST rule data set failed"); - return -1; - } + if (mask->seq_num) { + DPAA2_PMD_ERR("AH seq distribution not support"); + return -ENOTSUP; } (*device_configured) |= local_cfg; @@ -2216,239 +3308,170 @@ dpaa2_configure_flow_udp(struct rte_flow *flow, } static int -dpaa2_configure_flow_tcp(struct rte_flow *flow, - struct rte_eth_dev *dev, - const struct rte_flow_attr *attr, - const struct rte_flow_item *pattern, - const struct rte_flow_action actions[] __rte_unused, - struct rte_flow_error *error __rte_unused, - int *device_configured) +dpaa2_configure_flow_sctp(struct dpaa2_dev_flow *flow, + struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, + const struct rte_dpaa2_flow_item *dpaa2_pattern, + const struct rte_flow_action actions[] __rte_unused, + struct rte_flow_error *error __rte_unused, + int *device_configured) { - int index, ret; - int local_cfg = 0; + int ret, local_cfg = 0; uint32_t group; - const struct rte_flow_item_tcp *spec, *mask; - - const struct rte_flow_item_tcp *last __rte_unused; + const struct rte_flow_item_sctp *spec, *mask; struct dpaa2_dev_priv *priv = dev->data->dev_private; + const struct rte_flow_item *pattern = &dpaa2_pattern->generic_item; group = attr->group; /* Parse pattern list to get the matching parameters */ - spec = (const struct rte_flow_item_tcp *)pattern->spec; - last = (const struct rte_flow_item_tcp *)pattern->last; - mask = (const struct rte_flow_item_tcp *) - (pattern->mask ? pattern->mask : &dpaa2_flow_item_tcp_mask); + spec = pattern->spec; + mask = pattern->mask ? 
+ pattern->mask : &dpaa2_flow_item_sctp_mask; /* Get traffic class index and flow id to be configured */ flow->tc_id = group; flow->tc_index = attr->priority; - if (!spec || !mc_l4_port_identification) { - struct proto_discrimination proto; - - index = dpaa2_flow_extract_search( - &priv->extract.qos_key_extract.dpkg, - NET_PROT_IP, NH_FLD_IP_PROTO); - if (index < 0) { - ret = dpaa2_flow_proto_discrimination_extract( - &priv->extract.qos_key_extract, - DPAA2_FLOW_ITEM_TYPE_GENERIC_IP); - if (ret) { - DPAA2_PMD_ERR( - "QoS Extract IP protocol to discriminate TCP failed."); - - return -1; - } - local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE; - } - - index = dpaa2_flow_extract_search( - &priv->extract.tc_key_extract[group].dpkg, - NET_PROT_IP, NH_FLD_IP_PROTO); - if (index < 0) { - ret = dpaa2_flow_proto_discrimination_extract( - &priv->extract.tc_key_extract[group], - DPAA2_FLOW_ITEM_TYPE_GENERIC_IP); - if (ret) { - DPAA2_PMD_ERR( - "FS Extract IP protocol to discriminate TCP failed."); - - return -1; - } - local_cfg |= DPAA2_FS_TABLE_RECONFIGURE; - } + if (dpaa2_pattern->in_tunnel) { + DPAA2_PMD_ERR("Tunnel-SCTP distribution not support"); + return -ENOTSUP; + } - ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group); - if (ret) { - DPAA2_PMD_ERR( - "Move IP addr before TCP discrimination set failed"); - return -1; - } + ret = dpaa2_flow_identify_by_faf(priv, flow, + FAF_SCTP_FRAM, DPAA2_FLOW_QOS_TYPE, + group, &local_cfg); + if (ret) + return ret; - proto.type = DPAA2_FLOW_ITEM_TYPE_GENERIC_IP; - proto.ip_proto = IPPROTO_TCP; - ret = dpaa2_flow_proto_discrimination_rule(priv, flow, - proto, group); - if (ret) { - DPAA2_PMD_ERR("TCP discrimination rule set failed"); - return -1; - } + ret = dpaa2_flow_identify_by_faf(priv, flow, + FAF_SCTP_FRAM, DPAA2_FLOW_FS_TYPE, + group, &local_cfg); + if (ret) + return ret; + if (!spec) { (*device_configured) |= local_cfg; - - if (!spec) - return 0; + return 0; } - if (dpaa2_flow_extract_support((const uint8_t *)mask, - RTE_FLOW_ITEM_TYPE_TCP)) { - DPAA2_PMD_WARN("Extract field(s) of TCP not support."); + ret = dpaa2_flow_extract_support((const uint8_t *)mask, + RTE_FLOW_ITEM_TYPE_SCTP); + if (ret) { + DPAA2_PMD_WARN("Extract field(s) of SCTP not support."); - return -1; + return ret; } if (mask->hdr.src_port) { - index = dpaa2_flow_extract_search( - &priv->extract.qos_key_extract.dpkg, - NET_PROT_TCP, NH_FLD_TCP_PORT_SRC); - if (index < 0) { - ret = dpaa2_flow_extract_add( - &priv->extract.qos_key_extract, - NET_PROT_TCP, - NH_FLD_TCP_PORT_SRC, - NH_FLD_TCP_PORT_SIZE); - if (ret) { - DPAA2_PMD_ERR("QoS Extract add TCP_SRC failed."); + ret = dpaa2_flow_add_hdr_extract_rule(flow, NET_PROT_SCTP, + NH_FLD_SCTP_PORT_SRC, &spec->hdr.src_port, + &mask->hdr.src_port, sizeof(rte_be16_t), + priv, group, &local_cfg, DPAA2_FLOW_QOS_TYPE); + if (ret) + return ret; + + ret = dpaa2_flow_add_hdr_extract_rule(flow, NET_PROT_SCTP, + NH_FLD_SCTP_PORT_SRC, &spec->hdr.src_port, + &mask->hdr.src_port, sizeof(rte_be16_t), + priv, group, &local_cfg, DPAA2_FLOW_FS_TYPE); + if (ret) + return ret; + } - return -1; - } - local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE; - } + if (mask->hdr.dst_port) { + ret = dpaa2_flow_add_hdr_extract_rule(flow, NET_PROT_SCTP, + NH_FLD_SCTP_PORT_DST, &spec->hdr.dst_port, + &mask->hdr.dst_port, sizeof(rte_be16_t), + priv, group, &local_cfg, DPAA2_FLOW_QOS_TYPE); + if (ret) + return ret; + + ret = dpaa2_flow_add_hdr_extract_rule(flow, NET_PROT_SCTP, + NH_FLD_SCTP_PORT_DST, &spec->hdr.dst_port, + &mask->hdr.dst_port, sizeof(rte_be16_t), + priv, group, 
&local_cfg, DPAA2_FLOW_FS_TYPE); + if (ret) + return ret; + } - index = dpaa2_flow_extract_search( - &priv->extract.tc_key_extract[group].dpkg, - NET_PROT_TCP, NH_FLD_TCP_PORT_SRC); - if (index < 0) { - ret = dpaa2_flow_extract_add( - &priv->extract.tc_key_extract[group], - NET_PROT_TCP, - NH_FLD_TCP_PORT_SRC, - NH_FLD_TCP_PORT_SIZE); - if (ret) { - DPAA2_PMD_ERR("FS Extract add TCP_SRC failed."); + (*device_configured) |= local_cfg; - return -1; - } - local_cfg |= DPAA2_FS_TABLE_RECONFIGURE; - } + return 0; +} - ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group); - if (ret) { - DPAA2_PMD_ERR( - "Move ipaddr before TCP_PORT_SRC set failed"); - return -1; - } +static int +dpaa2_configure_flow_gre(struct dpaa2_dev_flow *flow, + struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, + const struct rte_dpaa2_flow_item *dpaa2_pattern, + const struct rte_flow_action actions[] __rte_unused, + struct rte_flow_error *error __rte_unused, + int *device_configured) +{ + int ret, local_cfg = 0; + uint32_t group; + const struct rte_flow_item_gre *spec, *mask; + struct dpaa2_dev_priv *priv = dev->data->dev_private; + const struct rte_flow_item *pattern = &dpaa2_pattern->generic_item; - ret = dpaa2_flow_rule_data_set( - &priv->extract.qos_key_extract, - &flow->qos_rule, - NET_PROT_TCP, - NH_FLD_TCP_PORT_SRC, - &spec->hdr.src_port, - &mask->hdr.src_port, - NH_FLD_TCP_PORT_SIZE); - if (ret) { - DPAA2_PMD_ERR( - "QoS NH_FLD_TCP_PORT_SRC rule data set failed"); - return -1; - } + group = attr->group; - ret = dpaa2_flow_rule_data_set( - &priv->extract.tc_key_extract[group], - &flow->fs_rule, - NET_PROT_TCP, - NH_FLD_TCP_PORT_SRC, - &spec->hdr.src_port, - &mask->hdr.src_port, - NH_FLD_TCP_PORT_SIZE); - if (ret) { - DPAA2_PMD_ERR( - "FS NH_FLD_TCP_PORT_SRC rule data set failed"); - return -1; - } + /* Parse pattern list to get the matching parameters */ + spec = pattern->spec; + mask = pattern->mask ? 
+ pattern->mask : &dpaa2_flow_item_gre_mask; + + /* Get traffic class index and flow id to be configured */ + flow->tc_id = group; + flow->tc_index = attr->priority; + + if (dpaa2_pattern->in_tunnel) { + DPAA2_PMD_ERR("Tunnel-GRE distribution not support"); + return -ENOTSUP; } - if (mask->hdr.dst_port) { - index = dpaa2_flow_extract_search( - &priv->extract.qos_key_extract.dpkg, - NET_PROT_TCP, NH_FLD_TCP_PORT_DST); - if (index < 0) { - ret = dpaa2_flow_extract_add( - &priv->extract.qos_key_extract, - NET_PROT_TCP, - NH_FLD_TCP_PORT_DST, - NH_FLD_TCP_PORT_SIZE); - if (ret) { - DPAA2_PMD_ERR("QoS Extract add TCP_DST failed."); + if (!spec) { + ret = dpaa2_flow_identify_by_faf(priv, flow, + FAF_GRE_FRAM, DPAA2_FLOW_QOS_TYPE, + group, &local_cfg); + if (ret) + return ret; + + ret = dpaa2_flow_identify_by_faf(priv, flow, + FAF_GRE_FRAM, DPAA2_FLOW_FS_TYPE, + group, &local_cfg); + if (ret) + return ret; - return -1; - } - local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE; - } + (*device_configured) |= local_cfg; + return 0; + } - index = dpaa2_flow_extract_search( - &priv->extract.tc_key_extract[group].dpkg, - NET_PROT_TCP, NH_FLD_TCP_PORT_DST); - if (index < 0) { - ret = dpaa2_flow_extract_add( - &priv->extract.tc_key_extract[group], - NET_PROT_TCP, - NH_FLD_TCP_PORT_DST, - NH_FLD_TCP_PORT_SIZE); - if (ret) { - DPAA2_PMD_ERR("FS Extract add TCP_DST failed."); + ret = dpaa2_flow_extract_support((const uint8_t *)mask, + RTE_FLOW_ITEM_TYPE_GRE); + if (ret) { + DPAA2_PMD_WARN("Extract field(s) of GRE not support."); - return -1; - } - local_cfg |= DPAA2_FS_TABLE_RECONFIGURE; - } + return ret; + } - ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group); - if (ret) { - DPAA2_PMD_ERR( - "Move ipaddr before TCP_PORT_DST set failed"); - return -1; - } + if (!mask->protocol) + return 0; - ret = dpaa2_flow_rule_data_set( - &priv->extract.qos_key_extract, - &flow->qos_rule, - NET_PROT_TCP, - NH_FLD_TCP_PORT_DST, - &spec->hdr.dst_port, - &mask->hdr.dst_port, - NH_FLD_TCP_PORT_SIZE); - if (ret) { - DPAA2_PMD_ERR( - "QoS NH_FLD_TCP_PORT_DST rule data set failed"); - return -1; - } + ret = dpaa2_flow_add_hdr_extract_rule(flow, NET_PROT_GRE, + NH_FLD_GRE_TYPE, &spec->protocol, + &mask->protocol, sizeof(rte_be16_t), + priv, group, &local_cfg, DPAA2_FLOW_QOS_TYPE); + if (ret) + return ret; - ret = dpaa2_flow_rule_data_set( - &priv->extract.tc_key_extract[group], - &flow->fs_rule, - NET_PROT_TCP, - NH_FLD_TCP_PORT_DST, - &spec->hdr.dst_port, - &mask->hdr.dst_port, - NH_FLD_TCP_PORT_SIZE); - if (ret) { - DPAA2_PMD_ERR( - "FS NH_FLD_TCP_PORT_DST rule data set failed"); - return -1; - } - } + ret = dpaa2_flow_add_hdr_extract_rule(flow, NET_PROT_GRE, + NH_FLD_GRE_TYPE, &spec->protocol, + &mask->protocol, sizeof(rte_be16_t), + priv, group, &local_cfg, DPAA2_FLOW_FS_TYPE); + if (ret) + return ret; (*device_configured) |= local_cfg; @@ -2456,239 +3479,414 @@ dpaa2_configure_flow_tcp(struct rte_flow *flow, } static int -dpaa2_configure_flow_sctp(struct rte_flow *flow, - struct rte_eth_dev *dev, - const struct rte_flow_attr *attr, - const struct rte_flow_item *pattern, - const struct rte_flow_action actions[] __rte_unused, - struct rte_flow_error *error __rte_unused, - int *device_configured) +dpaa2_configure_flow_vxlan(struct dpaa2_dev_flow *flow, + struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, + const struct rte_dpaa2_flow_item *dpaa2_pattern, + const struct rte_flow_action actions[] __rte_unused, + struct rte_flow_error *error __rte_unused, + int *device_configured) { - int index, ret; - int local_cfg = 0; + 
int ret, local_cfg = 0; uint32_t group; - const struct rte_flow_item_sctp *spec, *mask; - - const struct rte_flow_item_sctp *last __rte_unused; + const struct rte_flow_item_vxlan *spec, *mask; struct dpaa2_dev_priv *priv = dev->data->dev_private; + const struct rte_flow_item *pattern = &dpaa2_pattern->generic_item; group = attr->group; /* Parse pattern list to get the matching parameters */ - spec = (const struct rte_flow_item_sctp *)pattern->spec; - last = (const struct rte_flow_item_sctp *)pattern->last; - mask = (const struct rte_flow_item_sctp *) - (pattern->mask ? pattern->mask : - &dpaa2_flow_item_sctp_mask); + spec = pattern->spec; + mask = pattern->mask ? + pattern->mask : &dpaa2_flow_item_vxlan_mask; /* Get traffic class index and flow id to be configured */ flow->tc_id = group; flow->tc_index = attr->priority; - if (!spec || !mc_l4_port_identification) { - struct proto_discrimination proto; + if (dpaa2_pattern->in_tunnel) { + DPAA2_PMD_ERR("Tunnel-VXLAN distribution not support"); + return -ENOTSUP; + } - index = dpaa2_flow_extract_search( - &priv->extract.qos_key_extract.dpkg, - NET_PROT_IP, NH_FLD_IP_PROTO); - if (index < 0) { - ret = dpaa2_flow_proto_discrimination_extract( - &priv->extract.qos_key_extract, - DPAA2_FLOW_ITEM_TYPE_GENERIC_IP); - if (ret) { - DPAA2_PMD_ERR( - "QoS Extract IP protocol to discriminate SCTP failed."); + if (!spec) { + ret = dpaa2_flow_identify_by_faf(priv, flow, + FAF_VXLAN_FRAM, DPAA2_FLOW_QOS_TYPE, + group, &local_cfg); + if (ret) + return ret; + + ret = dpaa2_flow_identify_by_faf(priv, flow, + FAF_VXLAN_FRAM, DPAA2_FLOW_FS_TYPE, + group, &local_cfg); + if (ret) + return ret; - return -1; - } - local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE; - } + (*device_configured) |= local_cfg; + return 0; + } - index = dpaa2_flow_extract_search( - &priv->extract.tc_key_extract[group].dpkg, - NET_PROT_IP, NH_FLD_IP_PROTO); - if (index < 0) { - ret = dpaa2_flow_proto_discrimination_extract( - &priv->extract.tc_key_extract[group], - DPAA2_FLOW_ITEM_TYPE_GENERIC_IP); - if (ret) { - DPAA2_PMD_ERR( - "FS Extract IP protocol to discriminate SCTP failed."); + ret = dpaa2_flow_extract_support((const uint8_t *)mask, + RTE_FLOW_ITEM_TYPE_VXLAN); + if (ret) { + DPAA2_PMD_WARN("Extract field(s) of VXLAN not support."); - return -1; - } - local_cfg |= DPAA2_FS_TABLE_RECONFIGURE; - } + return ret; + } - ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group); - if (ret) { - DPAA2_PMD_ERR( - "Move ipaddr before SCTP discrimination set failed"); - return -1; + if (mask->flags) { + if (spec->flags != VXLAN_HF_VNI) { + DPAA2_PMD_ERR("vxlan flag(0x%02x) must be 0x%02x.", + spec->flags, VXLAN_HF_VNI); + return -EINVAL; } - - proto.type = DPAA2_FLOW_ITEM_TYPE_GENERIC_IP; - proto.ip_proto = IPPROTO_SCTP; - ret = dpaa2_flow_proto_discrimination_rule(priv, flow, - proto, group); - if (ret) { - DPAA2_PMD_ERR("SCTP discrimination rule set failed"); - return -1; + if (mask->flags != 0xff) { + DPAA2_PMD_ERR("Not support to extract vxlan flag."); + return -EINVAL; } - - (*device_configured) |= local_cfg; - - if (!spec) - return 0; } - if (dpaa2_flow_extract_support((const uint8_t *)mask, - RTE_FLOW_ITEM_TYPE_SCTP)) { - DPAA2_PMD_WARN("Extract field(s) of SCTP not support."); - - return -1; + if (mask->vni[0] || mask->vni[1] || mask->vni[2]) { + ret = dpaa2_flow_add_pr_extract_rule(flow, + DPAA2_VXLAN_VNI_OFFSET, + sizeof(mask->vni), spec->vni, + mask->vni, + priv, group, &local_cfg, DPAA2_FLOW_QOS_TYPE); + if (ret) + return ret; + + ret = dpaa2_flow_add_pr_extract_rule(flow, + 
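[Editor's illustrative sketch, not part of the patch: the VXLAN handler above only lets the VNI participate in the key; the flags byte is accepted solely as an exact match on the VNI-valid ("I") bit with a full 0xff mask. The validation and the 24-bit VNI assembly in isolation, with a locally defined constant mirroring VXLAN_HF_VNI:]

#include <stdint.h>

#define VXLAN_FLAG_VNI_VALID 0x08 /* "I" bit from RFC 7348 */

/* Accept a VXLAN flags spec/mask pair the same way the handler above
 * does: either the flags are not matched at all, or they must require
 * exactly the VNI-valid bit.
 */
static int
vxlan_flags_ok(uint8_t flags_spec, uint8_t flags_mask)
{
    if (!flags_mask)
        return 1; /* flags are not part of the key */
    return flags_mask == 0xff && flags_spec == VXLAN_FLAG_VNI_VALID;
}

/* Assemble the 24-bit VNI from its three big-endian bytes. */
static uint32_t
vxlan_vni(const uint8_t vni[3])
{
    return ((uint32_t)vni[0] << 16) | ((uint32_t)vni[1] << 8) | vni[2];
}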
DPAA2_VXLAN_VNI_OFFSET, + sizeof(mask->vni), spec->vni, + mask->vni, + priv, group, &local_cfg, DPAA2_FLOW_FS_TYPE); + if (ret) + return ret; } - if (mask->hdr.src_port) { - index = dpaa2_flow_extract_search( - &priv->extract.qos_key_extract.dpkg, - NET_PROT_SCTP, NH_FLD_SCTP_PORT_SRC); - if (index < 0) { - ret = dpaa2_flow_extract_add( - &priv->extract.qos_key_extract, - NET_PROT_SCTP, - NH_FLD_SCTP_PORT_SRC, - NH_FLD_SCTP_PORT_SIZE); - if (ret) { - DPAA2_PMD_ERR("QoS Extract add SCTP_SRC failed."); + (*device_configured) |= local_cfg; - return -1; - } - local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE; - } + return 0; +} - index = dpaa2_flow_extract_search( - &priv->extract.tc_key_extract[group].dpkg, - NET_PROT_SCTP, NH_FLD_SCTP_PORT_SRC); - if (index < 0) { - ret = dpaa2_flow_extract_add( - &priv->extract.tc_key_extract[group], - NET_PROT_SCTP, - NH_FLD_SCTP_PORT_SRC, - NH_FLD_SCTP_PORT_SIZE); - if (ret) { - DPAA2_PMD_ERR("FS Extract add SCTP_SRC failed."); +static int +dpaa2_configure_flow_ecpri(struct dpaa2_dev_flow *flow, + struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, + const struct rte_dpaa2_flow_item *dpaa2_pattern, + const struct rte_flow_action actions[] __rte_unused, + struct rte_flow_error *error __rte_unused, + int *device_configured) +{ + int ret, local_cfg = 0; + uint32_t group; + const struct rte_flow_item_ecpri *spec, *mask; + struct rte_flow_item_ecpri local_mask; + struct dpaa2_dev_priv *priv = dev->data->dev_private; + const struct rte_flow_item *pattern = + &dpaa2_pattern->generic_item; + uint8_t extract_nb = 0, i; + uint64_t rule_data[DPAA2_ECPRI_MAX_EXTRACT_NB]; + uint64_t mask_data[DPAA2_ECPRI_MAX_EXTRACT_NB]; + uint8_t extract_size[DPAA2_ECPRI_MAX_EXTRACT_NB]; + uint8_t extract_off[DPAA2_ECPRI_MAX_EXTRACT_NB]; - return -1; - } - local_cfg |= DPAA2_FS_TABLE_RECONFIGURE; - } + group = attr->group; - ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group); - if (ret) { - DPAA2_PMD_ERR( - "Move ipaddr before SCTP_PORT_SRC set failed"); - return -1; - } + /* Parse pattern list to get the matching parameters */ + spec = pattern->spec; + if (pattern->mask) { + memcpy(&local_mask, pattern->mask, + sizeof(struct rte_flow_item_ecpri)); + local_mask.hdr.common.u32 = + rte_be_to_cpu_32(local_mask.hdr.common.u32); + mask = &local_mask; + } else { + mask = &dpaa2_flow_item_ecpri_mask; + } - ret = dpaa2_flow_rule_data_set( - &priv->extract.qos_key_extract, - &flow->qos_rule, - NET_PROT_SCTP, - NH_FLD_SCTP_PORT_SRC, - &spec->hdr.src_port, - &mask->hdr.src_port, - NH_FLD_SCTP_PORT_SIZE); - if (ret) { - DPAA2_PMD_ERR( - "QoS NH_FLD_SCTP_PORT_SRC rule data set failed"); - return -1; - } + /* Get traffic class index and flow id to be configured */ + flow->tc_id = group; + flow->tc_index = attr->priority; - ret = dpaa2_flow_rule_data_set( - &priv->extract.tc_key_extract[group], - &flow->fs_rule, - NET_PROT_SCTP, - NH_FLD_SCTP_PORT_SRC, - &spec->hdr.src_port, - &mask->hdr.src_port, - NH_FLD_SCTP_PORT_SIZE); - if (ret) { - DPAA2_PMD_ERR( - "FS NH_FLD_SCTP_PORT_SRC rule data set failed"); - return -1; - } + if (dpaa2_pattern->in_tunnel) { + DPAA2_PMD_ERR("Tunnel-ECPRI distribution not support"); + return -ENOTSUP; } - if (mask->hdr.dst_port) { - index = dpaa2_flow_extract_search( - &priv->extract.qos_key_extract.dpkg, - NET_PROT_SCTP, NH_FLD_SCTP_PORT_DST); - if (index < 0) { - ret = dpaa2_flow_extract_add( - &priv->extract.qos_key_extract, - NET_PROT_SCTP, - NH_FLD_SCTP_PORT_DST, - NH_FLD_SCTP_PORT_SIZE); - if (ret) { - DPAA2_PMD_ERR("QoS Extract add SCTP_DST failed."); + if 
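[Editor's illustrative sketch, not part of the patch: the eCPRI handler above builds the key as parallel arrays of (value, mask, size, offset) entries, bounded by DPAA2_ECPRI_MAX_EXTRACT_NB; the first entry identifies the eCPRI message type via a pseudo annotation byte and the rest are payload fields located with offsetof. A compact stand-alone builder for such an extract set, with a hypothetical bound:]

#include <stdint.h>

#define MAX_EXTRACTS 8 /* stand-in for the driver's per-item bound */

/* Parallel arrays in the style used above: one entry per key fragment. */
struct extract_set {
    uint64_t data[MAX_EXTRACTS];
    uint64_t mask[MAX_EXTRACTS];
    uint8_t  size[MAX_EXTRACTS];
    uint8_t  off[MAX_EXTRACTS];
    uint8_t  nb;
};

/* Append one fragment; fails when the set is full, just as the driver
 * bounds its arrays.
 */
static int
extract_set_add(struct extract_set *s, uint64_t data, uint64_t mask,
                uint8_t size, uint8_t off)
{
    if (s->nb >= MAX_EXTRACTS)
        return -1;
    s->data[s->nb] = data;
    s->mask[s->nb] = mask;
    s->size[s->nb] = size;
    s->off[s->nb] = off;
    s->nb++;
    return 0;
}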
(!spec) { + ret = dpaa2_flow_identify_by_faf(priv, flow, + FAFE_ECPRI_FRAM, DPAA2_FLOW_QOS_TYPE, + group, &local_cfg); + if (ret) + return ret; + + ret = dpaa2_flow_identify_by_faf(priv, flow, + FAFE_ECPRI_FRAM, DPAA2_FLOW_FS_TYPE, + group, &local_cfg); + if (ret) + return ret; - return -1; - } - local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE; - } + (*device_configured) |= local_cfg; + return 0; + } - index = dpaa2_flow_extract_search( - &priv->extract.tc_key_extract[group].dpkg, - NET_PROT_SCTP, NH_FLD_SCTP_PORT_DST); - if (index < 0) { - ret = dpaa2_flow_extract_add( - &priv->extract.tc_key_extract[group], - NET_PROT_SCTP, - NH_FLD_SCTP_PORT_DST, - NH_FLD_SCTP_PORT_SIZE); - if (ret) { - DPAA2_PMD_ERR("FS Extract add SCTP_DST failed."); + ret = dpaa2_flow_extract_support((const uint8_t *)mask, + RTE_FLOW_ITEM_TYPE_ECPRI); + if (ret) { + DPAA2_PMD_WARN("Extract field(s) of ECPRI not support."); - return -1; - } - local_cfg |= DPAA2_FS_TABLE_RECONFIGURE; - } + return ret; + } - ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group); - if (ret) { - DPAA2_PMD_ERR( - "Move ipaddr before SCTP_PORT_DST set failed"); - return -1; - } + if (mask->hdr.common.type != 0xff) { + DPAA2_PMD_WARN("ECPRI header type not specified."); - ret = dpaa2_flow_rule_data_set( - &priv->extract.qos_key_extract, - &flow->qos_rule, - NET_PROT_SCTP, - NH_FLD_SCTP_PORT_DST, - &spec->hdr.dst_port, - &mask->hdr.dst_port, - NH_FLD_SCTP_PORT_SIZE); - if (ret) { - DPAA2_PMD_ERR( - "QoS NH_FLD_SCTP_PORT_DST rule data set failed"); - return -1; - } + return -EINVAL; + } - ret = dpaa2_flow_rule_data_set( - &priv->extract.tc_key_extract[group], - &flow->fs_rule, - NET_PROT_SCTP, - NH_FLD_SCTP_PORT_DST, - &spec->hdr.dst_port, - &mask->hdr.dst_port, - NH_FLD_SCTP_PORT_SIZE); - if (ret) { - DPAA2_PMD_ERR( - "FS NH_FLD_SCTP_PORT_DST rule data set failed"); - return -1; + if (spec->hdr.common.type == RTE_ECPRI_MSG_TYPE_IQ_DATA) { + rule_data[extract_nb] = ECPRI_FAFE_TYPE_0; + mask_data[extract_nb] = 0xff; + extract_size[extract_nb] = sizeof(uint8_t); + extract_off[extract_nb] = DPAA2_FAFE_PSR_OFFSET; + extract_nb++; + + if (mask->hdr.type0.pc_id) { + rule_data[extract_nb] = spec->hdr.type0.pc_id; + mask_data[extract_nb] = mask->hdr.type0.pc_id; + extract_size[extract_nb] = sizeof(rte_be16_t); + extract_off[extract_nb] = + DPAA2_ECPRI_MSG_OFFSET + + offsetof(struct rte_ecpri_msg_iq_data, pc_id); + extract_nb++; + } + if (mask->hdr.type0.seq_id) { + rule_data[extract_nb] = spec->hdr.type0.seq_id; + mask_data[extract_nb] = mask->hdr.type0.seq_id; + extract_size[extract_nb] = sizeof(rte_be16_t); + extract_off[extract_nb] = + DPAA2_ECPRI_MSG_OFFSET + + offsetof(struct rte_ecpri_msg_iq_data, seq_id); + extract_nb++; + } + } else if (spec->hdr.common.type == RTE_ECPRI_MSG_TYPE_BIT_SEQ) { + rule_data[extract_nb] = ECPRI_FAFE_TYPE_1; + mask_data[extract_nb] = 0xff; + extract_size[extract_nb] = sizeof(uint8_t); + extract_off[extract_nb] = DPAA2_FAFE_PSR_OFFSET; + extract_nb++; + + if (mask->hdr.type1.pc_id) { + rule_data[extract_nb] = spec->hdr.type1.pc_id; + mask_data[extract_nb] = mask->hdr.type1.pc_id; + extract_size[extract_nb] = sizeof(rte_be16_t); + extract_off[extract_nb] = + DPAA2_ECPRI_MSG_OFFSET + + offsetof(struct rte_ecpri_msg_bit_seq, pc_id); + extract_nb++; + } + if (mask->hdr.type1.seq_id) { + rule_data[extract_nb] = spec->hdr.type1.seq_id; + mask_data[extract_nb] = mask->hdr.type1.seq_id; + extract_size[extract_nb] = sizeof(rte_be16_t); + extract_off[extract_nb] = + DPAA2_ECPRI_MSG_OFFSET + + offsetof(struct rte_ecpri_msg_bit_seq, 
seq_id); + extract_nb++; + } + } else if (spec->hdr.common.type == RTE_ECPRI_MSG_TYPE_RTC_CTRL) { + rule_data[extract_nb] = ECPRI_FAFE_TYPE_2; + mask_data[extract_nb] = 0xff; + extract_size[extract_nb] = sizeof(uint8_t); + extract_off[extract_nb] = DPAA2_FAFE_PSR_OFFSET; + extract_nb++; + + if (mask->hdr.type2.rtc_id) { + rule_data[extract_nb] = spec->hdr.type2.rtc_id; + mask_data[extract_nb] = mask->hdr.type2.rtc_id; + extract_size[extract_nb] = sizeof(rte_be16_t); + extract_off[extract_nb] = + DPAA2_ECPRI_MSG_OFFSET + + offsetof(struct rte_ecpri_msg_rtc_ctrl, rtc_id); + extract_nb++; + } + if (mask->hdr.type2.seq_id) { + rule_data[extract_nb] = spec->hdr.type2.seq_id; + mask_data[extract_nb] = mask->hdr.type2.seq_id; + extract_size[extract_nb] = sizeof(rte_be16_t); + extract_off[extract_nb] = + DPAA2_ECPRI_MSG_OFFSET + + offsetof(struct rte_ecpri_msg_rtc_ctrl, seq_id); + extract_nb++; + } + } else if (spec->hdr.common.type == RTE_ECPRI_MSG_TYPE_GEN_DATA) { + rule_data[extract_nb] = ECPRI_FAFE_TYPE_3; + mask_data[extract_nb] = 0xff; + extract_size[extract_nb] = sizeof(uint8_t); + extract_off[extract_nb] = DPAA2_FAFE_PSR_OFFSET; + extract_nb++; + + if (mask->hdr.type3.pc_id || mask->hdr.type3.seq_id) + DPAA2_PMD_WARN("Extract type3 msg not support."); + } else if (spec->hdr.common.type == RTE_ECPRI_MSG_TYPE_RM_ACC) { + rule_data[extract_nb] = ECPRI_FAFE_TYPE_4; + mask_data[extract_nb] = 0xff; + extract_size[extract_nb] = sizeof(uint8_t); + extract_off[extract_nb] = DPAA2_FAFE_PSR_OFFSET; + extract_nb++; + + if (mask->hdr.type4.rma_id) { + rule_data[extract_nb] = spec->hdr.type4.rma_id; + mask_data[extract_nb] = mask->hdr.type4.rma_id; + extract_size[extract_nb] = sizeof(uint8_t); + extract_off[extract_nb] = + DPAA2_ECPRI_MSG_OFFSET + 0; + /** Compiler not support to take address + * of bit-field + * offsetof(struct rte_ecpri_msg_rm_access, + * rma_id); + */ + extract_nb++; + } + if (mask->hdr.type4.ele_id) { + rule_data[extract_nb] = spec->hdr.type4.ele_id; + mask_data[extract_nb] = mask->hdr.type4.ele_id; + extract_size[extract_nb] = sizeof(rte_be16_t); + extract_off[extract_nb] = + DPAA2_ECPRI_MSG_OFFSET + 2; + /** Compiler not support to take address + * of bit-field + * offsetof(struct rte_ecpri_msg_rm_access, + * ele_id); + */ + extract_nb++; + } + } else if (spec->hdr.common.type == RTE_ECPRI_MSG_TYPE_DLY_MSR) { + rule_data[extract_nb] = ECPRI_FAFE_TYPE_5; + mask_data[extract_nb] = 0xff; + extract_size[extract_nb] = sizeof(uint8_t); + extract_off[extract_nb] = DPAA2_FAFE_PSR_OFFSET; + extract_nb++; + + if (mask->hdr.type5.msr_id) { + rule_data[extract_nb] = spec->hdr.type5.msr_id; + mask_data[extract_nb] = mask->hdr.type5.msr_id; + extract_size[extract_nb] = sizeof(uint8_t); + extract_off[extract_nb] = + DPAA2_ECPRI_MSG_OFFSET + + offsetof(struct rte_ecpri_msg_delay_measure, + msr_id); + extract_nb++; + } + if (mask->hdr.type5.act_type) { + rule_data[extract_nb] = spec->hdr.type5.act_type; + mask_data[extract_nb] = mask->hdr.type5.act_type; + extract_size[extract_nb] = sizeof(uint8_t); + extract_off[extract_nb] = + DPAA2_ECPRI_MSG_OFFSET + + offsetof(struct rte_ecpri_msg_delay_measure, + act_type); + extract_nb++; + } + } else if (spec->hdr.common.type == RTE_ECPRI_MSG_TYPE_RMT_RST) { + rule_data[extract_nb] = ECPRI_FAFE_TYPE_6; + mask_data[extract_nb] = 0xff; + extract_size[extract_nb] = sizeof(uint8_t); + extract_off[extract_nb] = DPAA2_FAFE_PSR_OFFSET; + extract_nb++; + + if (mask->hdr.type6.rst_id) { + rule_data[extract_nb] = spec->hdr.type6.rst_id; + mask_data[extract_nb] = 
mask->hdr.type6.rst_id; + extract_size[extract_nb] = sizeof(rte_be16_t); + extract_off[extract_nb] = + DPAA2_ECPRI_MSG_OFFSET + + offsetof(struct rte_ecpri_msg_remote_reset, + rst_id); + extract_nb++; + } + if (mask->hdr.type6.rst_op) { + rule_data[extract_nb] = spec->hdr.type6.rst_op; + mask_data[extract_nb] = mask->hdr.type6.rst_op; + extract_size[extract_nb] = sizeof(uint8_t); + extract_off[extract_nb] = + DPAA2_ECPRI_MSG_OFFSET + + offsetof(struct rte_ecpri_msg_remote_reset, + rst_op); + extract_nb++; + } + } else if (spec->hdr.common.type == RTE_ECPRI_MSG_TYPE_EVT_IND) { + rule_data[extract_nb] = ECPRI_FAFE_TYPE_7; + mask_data[extract_nb] = 0xff; + extract_size[extract_nb] = sizeof(uint8_t); + extract_off[extract_nb] = DPAA2_FAFE_PSR_OFFSET; + extract_nb++; + + if (mask->hdr.type7.evt_id) { + rule_data[extract_nb] = spec->hdr.type7.evt_id; + mask_data[extract_nb] = mask->hdr.type7.evt_id; + extract_size[extract_nb] = sizeof(uint8_t); + extract_off[extract_nb] = + DPAA2_ECPRI_MSG_OFFSET + + offsetof(struct rte_ecpri_msg_event_ind, + evt_id); + extract_nb++; + } + if (mask->hdr.type7.evt_type) { + rule_data[extract_nb] = spec->hdr.type7.evt_type; + mask_data[extract_nb] = mask->hdr.type7.evt_type; + extract_size[extract_nb] = sizeof(uint8_t); + extract_off[extract_nb] = + DPAA2_ECPRI_MSG_OFFSET + + offsetof(struct rte_ecpri_msg_event_ind, + evt_type); + extract_nb++; + } + if (mask->hdr.type7.seq) { + rule_data[extract_nb] = spec->hdr.type7.seq; + mask_data[extract_nb] = mask->hdr.type7.seq; + extract_size[extract_nb] = sizeof(uint8_t); + extract_off[extract_nb] = + DPAA2_ECPRI_MSG_OFFSET + + offsetof(struct rte_ecpri_msg_event_ind, + seq); + extract_nb++; + } + if (mask->hdr.type7.number) { + rule_data[extract_nb] = spec->hdr.type7.number; + mask_data[extract_nb] = mask->hdr.type7.number; + extract_size[extract_nb] = sizeof(uint8_t); + extract_off[extract_nb] = + DPAA2_ECPRI_MSG_OFFSET + + offsetof(struct rte_ecpri_msg_event_ind, + number); + extract_nb++; } + } else { + DPAA2_PMD_ERR("Invalid ecpri header type(%d)", + spec->hdr.common.type); + return -EINVAL; + } + + for (i = 0; i < extract_nb; i++) { + ret = dpaa2_flow_add_pr_extract_rule(flow, + extract_off[i], + extract_size[i], &rule_data[i], &mask_data[i], + priv, group, + device_configured, + DPAA2_FLOW_QOS_TYPE); + if (ret) + return ret; + + ret = dpaa2_flow_add_pr_extract_rule(flow, + extract_off[i], + extract_size[i], &rule_data[i], &mask_data[i], + priv, group, + device_configured, + DPAA2_FLOW_FS_TYPE); + if (ret) + return ret; } (*device_configured) |= local_cfg; @@ -2697,168 +3895,78 @@ dpaa2_configure_flow_sctp(struct rte_flow *flow, } static int -dpaa2_configure_flow_gre(struct rte_flow *flow, - struct rte_eth_dev *dev, - const struct rte_flow_attr *attr, - const struct rte_flow_item *pattern, - const struct rte_flow_action actions[] __rte_unused, - struct rte_flow_error *error __rte_unused, - int *device_configured) +dpaa2_configure_flow_gtp(struct dpaa2_dev_flow *flow, + struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, + const struct rte_dpaa2_flow_item *dpaa2_pattern, + const struct rte_flow_action actions[] __rte_unused, + struct rte_flow_error *error __rte_unused, + int *device_configured) { - int index, ret; - int local_cfg = 0; + int ret, local_cfg = 0; uint32_t group; - const struct rte_flow_item_gre *spec, *mask; - - const struct rte_flow_item_gre *last __rte_unused; + const struct rte_flow_item_gtp *spec, *mask; struct dpaa2_dev_priv *priv = dev->data->dev_private; + const struct rte_flow_item 
*pattern = + &dpaa2_pattern->generic_item; group = attr->group; /* Parse pattern list to get the matching parameters */ - spec = (const struct rte_flow_item_gre *)pattern->spec; - last = (const struct rte_flow_item_gre *)pattern->last; - mask = (const struct rte_flow_item_gre *) - (pattern->mask ? pattern->mask : &dpaa2_flow_item_gre_mask); + spec = pattern->spec; + mask = pattern->mask ? + pattern->mask : &dpaa2_flow_item_gtp_mask; /* Get traffic class index and flow id to be configured */ flow->tc_id = group; flow->tc_index = attr->priority; - if (!spec) { - struct proto_discrimination proto; - - index = dpaa2_flow_extract_search( - &priv->extract.qos_key_extract.dpkg, - NET_PROT_IP, NH_FLD_IP_PROTO); - if (index < 0) { - ret = dpaa2_flow_proto_discrimination_extract( - &priv->extract.qos_key_extract, - DPAA2_FLOW_ITEM_TYPE_GENERIC_IP); - if (ret) { - DPAA2_PMD_ERR( - "QoS Extract IP protocol to discriminate GRE failed."); - - return -1; - } - local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE; - } - - index = dpaa2_flow_extract_search( - &priv->extract.tc_key_extract[group].dpkg, - NET_PROT_IP, NH_FLD_IP_PROTO); - if (index < 0) { - ret = dpaa2_flow_proto_discrimination_extract( - &priv->extract.tc_key_extract[group], - DPAA2_FLOW_ITEM_TYPE_GENERIC_IP); - if (ret) { - DPAA2_PMD_ERR( - "FS Extract IP protocol to discriminate GRE failed."); - - return -1; - } - local_cfg |= DPAA2_FS_TABLE_RECONFIGURE; - } - - ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group); - if (ret) { - DPAA2_PMD_ERR( - "Move IP addr before GRE discrimination set failed"); - return -1; - } + if (dpaa2_pattern->in_tunnel) { + DPAA2_PMD_ERR("Tunnel-GTP distribution not support"); + return -ENOTSUP; + } - proto.type = DPAA2_FLOW_ITEM_TYPE_GENERIC_IP; - proto.ip_proto = IPPROTO_GRE; - ret = dpaa2_flow_proto_discrimination_rule(priv, flow, - proto, group); - if (ret) { - DPAA2_PMD_ERR("GRE discrimination rule set failed"); - return -1; - } + if (!spec) { + ret = dpaa2_flow_identify_by_faf(priv, flow, + FAF_GTP_FRAM, DPAA2_FLOW_QOS_TYPE, + group, &local_cfg); + if (ret) + return ret; + + ret = dpaa2_flow_identify_by_faf(priv, flow, + FAF_GTP_FRAM, DPAA2_FLOW_FS_TYPE, + group, &local_cfg); + if (ret) + return ret; (*device_configured) |= local_cfg; - return 0; } - if (dpaa2_flow_extract_support((const uint8_t *)mask, - RTE_FLOW_ITEM_TYPE_GRE)) { - DPAA2_PMD_WARN("Extract field(s) of GRE not support."); + ret = dpaa2_flow_extract_support((const uint8_t *)mask, + RTE_FLOW_ITEM_TYPE_GTP); + if (ret) { + DPAA2_PMD_WARN("Extract field(s) of GTP not support."); - return -1; + return ret; } - if (!mask->protocol) + if (!mask->teid) return 0; - index = dpaa2_flow_extract_search( - &priv->extract.qos_key_extract.dpkg, - NET_PROT_GRE, NH_FLD_GRE_TYPE); - if (index < 0) { - ret = dpaa2_flow_extract_add( - &priv->extract.qos_key_extract, - NET_PROT_GRE, - NH_FLD_GRE_TYPE, - sizeof(rte_be16_t)); - if (ret) { - DPAA2_PMD_ERR("QoS Extract add GRE_TYPE failed."); - - return -1; - } - local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE; - } - - index = dpaa2_flow_extract_search( - &priv->extract.tc_key_extract[group].dpkg, - NET_PROT_GRE, NH_FLD_GRE_TYPE); - if (index < 0) { - ret = dpaa2_flow_extract_add( - &priv->extract.tc_key_extract[group], - NET_PROT_GRE, - NH_FLD_GRE_TYPE, - sizeof(rte_be16_t)); - if (ret) { - DPAA2_PMD_ERR("FS Extract add GRE_TYPE failed."); - - return -1; - } - local_cfg |= DPAA2_FS_TABLE_RECONFIGURE; - } + ret = dpaa2_flow_add_hdr_extract_rule(flow, NET_PROT_GTP, + NH_FLD_GTP_TEID, &spec->teid, + &mask->teid, 
sizeof(rte_be32_t), + priv, group, &local_cfg, DPAA2_FLOW_QOS_TYPE); + if (ret) + return ret; - ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group); - if (ret) { - DPAA2_PMD_ERR( - "Move ipaddr before GRE_TYPE set failed"); - return -1; - } - - ret = dpaa2_flow_rule_data_set( - &priv->extract.qos_key_extract, - &flow->qos_rule, - NET_PROT_GRE, - NH_FLD_GRE_TYPE, - &spec->protocol, - &mask->protocol, - sizeof(rte_be16_t)); - if (ret) { - DPAA2_PMD_ERR( - "QoS NH_FLD_GRE_TYPE rule data set failed"); - return -1; - } - - ret = dpaa2_flow_rule_data_set( - &priv->extract.tc_key_extract[group], - &flow->fs_rule, - NET_PROT_GRE, - NH_FLD_GRE_TYPE, - &spec->protocol, - &mask->protocol, - sizeof(rte_be16_t)); - if (ret) { - DPAA2_PMD_ERR( - "FS NH_FLD_GRE_TYPE rule data set failed"); - return -1; - } + ret = dpaa2_flow_add_hdr_extract_rule(flow, NET_PROT_GTP, + NH_FLD_GTP_TEID, &spec->teid, + &mask->teid, sizeof(rte_be32_t), + priv, group, &local_cfg, DPAA2_FLOW_FS_TYPE); + if (ret) + return ret; (*device_configured) |= local_cfg; @@ -2866,33 +3974,48 @@ dpaa2_configure_flow_gre(struct rte_flow *flow, } static int -dpaa2_configure_flow_raw(struct rte_flow *flow, - struct rte_eth_dev *dev, - const struct rte_flow_attr *attr, - const struct rte_flow_item *pattern, - const struct rte_flow_action actions[] __rte_unused, - struct rte_flow_error *error __rte_unused, - int *device_configured) +dpaa2_configure_flow_raw(struct dpaa2_dev_flow *flow, + struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, + const struct rte_dpaa2_flow_item *dpaa2_pattern, + const struct rte_flow_action actions[] __rte_unused, + struct rte_flow_error *error __rte_unused, + int *device_configured) { struct dpaa2_dev_priv *priv = dev->data->dev_private; - const struct rte_flow_item_raw *spec = pattern->spec; - const struct rte_flow_item_raw *mask = pattern->mask; - int prev_key_size = - priv->extract.qos_key_extract.key_info.key_total_size; int local_cfg = 0, ret; uint32_t group; + struct dpaa2_key_extract *qos_key_extract; + struct dpaa2_key_extract *tc_key_extract; + const struct rte_flow_item *pattern = &dpaa2_pattern->generic_item; + const struct rte_flow_item_raw *spec = pattern->spec; + const struct rte_flow_item_raw *mask = pattern->mask; /* Need both spec and mask */ if (!spec || !mask) { DPAA2_PMD_ERR("spec or mask not present."); return -EINVAL; } - /* Only supports non-relative with offset 0 */ - if (spec->relative || spec->offset != 0 || - spec->search || spec->limit) { - DPAA2_PMD_ERR("relative and non zero offset not supported."); + + if (spec->relative) { + /* TBD: relative offset support. + * To support relative offset of previous L3 protocol item, + * extracts should be expanded to identify if the frame is: + * vlan or none-vlan. + * + * To support relative offset of previous L4 protocol item, + * extracts should be expanded to identify if the frame is: + * vlan/IPv4 or vlan/IPv6 or none-vlan/IPv4 or none-vlan/IPv6. 
+ */ + DPAA2_PMD_ERR("relative not supported."); + return -EINVAL; + } + + if (spec->search) { + DPAA2_PMD_ERR("search not supported."); return -EINVAL; } + /* Spec len and mask len should be same */ if (spec->length != mask->length) { DPAA2_PMD_ERR("Spec len and mask len mismatch."); @@ -2904,37 +4027,44 @@ dpaa2_configure_flow_raw(struct rte_flow *flow, flow->tc_id = group; flow->tc_index = attr->priority; - if (prev_key_size <= spec->length) { - ret = dpaa2_flow_extract_add_raw(&priv->extract.qos_key_extract, - spec->length); - if (ret) { - DPAA2_PMD_ERR("QoS Extract RAW add failed."); - return -1; - } - local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE; + qos_key_extract = &priv->extract.qos_key_extract; + tc_key_extract = &priv->extract.tc_key_extract[group]; - ret = dpaa2_flow_extract_add_raw( - &priv->extract.tc_key_extract[group], - spec->length); - if (ret) { - DPAA2_PMD_ERR("FS Extract RAW add failed."); - return -1; - } - local_cfg |= DPAA2_FS_TABLE_RECONFIGURE; + ret = dpaa2_flow_extract_add_raw(priv, + spec->offset, spec->length, + DPAA2_FLOW_QOS_TYPE, 0, &local_cfg); + if (ret) { + DPAA2_PMD_ERR("QoS Extract RAW add failed."); + return -EINVAL; + } + + ret = dpaa2_flow_extract_add_raw(priv, + spec->offset, spec->length, + DPAA2_FLOW_FS_TYPE, group, &local_cfg); + if (ret) { + DPAA2_PMD_ERR("FS[%d] Extract RAW add failed.", + group); + return -EINVAL; } - ret = dpaa2_flow_rule_data_set_raw(&flow->qos_rule, spec->pattern, - mask->pattern, spec->length); + ret = dpaa2_flow_raw_rule_data_set(flow, + &qos_key_extract->key_profile, + spec->offset, spec->length, + spec->pattern, mask->pattern, + DPAA2_FLOW_QOS_TYPE); if (ret) { DPAA2_PMD_ERR("QoS RAW rule data set failed"); - return -1; + return -EINVAL; } - ret = dpaa2_flow_rule_data_set_raw(&flow->fs_rule, spec->pattern, - mask->pattern, spec->length); + ret = dpaa2_flow_raw_rule_data_set(flow, + &tc_key_extract->key_profile, + spec->offset, spec->length, + spec->pattern, mask->pattern, + DPAA2_FLOW_FS_TYPE); if (ret) { DPAA2_PMD_ERR("FS RAW rule data set failed"); - return -1; + return -EINVAL; } (*device_configured) |= local_cfg; @@ -2943,327 +4073,18 @@ dpaa2_configure_flow_raw(struct rte_flow *flow, } static inline int -dpaa2_fs_action_supported(enum rte_flow_action_type action) -{ - int i; - - for (i = 0; i < (int)(sizeof(dpaa2_supported_fs_action_type) / - sizeof(enum rte_flow_action_type)); i++) { - if (action == dpaa2_supported_fs_action_type[i]) - return 1; - } - - return 0; -} -/* The existing QoS/FS entry with IP address(es) - * needs update after - * new extract(s) are inserted before IP - * address(es) extract(s). 
- */ -static int -dpaa2_flow_entry_update( - struct dpaa2_dev_priv *priv, uint8_t tc_id) -{ - struct rte_flow *curr = LIST_FIRST(&priv->flows); - struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw; - int ret; - int qos_ipsrc_offset = -1, qos_ipdst_offset = -1; - int fs_ipsrc_offset = -1, fs_ipdst_offset = -1; - struct dpaa2_key_extract *qos_key_extract = - &priv->extract.qos_key_extract; - struct dpaa2_key_extract *tc_key_extract = - &priv->extract.tc_key_extract[tc_id]; - char ipsrc_key[NH_FLD_IPV6_ADDR_SIZE]; - char ipdst_key[NH_FLD_IPV6_ADDR_SIZE]; - char ipsrc_mask[NH_FLD_IPV6_ADDR_SIZE]; - char ipdst_mask[NH_FLD_IPV6_ADDR_SIZE]; - int extend = -1, extend1, size = -1; - uint16_t qos_index; - - while (curr) { - if (curr->ipaddr_rule.ipaddr_type == - FLOW_NONE_IPADDR) { - curr = LIST_NEXT(curr, next); - continue; - } - - if (curr->ipaddr_rule.ipaddr_type == - FLOW_IPV4_ADDR) { - qos_ipsrc_offset = - qos_key_extract->key_info.ipv4_src_offset; - qos_ipdst_offset = - qos_key_extract->key_info.ipv4_dst_offset; - fs_ipsrc_offset = - tc_key_extract->key_info.ipv4_src_offset; - fs_ipdst_offset = - tc_key_extract->key_info.ipv4_dst_offset; - size = NH_FLD_IPV4_ADDR_SIZE; - } else { - qos_ipsrc_offset = - qos_key_extract->key_info.ipv6_src_offset; - qos_ipdst_offset = - qos_key_extract->key_info.ipv6_dst_offset; - fs_ipsrc_offset = - tc_key_extract->key_info.ipv6_src_offset; - fs_ipdst_offset = - tc_key_extract->key_info.ipv6_dst_offset; - size = NH_FLD_IPV6_ADDR_SIZE; - } - - qos_index = curr->tc_id * priv->fs_entries + - curr->tc_index; - - dpaa2_flow_qos_entry_log("Before update", curr, qos_index, stdout); - - if (priv->num_rx_tc > 1) { - ret = dpni_remove_qos_entry(dpni, CMD_PRI_LOW, - priv->token, &curr->qos_rule); - if (ret) { - DPAA2_PMD_ERR("Qos entry remove failed."); - return -1; - } - } - - extend = -1; - - if (curr->ipaddr_rule.qos_ipsrc_offset >= 0) { - RTE_ASSERT(qos_ipsrc_offset >= - curr->ipaddr_rule.qos_ipsrc_offset); - extend1 = qos_ipsrc_offset - - curr->ipaddr_rule.qos_ipsrc_offset; - if (extend >= 0) - RTE_ASSERT(extend == extend1); - else - extend = extend1; - - RTE_ASSERT((size == NH_FLD_IPV4_ADDR_SIZE) || - (size == NH_FLD_IPV6_ADDR_SIZE)); - - memcpy(ipsrc_key, - (char *)(size_t)curr->qos_rule.key_iova + - curr->ipaddr_rule.qos_ipsrc_offset, - size); - memset((char *)(size_t)curr->qos_rule.key_iova + - curr->ipaddr_rule.qos_ipsrc_offset, - 0, size); - - memcpy(ipsrc_mask, - (char *)(size_t)curr->qos_rule.mask_iova + - curr->ipaddr_rule.qos_ipsrc_offset, - size); - memset((char *)(size_t)curr->qos_rule.mask_iova + - curr->ipaddr_rule.qos_ipsrc_offset, - 0, size); - - curr->ipaddr_rule.qos_ipsrc_offset = qos_ipsrc_offset; - } - - if (curr->ipaddr_rule.qos_ipdst_offset >= 0) { - RTE_ASSERT(qos_ipdst_offset >= - curr->ipaddr_rule.qos_ipdst_offset); - extend1 = qos_ipdst_offset - - curr->ipaddr_rule.qos_ipdst_offset; - if (extend >= 0) - RTE_ASSERT(extend == extend1); - else - extend = extend1; - - RTE_ASSERT((size == NH_FLD_IPV4_ADDR_SIZE) || - (size == NH_FLD_IPV6_ADDR_SIZE)); - - memcpy(ipdst_key, - (char *)(size_t)curr->qos_rule.key_iova + - curr->ipaddr_rule.qos_ipdst_offset, - size); - memset((char *)(size_t)curr->qos_rule.key_iova + - curr->ipaddr_rule.qos_ipdst_offset, - 0, size); - - memcpy(ipdst_mask, - (char *)(size_t)curr->qos_rule.mask_iova + - curr->ipaddr_rule.qos_ipdst_offset, - size); - memset((char *)(size_t)curr->qos_rule.mask_iova + - curr->ipaddr_rule.qos_ipdst_offset, - 0, size); - - curr->ipaddr_rule.qos_ipdst_offset = qos_ipdst_offset; - } - - if 
(curr->ipaddr_rule.qos_ipsrc_offset >= 0) { - RTE_ASSERT((size == NH_FLD_IPV4_ADDR_SIZE) || - (size == NH_FLD_IPV6_ADDR_SIZE)); - memcpy((char *)(size_t)curr->qos_rule.key_iova + - curr->ipaddr_rule.qos_ipsrc_offset, - ipsrc_key, - size); - memcpy((char *)(size_t)curr->qos_rule.mask_iova + - curr->ipaddr_rule.qos_ipsrc_offset, - ipsrc_mask, - size); - } - if (curr->ipaddr_rule.qos_ipdst_offset >= 0) { - RTE_ASSERT((size == NH_FLD_IPV4_ADDR_SIZE) || - (size == NH_FLD_IPV6_ADDR_SIZE)); - memcpy((char *)(size_t)curr->qos_rule.key_iova + - curr->ipaddr_rule.qos_ipdst_offset, - ipdst_key, - size); - memcpy((char *)(size_t)curr->qos_rule.mask_iova + - curr->ipaddr_rule.qos_ipdst_offset, - ipdst_mask, - size); - } - - if (extend >= 0) - curr->qos_real_key_size += extend; - - curr->qos_rule.key_size = FIXED_ENTRY_SIZE; - - dpaa2_flow_qos_entry_log("Start update", curr, qos_index, stdout); - - if (priv->num_rx_tc > 1) { - ret = dpni_add_qos_entry(dpni, CMD_PRI_LOW, - priv->token, &curr->qos_rule, - curr->tc_id, qos_index, - 0, 0); - if (ret) { - DPAA2_PMD_ERR("Qos entry update failed."); - return -1; - } - } - - if (!dpaa2_fs_action_supported(curr->action)) { - curr = LIST_NEXT(curr, next); - continue; - } - - dpaa2_flow_fs_entry_log("Before update", curr, stdout); - extend = -1; - - ret = dpni_remove_fs_entry(dpni, CMD_PRI_LOW, - priv->token, curr->tc_id, &curr->fs_rule); - if (ret) { - DPAA2_PMD_ERR("FS entry remove failed."); - return -1; - } - - if (curr->ipaddr_rule.fs_ipsrc_offset >= 0 && - tc_id == curr->tc_id) { - RTE_ASSERT(fs_ipsrc_offset >= - curr->ipaddr_rule.fs_ipsrc_offset); - extend1 = fs_ipsrc_offset - - curr->ipaddr_rule.fs_ipsrc_offset; - if (extend >= 0) - RTE_ASSERT(extend == extend1); - else - extend = extend1; - - memcpy(ipsrc_key, - (char *)(size_t)curr->fs_rule.key_iova + - curr->ipaddr_rule.fs_ipsrc_offset, - size); - memset((char *)(size_t)curr->fs_rule.key_iova + - curr->ipaddr_rule.fs_ipsrc_offset, - 0, size); - - memcpy(ipsrc_mask, - (char *)(size_t)curr->fs_rule.mask_iova + - curr->ipaddr_rule.fs_ipsrc_offset, - size); - memset((char *)(size_t)curr->fs_rule.mask_iova + - curr->ipaddr_rule.fs_ipsrc_offset, - 0, size); - - curr->ipaddr_rule.fs_ipsrc_offset = fs_ipsrc_offset; - } - - if (curr->ipaddr_rule.fs_ipdst_offset >= 0 && - tc_id == curr->tc_id) { - RTE_ASSERT(fs_ipdst_offset >= - curr->ipaddr_rule.fs_ipdst_offset); - extend1 = fs_ipdst_offset - - curr->ipaddr_rule.fs_ipdst_offset; - if (extend >= 0) - RTE_ASSERT(extend == extend1); - else - extend = extend1; - - memcpy(ipdst_key, - (char *)(size_t)curr->fs_rule.key_iova + - curr->ipaddr_rule.fs_ipdst_offset, - size); - memset((char *)(size_t)curr->fs_rule.key_iova + - curr->ipaddr_rule.fs_ipdst_offset, - 0, size); - - memcpy(ipdst_mask, - (char *)(size_t)curr->fs_rule.mask_iova + - curr->ipaddr_rule.fs_ipdst_offset, - size); - memset((char *)(size_t)curr->fs_rule.mask_iova + - curr->ipaddr_rule.fs_ipdst_offset, - 0, size); - - curr->ipaddr_rule.fs_ipdst_offset = fs_ipdst_offset; - } - - if (curr->ipaddr_rule.fs_ipsrc_offset >= 0) { - memcpy((char *)(size_t)curr->fs_rule.key_iova + - curr->ipaddr_rule.fs_ipsrc_offset, - ipsrc_key, - size); - memcpy((char *)(size_t)curr->fs_rule.mask_iova + - curr->ipaddr_rule.fs_ipsrc_offset, - ipsrc_mask, - size); - } - if (curr->ipaddr_rule.fs_ipdst_offset >= 0) { - memcpy((char *)(size_t)curr->fs_rule.key_iova + - curr->ipaddr_rule.fs_ipdst_offset, - ipdst_key, - size); - memcpy((char *)(size_t)curr->fs_rule.mask_iova + - curr->ipaddr_rule.fs_ipdst_offset, - ipdst_mask, - size); 
- } - - if (extend >= 0) - curr->fs_real_key_size += extend; - curr->fs_rule.key_size = FIXED_ENTRY_SIZE; - - dpaa2_flow_fs_entry_log("Start update", curr, stdout); - - ret = dpni_add_fs_entry(dpni, CMD_PRI_LOW, - priv->token, curr->tc_id, curr->tc_index, - &curr->fs_rule, &curr->action_cfg); - if (ret) { - DPAA2_PMD_ERR("FS entry update failed."); - return -1; - } - - curr = LIST_NEXT(curr, next); - } - - return 0; -} - -static inline int -dpaa2_flow_verify_attr( - struct dpaa2_dev_priv *priv, +dpaa2_flow_verify_attr(struct dpaa2_dev_priv *priv, const struct rte_flow_attr *attr) { - struct rte_flow *curr = LIST_FIRST(&priv->flows); + struct dpaa2_dev_flow *curr = LIST_FIRST(&priv->flows); while (curr) { if (curr->tc_id == attr->group && curr->tc_index == attr->priority) { - DPAA2_PMD_ERR( - "Flow with group %d and priority %d already exists.", + DPAA2_PMD_ERR("Flow(TC[%d].entry[%d] exists", attr->group, attr->priority); - return -1; + return -EINVAL; } curr = LIST_NEXT(curr, next); } @@ -3276,18 +4097,16 @@ dpaa2_flow_redirect_dev(struct dpaa2_dev_priv *priv, const struct rte_flow_action *action) { const struct rte_flow_action_port_id *port_id; + const struct rte_flow_action_ethdev *ethdev; int idx = -1; struct rte_eth_dev *dest_dev; if (action->type == RTE_FLOW_ACTION_TYPE_PORT_ID) { - port_id = (const struct rte_flow_action_port_id *) - action->conf; + port_id = action->conf; if (!port_id->original) idx = port_id->id; } else if (action->type == RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT) { - const struct rte_flow_action_ethdev *ethdev; - - ethdev = (const struct rte_flow_action_ethdev *)action->conf; + ethdev = action->conf; idx = ethdev->port_id; } else { return NULL; @@ -3296,20 +4115,18 @@ dpaa2_flow_redirect_dev(struct dpaa2_dev_priv *priv, if (idx >= 0) { if (!rte_eth_dev_is_valid_port(idx)) return NULL; + if (!rte_pmd_dpaa2_dev_is_dpaa2(idx)) + return NULL; dest_dev = &rte_eth_devices[idx]; } else { dest_dev = priv->eth_dev; } - if (!dpaa2_dev_is_dpaa2(dest_dev)) - return NULL; - return dest_dev; } static inline int -dpaa2_flow_verify_action( - struct dpaa2_dev_priv *priv, +dpaa2_flow_verify_action(struct dpaa2_dev_priv *priv, const struct rte_flow_attr *attr, const struct rte_flow_action actions[]) { @@ -3321,15 +4138,14 @@ dpaa2_flow_verify_action( while (!end_of_list) { switch (actions[j].type) { case RTE_FLOW_ACTION_TYPE_QUEUE: - dest_queue = (const struct rte_flow_action_queue *) - (actions[j].conf); + dest_queue = actions[j].conf; rxq = priv->rx_vq[dest_queue->index]; if (attr->group != rxq->tc_index) { - DPAA2_PMD_ERR( - "RXQ[%d] does not belong to the group %d", - dest_queue->index, attr->group); + DPAA2_PMD_ERR("FSQ(%d.%d) not in TC[%d]", + rxq->tc_index, rxq->flow_id, + attr->group); - return -1; + return -ENOTSUP; } break; case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT: @@ -3343,24 +4159,24 @@ dpaa2_flow_verify_action( rss_conf = (const struct rte_flow_action_rss *) (actions[j].conf); if (rss_conf->queue_num > priv->dist_queues) { - DPAA2_PMD_ERR( - "RSS number exceeds the distribution size"); + DPAA2_PMD_ERR("RSS number too large"); return -ENOTSUP; } for (i = 0; i < (int)rss_conf->queue_num; i++) { if (rss_conf->queue[i] >= priv->nb_rx_queues) { - DPAA2_PMD_ERR( - "RSS queue index exceeds the number of RXQs"); + DPAA2_PMD_ERR("RSS queue not in range"); return -ENOTSUP; } rxq = priv->rx_vq[rss_conf->queue[i]]; if (rxq->tc_index != attr->group) { - DPAA2_PMD_ERR( - "Queue/Group combination are not supported"); + DPAA2_PMD_ERR("RSS queue not in group"); return -ENOTSUP; } } + 
break; + case RTE_FLOW_ACTION_TYPE_PF: + /* Skip this action, have to add for vxlan */ break; case RTE_FLOW_ACTION_TYPE_END: end_of_list = 1; @@ -3376,28 +4192,302 @@ dpaa2_flow_verify_action( } static int -dpaa2_generic_flow_set(struct rte_flow *flow, - struct rte_eth_dev *dev, - const struct rte_flow_attr *attr, - const struct rte_flow_item pattern[], - const struct rte_flow_action actions[], - struct rte_flow_error *error) +dpaa2_configure_flow_fs_action(struct dpaa2_dev_priv *priv, + struct dpaa2_dev_flow *flow, + const struct rte_flow_action *rte_action) { + struct rte_eth_dev *dest_dev; + struct dpaa2_dev_priv *dest_priv; const struct rte_flow_action_queue *dest_queue; + struct dpaa2_queue *dest_q; + + memset(&flow->fs_action_cfg, 0, + sizeof(struct dpni_fs_action_cfg)); + flow->action_type = rte_action->type; + + if (flow->action_type == RTE_FLOW_ACTION_TYPE_QUEUE) { + dest_queue = rte_action->conf; + dest_q = priv->rx_vq[dest_queue->index]; + flow->fs_action_cfg.flow_id = dest_q->flow_id; + } else if (flow->action_type == RTE_FLOW_ACTION_TYPE_PORT_ID || + flow->action_type == RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT) { + dest_dev = dpaa2_flow_redirect_dev(priv, rte_action); + if (!dest_dev) { + DPAA2_PMD_ERR("Invalid device to redirect"); + return -EINVAL; + } + + dest_priv = dest_dev->data->dev_private; + dest_q = dest_priv->tx_vq[0]; + flow->fs_action_cfg.options = + DPNI_FS_OPT_REDIRECT_TO_DPNI_TX; + flow->fs_action_cfg.redirect_obj_token = + dest_priv->token; + flow->fs_action_cfg.flow_id = dest_q->flow_id; + } + + return 0; +} + +static inline uint16_t +dpaa2_flow_entry_size(uint16_t key_max_size) +{ + if (key_max_size > DPAA2_FLOW_ENTRY_MAX_SIZE) { + DPAA2_PMD_ERR("Key size(%d) > max(%d)", + key_max_size, + DPAA2_FLOW_ENTRY_MAX_SIZE); + + return 0; + } + + if (key_max_size > DPAA2_FLOW_ENTRY_MIN_SIZE) + return DPAA2_FLOW_ENTRY_MAX_SIZE; + + /* Current MC only support fixed entry size(56)*/ + return DPAA2_FLOW_ENTRY_MAX_SIZE; +} + +static inline int +dpaa2_flow_clear_fs_table(struct dpaa2_dev_priv *priv, + uint8_t tc_id) +{ + struct dpaa2_dev_flow *curr = LIST_FIRST(&priv->flows); + int need_clear = 0, ret; + struct fsl_mc_io *dpni = priv->hw; + + while (curr) { + if (curr->tc_id == tc_id) { + need_clear = 1; + break; + } + curr = LIST_NEXT(curr, next); + } + + if (need_clear) { + ret = dpni_clear_fs_entries(dpni, CMD_PRI_LOW, + priv->token, tc_id); + if (ret) { + DPAA2_PMD_ERR("TC[%d] clear failed", tc_id); + return ret; + } + } + + return 0; +} + +static int +dpaa2_configure_fs_rss_table(struct dpaa2_dev_priv *priv, + uint8_t tc_id, uint16_t dist_size, int rss_dist) +{ + struct dpaa2_key_extract *tc_extract; + uint8_t *key_cfg_buf; + uint64_t key_cfg_iova; + int ret; + struct dpni_rx_dist_cfg tc_cfg; + struct fsl_mc_io *dpni = priv->hw; + uint16_t entry_size; + uint16_t key_max_size; + + ret = dpaa2_flow_clear_fs_table(priv, tc_id); + if (ret < 0) { + DPAA2_PMD_ERR("TC[%d] clear failed", tc_id); + return ret; + } + + tc_extract = &priv->extract.tc_key_extract[tc_id]; + key_cfg_buf = priv->extract.tc_extract_param[tc_id]; + key_cfg_iova = DPAA2_VADDR_TO_IOVA_AND_CHECK(key_cfg_buf, + DPAA2_EXTRACT_PARAM_MAX_SIZE); + if (key_cfg_iova == RTE_BAD_IOVA) { + DPAA2_PMD_ERR("%s: No IOMMU map for key cfg(%p)", + __func__, key_cfg_buf); + + return -ENOBUFS; + } + + key_max_size = tc_extract->key_profile.key_max_size; + entry_size = dpaa2_flow_entry_size(key_max_size); + + dpaa2_flow_fs_extracts_log(priv, tc_id); + ret = dpkg_prepare_key_cfg(&tc_extract->dpkg, + key_cfg_buf); + if (ret < 
0) { + DPAA2_PMD_ERR("TC[%d] prepare key failed", tc_id); + return ret; + } + + memset(&tc_cfg, 0, sizeof(struct dpni_rx_dist_cfg)); + tc_cfg.dist_size = dist_size; + tc_cfg.key_cfg_iova = key_cfg_iova; + if (rss_dist) + tc_cfg.enable = true; + else + tc_cfg.enable = false; + tc_cfg.tc = tc_id; + ret = dpni_set_rx_hash_dist(dpni, CMD_PRI_LOW, + priv->token, &tc_cfg); + if (ret < 0) { + if (rss_dist) { + DPAA2_PMD_ERR("RSS TC[%d] set failed", + tc_id); + } else { + DPAA2_PMD_ERR("FS TC[%d] hash disable failed", + tc_id); + } + + return ret; + } + + if (rss_dist) + return 0; + + tc_cfg.enable = true; + tc_cfg.fs_miss_flow_id = dpaa2_flow_miss_flow_id; + ret = dpni_set_rx_fs_dist(dpni, CMD_PRI_LOW, + priv->token, &tc_cfg); + if (ret < 0) { + DPAA2_PMD_ERR("TC[%d] FS configured failed", tc_id); + return ret; + } + + ret = dpaa2_flow_rule_add_all(priv, DPAA2_FLOW_FS_TYPE, + entry_size, tc_id); + if (ret) + return ret; + + return 0; +} + +static int +dpaa2_configure_qos_table(struct dpaa2_dev_priv *priv, + int rss_dist) +{ + struct dpaa2_key_extract *qos_extract; + uint8_t *key_cfg_buf; + uint64_t key_cfg_iova; + int ret; + struct dpni_qos_tbl_cfg qos_cfg; + struct fsl_mc_io *dpni = priv->hw; + uint16_t entry_size; + uint16_t key_max_size; + + if (!rss_dist && priv->num_rx_tc <= 1) { + /* QoS table is effecitive for FS multiple TCs or RSS.*/ + return 0; + } + + if (LIST_FIRST(&priv->flows)) { + ret = dpni_clear_qos_table(dpni, CMD_PRI_LOW, + priv->token); + if (ret < 0) { + DPAA2_PMD_ERR("QoS table clear failed"); + return ret; + } + } + + qos_extract = &priv->extract.qos_key_extract; + key_cfg_buf = priv->extract.qos_extract_param; + key_cfg_iova = DPAA2_VADDR_TO_IOVA_AND_CHECK(key_cfg_buf, + DPAA2_EXTRACT_PARAM_MAX_SIZE); + if (key_cfg_iova == RTE_BAD_IOVA) { + DPAA2_PMD_ERR("%s: No IOMMU map for key cfg(%p)", + __func__, key_cfg_buf); + + return -ENOBUFS; + } + + key_max_size = qos_extract->key_profile.key_max_size; + entry_size = dpaa2_flow_entry_size(key_max_size); + + dpaa2_flow_qos_extracts_log(priv); + + ret = dpkg_prepare_key_cfg(&qos_extract->dpkg, + key_cfg_buf); + if (ret < 0) { + DPAA2_PMD_ERR("QoS prepare extract failed"); + return ret; + } + memset(&qos_cfg, 0, sizeof(struct dpni_qos_tbl_cfg)); + qos_cfg.keep_entries = true; + qos_cfg.key_cfg_iova = key_cfg_iova; + if (rss_dist) { + qos_cfg.discard_on_miss = true; + } else { + qos_cfg.discard_on_miss = false; + qos_cfg.default_tc = 0; + } + + ret = dpni_set_qos_table(dpni, CMD_PRI_LOW, + priv->token, &qos_cfg); + if (ret < 0) { + DPAA2_PMD_ERR("QoS table set failed"); + return ret; + } + + ret = dpaa2_flow_rule_add_all(priv, DPAA2_FLOW_QOS_TYPE, + entry_size, 0); + if (ret) + return ret; + + return 0; +} + +static int +dpaa2_flow_item_convert(const struct rte_flow_item pattern[], + struct rte_dpaa2_flow_item **dpaa2_pattern) +{ + struct rte_dpaa2_flow_item *new_pattern; + int num = 0, tunnel_start = 0; + + while (1) { + num++; + if (pattern[num].type == RTE_FLOW_ITEM_TYPE_END) + break; + } + + new_pattern = rte_malloc(NULL, sizeof(struct rte_dpaa2_flow_item) * num, + RTE_CACHE_LINE_SIZE); + if (!new_pattern) { + DPAA2_PMD_ERR("Failed to alloc %d flow items", num); + return -ENOMEM; + } + + num = 0; + while (pattern[num].type != RTE_FLOW_ITEM_TYPE_END) { + memcpy(&new_pattern[num].generic_item, &pattern[num], + sizeof(struct rte_flow_item)); + new_pattern[num].in_tunnel = 0; + + if (pattern[num].type == RTE_FLOW_ITEM_TYPE_VXLAN) + tunnel_start = 1; + else if (tunnel_start) + new_pattern[num].in_tunnel = 1; + num++; + } + + 
new_pattern[num].generic_item.type = RTE_FLOW_ITEM_TYPE_END; + *dpaa2_pattern = new_pattern; + + return 0; +} + +static int +dpaa2_generic_flow_set(struct dpaa2_dev_flow *flow, + struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, + const struct rte_flow_item pattern[], + const struct rte_flow_action actions[], + struct rte_flow_error *error) +{ const struct rte_flow_action_rss *rss_conf; int is_keycfg_configured = 0, end_of_list = 0; int ret = 0, i = 0, j = 0; - struct dpni_rx_dist_cfg tc_cfg; - struct dpni_qos_tbl_cfg qos_cfg; - struct dpni_fs_action_cfg action; struct dpaa2_dev_priv *priv = dev->data->dev_private; - struct dpaa2_queue *dest_q; - struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw; - size_t param; - struct rte_flow *curr = LIST_FIRST(&priv->flows); - uint16_t qos_index; - struct rte_eth_dev *dest_dev; - struct dpaa2_dev_priv *dest_priv; + struct dpaa2_dev_flow *curr = LIST_FIRST(&priv->flows); + uint16_t dist_size, key_size; + struct dpaa2_key_extract *qos_key_extract; + struct dpaa2_key_extract *tc_key_extract; + struct rte_dpaa2_flow_item *dpaa2_pattern = NULL; ret = dpaa2_flow_verify_attr(priv, attr); if (ret) @@ -3407,103 +4497,183 @@ dpaa2_generic_flow_set(struct rte_flow *flow, if (ret) return ret; + ret = dpaa2_flow_item_convert(pattern, &dpaa2_pattern); + if (ret) + return ret; + /* Parse pattern list to get the matching parameters */ while (!end_of_list) { switch (pattern[i].type) { case RTE_FLOW_ITEM_TYPE_ETH: - ret = dpaa2_configure_flow_eth(flow, - dev, attr, &pattern[i], actions, error, + ret = dpaa2_configure_flow_eth(flow, dev, attr, + &dpaa2_pattern[i], + actions, error, &is_keycfg_configured); if (ret) { - DPAA2_PMD_ERR("ETH flow configuration failed!"); - return ret; + DPAA2_PMD_ERR("ETH flow config failed!"); + goto end_flow_set; } break; case RTE_FLOW_ITEM_TYPE_VLAN: - ret = dpaa2_configure_flow_vlan(flow, - dev, attr, &pattern[i], actions, error, + ret = dpaa2_configure_flow_vlan(flow, dev, attr, + &dpaa2_pattern[i], + actions, error, &is_keycfg_configured); if (ret) { - DPAA2_PMD_ERR("vLan flow configuration failed!"); - return ret; + DPAA2_PMD_ERR("vLan flow config failed!"); + goto end_flow_set; } break; case RTE_FLOW_ITEM_TYPE_IPV4: + ret = dpaa2_configure_flow_ipv4(flow, dev, attr, + &dpaa2_pattern[i], + actions, error, + &is_keycfg_configured); + if (ret) { + DPAA2_PMD_ERR("IPV4 flow config failed!"); + goto end_flow_set; + } + break; case RTE_FLOW_ITEM_TYPE_IPV6: - ret = dpaa2_configure_flow_generic_ip(flow, - dev, attr, &pattern[i], actions, error, + ret = dpaa2_configure_flow_ipv6(flow, dev, attr, + &dpaa2_pattern[i], + actions, error, &is_keycfg_configured); if (ret) { - DPAA2_PMD_ERR("IP flow configuration failed!"); - return ret; + DPAA2_PMD_ERR("IPV6 flow config failed!"); + goto end_flow_set; } break; case RTE_FLOW_ITEM_TYPE_ICMP: - ret = dpaa2_configure_flow_icmp(flow, - dev, attr, &pattern[i], actions, error, + ret = dpaa2_configure_flow_icmp(flow, dev, attr, + &dpaa2_pattern[i], + actions, error, &is_keycfg_configured); if (ret) { - DPAA2_PMD_ERR("ICMP flow configuration failed!"); - return ret; + DPAA2_PMD_ERR("ICMP flow config failed!"); + goto end_flow_set; } break; case RTE_FLOW_ITEM_TYPE_UDP: - ret = dpaa2_configure_flow_udp(flow, - dev, attr, &pattern[i], actions, error, + ret = dpaa2_configure_flow_udp(flow, dev, attr, + &dpaa2_pattern[i], + actions, error, &is_keycfg_configured); if (ret) { - DPAA2_PMD_ERR("UDP flow configuration failed!"); - return ret; + DPAA2_PMD_ERR("UDP flow config failed!"); + goto 
end_flow_set; } break; case RTE_FLOW_ITEM_TYPE_TCP: - ret = dpaa2_configure_flow_tcp(flow, - dev, attr, &pattern[i], actions, error, + ret = dpaa2_configure_flow_tcp(flow, dev, attr, + &dpaa2_pattern[i], + actions, error, &is_keycfg_configured); if (ret) { - DPAA2_PMD_ERR("TCP flow configuration failed!"); - return ret; + DPAA2_PMD_ERR("TCP flow config failed!"); + goto end_flow_set; } break; case RTE_FLOW_ITEM_TYPE_SCTP: - ret = dpaa2_configure_flow_sctp(flow, - dev, attr, &pattern[i], actions, error, + ret = dpaa2_configure_flow_sctp(flow, dev, attr, + &dpaa2_pattern[i], + actions, error, &is_keycfg_configured); if (ret) { - DPAA2_PMD_ERR("SCTP flow configuration failed!"); - return ret; + DPAA2_PMD_ERR("SCTP flow config failed!"); + goto end_flow_set; + } + break; + case RTE_FLOW_ITEM_TYPE_ESP: + ret = dpaa2_configure_flow_esp(flow, + dev, attr, &dpaa2_pattern[i], + actions, error, + &is_keycfg_configured); + if (ret) { + DPAA2_PMD_ERR("ESP flow config failed!"); + goto end_flow_set; + } + break; + case RTE_FLOW_ITEM_TYPE_AH: + ret = dpaa2_configure_flow_ah(flow, + dev, attr, &dpaa2_pattern[i], + actions, error, + &is_keycfg_configured); + if (ret) { + DPAA2_PMD_ERR("AH flow config failed!"); + goto end_flow_set; } break; case RTE_FLOW_ITEM_TYPE_GRE: - ret = dpaa2_configure_flow_gre(flow, - dev, attr, &pattern[i], actions, error, + ret = dpaa2_configure_flow_gre(flow, dev, attr, + &dpaa2_pattern[i], + actions, error, &is_keycfg_configured); if (ret) { - DPAA2_PMD_ERR("GRE flow configuration failed!"); - return ret; + DPAA2_PMD_ERR("GRE flow config failed!"); + goto end_flow_set; + } + break; + case RTE_FLOW_ITEM_TYPE_VXLAN: + ret = dpaa2_configure_flow_vxlan(flow, dev, attr, + &dpaa2_pattern[i], + actions, error, + &is_keycfg_configured); + if (ret) { + DPAA2_PMD_ERR("VXLAN flow config failed!"); + goto end_flow_set; + } + break; + case RTE_FLOW_ITEM_TYPE_ECPRI: + ret = dpaa2_configure_flow_ecpri(flow, + dev, attr, &dpaa2_pattern[i], + actions, error, + &is_keycfg_configured); + if (ret) { + DPAA2_PMD_ERR("ECPRI flow config failed!"); + goto end_flow_set; + } + break; + case RTE_FLOW_ITEM_TYPE_GTP: + ret = dpaa2_configure_flow_gtp(flow, + dev, attr, &dpaa2_pattern[i], + actions, error, + &is_keycfg_configured); + if (ret) { + DPAA2_PMD_ERR("GTP flow config failed!"); + goto end_flow_set; } break; case RTE_FLOW_ITEM_TYPE_RAW: - ret = dpaa2_configure_flow_raw(flow, - dev, attr, &pattern[i], - actions, error, - &is_keycfg_configured); + ret = dpaa2_configure_flow_raw(flow, dev, attr, + &dpaa2_pattern[i], + actions, error, + &is_keycfg_configured); if (ret) { - DPAA2_PMD_ERR("RAW flow configuration failed!"); - return ret; + DPAA2_PMD_ERR("RAW flow config failed!"); + goto end_flow_set; } break; case RTE_FLOW_ITEM_TYPE_END: end_of_list = 1; break; /*End of List*/ default: - DPAA2_PMD_ERR("Invalid action type"); + DPAA2_PMD_ERR("Invalid flow item[%d] type(%d)", + i, pattern[i].type); ret = -ENOTSUP; break; } i++; } + qos_key_extract = &priv->extract.qos_key_extract; + key_size = qos_key_extract->key_profile.key_max_size; + flow->qos_rule.key_size = dpaa2_flow_entry_size(key_size); + + tc_key_extract = &priv->extract.tc_key_extract[flow->tc_id]; + key_size = tc_key_extract->key_profile.key_max_size; + flow->fs_rule.key_size = dpaa2_flow_entry_size(key_size); + /* Let's parse action on matching traffic */ end_of_list = 0; while (!end_of_list) { @@ -3511,150 +4681,33 @@ dpaa2_generic_flow_set(struct rte_flow *flow, case RTE_FLOW_ACTION_TYPE_QUEUE: case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT: 
case RTE_FLOW_ACTION_TYPE_PORT_ID: - memset(&action, 0, sizeof(struct dpni_fs_action_cfg)); - flow->action = actions[j].type; - - if (actions[j].type == RTE_FLOW_ACTION_TYPE_QUEUE) { - dest_queue = (const struct rte_flow_action_queue *) - (actions[j].conf); - dest_q = priv->rx_vq[dest_queue->index]; - action.flow_id = dest_q->flow_id; - } else { - dest_dev = dpaa2_flow_redirect_dev(priv, - &actions[j]); - if (!dest_dev) { - DPAA2_PMD_ERR("Invalid destination device to redirect!"); - return -1; - } - - dest_priv = dest_dev->data->dev_private; - dest_q = dest_priv->tx_vq[0]; - action.options = - DPNI_FS_OPT_REDIRECT_TO_DPNI_TX; - action.redirect_obj_token = dest_priv->token; - action.flow_id = dest_q->flow_id; - } + ret = dpaa2_configure_flow_fs_action(priv, flow, + &actions[j]); + if (ret) + goto end_flow_set; /* Configure FS table first*/ - if (is_keycfg_configured & DPAA2_FS_TABLE_RECONFIGURE) { - dpaa2_flow_fs_table_extracts_log(priv, - flow->tc_id, stdout); - if (dpkg_prepare_key_cfg( - &priv->extract.tc_key_extract[flow->tc_id].dpkg, - (uint8_t *)(size_t)priv->extract - .tc_extract_param[flow->tc_id]) < 0) { - DPAA2_PMD_ERR( - "Unable to prepare extract parameters"); - return -1; - } - - memset(&tc_cfg, 0, - sizeof(struct dpni_rx_dist_cfg)); - tc_cfg.dist_size = priv->nb_rx_queues / priv->num_rx_tc; - tc_cfg.key_cfg_iova = - (uint64_t)priv->extract.tc_extract_param[flow->tc_id]; - tc_cfg.tc = flow->tc_id; - tc_cfg.enable = false; - ret = dpni_set_rx_hash_dist(dpni, CMD_PRI_LOW, - priv->token, &tc_cfg); - if (ret < 0) { - DPAA2_PMD_ERR( - "TC hash cannot be disabled.(%d)", - ret); - return -1; - } - tc_cfg.enable = true; - tc_cfg.fs_miss_flow_id = dpaa2_flow_miss_flow_id; - ret = dpni_set_rx_fs_dist(dpni, CMD_PRI_LOW, - priv->token, &tc_cfg); - if (ret < 0) { - DPAA2_PMD_ERR( - "TC distribution cannot be configured.(%d)", - ret); - return -1; - } + dist_size = priv->nb_rx_queues / priv->num_rx_tc; + if (is_keycfg_configured & DPAA2_FLOW_FS_TYPE) { + ret = dpaa2_configure_fs_rss_table(priv, + flow->tc_id, + dist_size, + false); + if (ret) + goto end_flow_set; } /* Configure QoS table then.*/ - if (is_keycfg_configured & DPAA2_QOS_TABLE_RECONFIGURE) { - dpaa2_flow_qos_table_extracts_log(priv, stdout); - if (dpkg_prepare_key_cfg( - &priv->extract.qos_key_extract.dpkg, - (uint8_t *)(size_t)priv->extract.qos_extract_param) < 0) { - DPAA2_PMD_ERR( - "Unable to prepare extract parameters"); - return -1; - } - - memset(&qos_cfg, 0, sizeof(struct dpni_qos_tbl_cfg)); - qos_cfg.discard_on_miss = false; - qos_cfg.default_tc = 0; - qos_cfg.keep_entries = true; - qos_cfg.key_cfg_iova = - (size_t)priv->extract.qos_extract_param; - /* QoS table is effective for multiple TCs. 
*/ - if (priv->num_rx_tc > 1) { - ret = dpni_set_qos_table(dpni, CMD_PRI_LOW, - priv->token, &qos_cfg); - if (ret < 0) { - DPAA2_PMD_ERR( - "RSS QoS table can not be configured(%d)", - ret); - return -1; - } - } - } - - flow->qos_real_key_size = priv->extract - .qos_key_extract.key_info.key_total_size; - if (flow->ipaddr_rule.ipaddr_type == FLOW_IPV4_ADDR) { - if (flow->ipaddr_rule.qos_ipdst_offset >= - flow->ipaddr_rule.qos_ipsrc_offset) { - flow->qos_real_key_size = - flow->ipaddr_rule.qos_ipdst_offset + - NH_FLD_IPV4_ADDR_SIZE; - } else { - flow->qos_real_key_size = - flow->ipaddr_rule.qos_ipsrc_offset + - NH_FLD_IPV4_ADDR_SIZE; - } - } else if (flow->ipaddr_rule.ipaddr_type == - FLOW_IPV6_ADDR) { - if (flow->ipaddr_rule.qos_ipdst_offset >= - flow->ipaddr_rule.qos_ipsrc_offset) { - flow->qos_real_key_size = - flow->ipaddr_rule.qos_ipdst_offset + - NH_FLD_IPV6_ADDR_SIZE; - } else { - flow->qos_real_key_size = - flow->ipaddr_rule.qos_ipsrc_offset + - NH_FLD_IPV6_ADDR_SIZE; - } + if (is_keycfg_configured & DPAA2_FLOW_QOS_TYPE) { + ret = dpaa2_configure_qos_table(priv, false); + if (ret) + goto end_flow_set; } - /* QoS entry added is only effective for multiple TCs.*/ if (priv->num_rx_tc > 1) { - qos_index = flow->tc_id * priv->fs_entries + - flow->tc_index; - if (qos_index >= priv->qos_entries) { - DPAA2_PMD_ERR("QoS table with %d entries full", - priv->qos_entries); - return -1; - } - flow->qos_rule.key_size = FIXED_ENTRY_SIZE; - - dpaa2_flow_qos_entry_log("Start add", flow, - qos_index, stdout); - - ret = dpni_add_qos_entry(dpni, CMD_PRI_LOW, - priv->token, &flow->qos_rule, - flow->tc_id, qos_index, - 0, 0); - if (ret < 0) { - DPAA2_PMD_ERR( - "Error in adding entry to QoS table(%d)", ret); - return ret; - } + ret = dpaa2_flow_add_qos_rule(priv, flow); + if (ret) + goto end_flow_set; } if (flow->tc_index >= priv->fs_entries) { @@ -3663,140 +4716,50 @@ dpaa2_generic_flow_set(struct rte_flow *flow, return -1; } - flow->fs_real_key_size = - priv->extract.tc_key_extract[flow->tc_id] - .key_info.key_total_size; - - if (flow->ipaddr_rule.ipaddr_type == - FLOW_IPV4_ADDR) { - if (flow->ipaddr_rule.fs_ipdst_offset >= - flow->ipaddr_rule.fs_ipsrc_offset) { - flow->fs_real_key_size = - flow->ipaddr_rule.fs_ipdst_offset + - NH_FLD_IPV4_ADDR_SIZE; - } else { - flow->fs_real_key_size = - flow->ipaddr_rule.fs_ipsrc_offset + - NH_FLD_IPV4_ADDR_SIZE; - } - } else if (flow->ipaddr_rule.ipaddr_type == - FLOW_IPV6_ADDR) { - if (flow->ipaddr_rule.fs_ipdst_offset >= - flow->ipaddr_rule.fs_ipsrc_offset) { - flow->fs_real_key_size = - flow->ipaddr_rule.fs_ipdst_offset + - NH_FLD_IPV6_ADDR_SIZE; - } else { - flow->fs_real_key_size = - flow->ipaddr_rule.fs_ipsrc_offset + - NH_FLD_IPV6_ADDR_SIZE; - } - } - - flow->fs_rule.key_size = FIXED_ENTRY_SIZE; - - dpaa2_flow_fs_entry_log("Start add", flow, stdout); + ret = dpaa2_flow_add_fs_rule(priv, flow); + if (ret) + goto end_flow_set; - ret = dpni_add_fs_entry(dpni, CMD_PRI_LOW, priv->token, - flow->tc_id, flow->tc_index, - &flow->fs_rule, &action); - if (ret < 0) { - DPAA2_PMD_ERR( - "Error in adding entry to FS table(%d)", ret); - return ret; - } - memcpy(&flow->action_cfg, &action, - sizeof(struct dpni_fs_action_cfg)); break; case RTE_FLOW_ACTION_TYPE_RSS: - rss_conf = (const struct rte_flow_action_rss *)(actions[j].conf); + rss_conf = actions[j].conf; + flow->action_type = RTE_FLOW_ACTION_TYPE_RSS; - flow->action = RTE_FLOW_ACTION_TYPE_RSS; ret = dpaa2_distset_to_dpkg_profile_cfg(rss_conf->types, - &priv->extract.tc_key_extract[flow->tc_id].dpkg); + 
&tc_key_extract->dpkg); if (ret < 0) { - DPAA2_PMD_ERR( - "unable to set flow distribution.please check queue config"); - return ret; - } - - /* Allocate DMA'ble memory to write the rules */ - param = (size_t)rte_malloc(NULL, 256, 64); - if (!param) { - DPAA2_PMD_ERR("Memory allocation failure"); - return -1; + DPAA2_PMD_ERR("TC[%d] distset RSS failed", + flow->tc_id); + goto end_flow_set; } - if (dpkg_prepare_key_cfg( - &priv->extract.tc_key_extract[flow->tc_id].dpkg, - (uint8_t *)param) < 0) { - DPAA2_PMD_ERR( - "Unable to prepare extract parameters"); - rte_free((void *)param); - return -1; + dist_size = rss_conf->queue_num; + if (is_keycfg_configured & DPAA2_FLOW_FS_TYPE) { + ret = dpaa2_configure_fs_rss_table(priv, + flow->tc_id, + dist_size, + true); + if (ret) + goto end_flow_set; } - memset(&tc_cfg, 0, sizeof(struct dpni_rx_dist_cfg)); - tc_cfg.dist_size = rss_conf->queue_num; - tc_cfg.key_cfg_iova = (size_t)param; - tc_cfg.enable = true; - tc_cfg.tc = flow->tc_id; - ret = dpni_set_rx_hash_dist(dpni, CMD_PRI_LOW, - priv->token, &tc_cfg); - if (ret < 0) { - DPAA2_PMD_ERR( - "RSS TC table cannot be configured: %d", - ret); - rte_free((void *)param); - return -1; + if (is_keycfg_configured & DPAA2_FLOW_QOS_TYPE) { + ret = dpaa2_configure_qos_table(priv, true); + if (ret) + goto end_flow_set; } - rte_free((void *)param); - if (is_keycfg_configured & DPAA2_QOS_TABLE_RECONFIGURE) { - if (dpkg_prepare_key_cfg( - &priv->extract.qos_key_extract.dpkg, - (uint8_t *)(size_t)priv->extract.qos_extract_param) < 0) { - DPAA2_PMD_ERR( - "Unable to prepare extract parameters"); - return -1; - } - memset(&qos_cfg, 0, - sizeof(struct dpni_qos_tbl_cfg)); - qos_cfg.discard_on_miss = true; - qos_cfg.keep_entries = true; - qos_cfg.key_cfg_iova = - (size_t)priv->extract.qos_extract_param; - ret = dpni_set_qos_table(dpni, CMD_PRI_LOW, - priv->token, &qos_cfg); - if (ret < 0) { - DPAA2_PMD_ERR( - "RSS QoS dist can't be configured-%d", - ret); - return -1; - } - } + ret = dpaa2_flow_add_qos_rule(priv, flow); + if (ret) + goto end_flow_set; - /* Add Rule into QoS table */ - qos_index = flow->tc_id * priv->fs_entries + - flow->tc_index; - if (qos_index >= priv->qos_entries) { - DPAA2_PMD_ERR("QoS table with %d entries full", - priv->qos_entries); - return -1; - } + ret = dpaa2_flow_add_fs_rule(priv, flow); + if (ret) + goto end_flow_set; - flow->qos_real_key_size = - priv->extract.qos_key_extract.key_info.key_total_size; - flow->qos_rule.key_size = FIXED_ENTRY_SIZE; - ret = dpni_add_qos_entry(dpni, CMD_PRI_LOW, priv->token, - &flow->qos_rule, flow->tc_id, - qos_index, 0, 0); - if (ret < 0) { - DPAA2_PMD_ERR( - "Error in entry addition in QoS table(%d)", - ret); - return ret; - } + break; + case RTE_FLOW_ACTION_TYPE_PF: + /* Skip this action, have to add for vxlan */ break; case RTE_FLOW_ACTION_TYPE_END: end_of_list = 1; @@ -3809,17 +4772,8 @@ dpaa2_generic_flow_set(struct rte_flow *flow, j++; } +end_flow_set: if (!ret) { - if (is_keycfg_configured & - (DPAA2_QOS_TABLE_RECONFIGURE | - DPAA2_FS_TABLE_RECONFIGURE)) { - ret = dpaa2_flow_entry_update(priv, flow->tc_id); - if (ret) { - DPAA2_PMD_ERR("Flow entry update failed."); - - return -1; - } - } /* New rules are inserted. 
*/ if (!curr) { LIST_INSERT_HEAD(&priv->flows, flow, next); @@ -3829,26 +4783,31 @@ dpaa2_generic_flow_set(struct rte_flow *flow, LIST_INSERT_AFTER(curr, flow, next); } } + + if (dpaa2_pattern) + rte_free(dpaa2_pattern); + return ret; } static inline int dpaa2_dev_verify_attr(struct dpni_attr *dpni_attr, - const struct rte_flow_attr *attr) + const struct rte_flow_attr *attr) { int ret = 0; if (unlikely(attr->group >= dpni_attr->num_rx_tcs)) { - DPAA2_PMD_ERR("Priority group is out of range"); + DPAA2_PMD_ERR("Group/TC(%d) is out of range(%d)", + attr->group, dpni_attr->num_rx_tcs); ret = -ENOTSUP; } if (unlikely(attr->priority >= dpni_attr->fs_entries)) { - DPAA2_PMD_ERR("Priority within the group is out of range"); + DPAA2_PMD_ERR("Priority(%d) within group is out of range(%d)", + attr->priority, dpni_attr->fs_entries); ret = -ENOTSUP; } if (unlikely(attr->egress)) { - DPAA2_PMD_ERR( - "Flow configuration is not supported on egress side"); + DPAA2_PMD_ERR("Egress flow configuration is not supported"); ret = -ENOTSUP; } if (unlikely(!attr->ingress)) { @@ -3863,27 +4822,41 @@ dpaa2_dev_verify_patterns(const struct rte_flow_item pattern[]) { unsigned int i, j, is_found = 0; int ret = 0; + const enum rte_flow_item_type *hp_supported; + const enum rte_flow_item_type *sp_supported; + uint64_t hp_supported_num, sp_supported_num; + + hp_supported = dpaa2_hp_supported_pattern_type; + hp_supported_num = RTE_DIM(dpaa2_hp_supported_pattern_type); + + sp_supported = dpaa2_sp_supported_pattern_type; + sp_supported_num = RTE_DIM(dpaa2_sp_supported_pattern_type); for (j = 0; pattern[j].type != RTE_FLOW_ITEM_TYPE_END; j++) { - for (i = 0; i < RTE_DIM(dpaa2_supported_pattern_type); i++) { - if (dpaa2_supported_pattern_type[i] - == pattern[j].type) { + is_found = 0; + for (i = 0; i < hp_supported_num; i++) { + if (hp_supported[i] == pattern[j].type) { is_found = 1; break; } } + if (is_found) + continue; + if (dpaa2_sp_loaded > 0) { + for (i = 0; i < sp_supported_num; i++) { + if (sp_supported[i] == pattern[j].type) { + is_found = 1; + break; + } + } + } if (!is_found) { + DPAA2_PMD_WARN("Flow type(%d) not supported", + pattern[j].type); ret = -ENOTSUP; break; } } - /* Lets verify other combinations of given pattern rules */ - for (j = 0; pattern[j].type != RTE_FLOW_ITEM_TYPE_END; j++) { - if (!pattern[j].spec) { - ret = -EINVAL; - break; - } - } return ret; } @@ -3908,18 +4881,18 @@ dpaa2_dev_verify_actions(const struct rte_flow_action actions[]) } for (j = 0; actions[j].type != RTE_FLOW_ACTION_TYPE_END; j++) { if (actions[j].type != RTE_FLOW_ACTION_TYPE_DROP && - !actions[j].conf) + !actions[j].conf) ret = -EINVAL; } return ret; } -static -int dpaa2_flow_validate(struct rte_eth_dev *dev, - const struct rte_flow_attr *flow_attr, - const struct rte_flow_item pattern[], - const struct rte_flow_action actions[], - struct rte_flow_error *error) +static int +dpaa2_flow_validate(struct rte_eth_dev *dev, + const struct rte_flow_attr *flow_attr, + const struct rte_flow_item pattern[], + const struct rte_flow_action actions[], + struct rte_flow_error *error) { struct dpaa2_dev_priv *priv = dev->data->dev_private; struct dpni_attr dpni_attr; @@ -3930,170 +4903,200 @@ int dpaa2_flow_validate(struct rte_eth_dev *dev, memset(&dpni_attr, 0, sizeof(struct dpni_attr)); ret = dpni_get_attributes(dpni, CMD_PRI_LOW, token, &dpni_attr); if (ret < 0) { - DPAA2_PMD_ERR( - "Failure to get dpni@%p attribute, err code %d", - dpni, ret); + DPAA2_PMD_ERR("Get dpni@%d attribute failed(%d)", + priv->hw_id, ret); 
rte_flow_error_set(error, EPERM, - RTE_FLOW_ERROR_TYPE_ATTR, - flow_attr, "invalid"); + RTE_FLOW_ERROR_TYPE_ATTR, + flow_attr, "invalid"); return ret; } /* Verify input attributes */ ret = dpaa2_dev_verify_attr(&dpni_attr, flow_attr); if (ret < 0) { - DPAA2_PMD_ERR( - "Invalid attributes are given"); + DPAA2_PMD_ERR("Invalid attributes are given"); rte_flow_error_set(error, EPERM, - RTE_FLOW_ERROR_TYPE_ATTR, - flow_attr, "invalid"); + RTE_FLOW_ERROR_TYPE_ATTR, + flow_attr, "invalid"); goto not_valid_params; } /* Verify input pattern list */ ret = dpaa2_dev_verify_patterns(pattern); if (ret < 0) { - DPAA2_PMD_ERR( - "Invalid pattern list is given"); + DPAA2_PMD_ERR("Invalid pattern list is given"); rte_flow_error_set(error, EPERM, - RTE_FLOW_ERROR_TYPE_ITEM, - pattern, "invalid"); + RTE_FLOW_ERROR_TYPE_ITEM, + pattern, "invalid"); goto not_valid_params; } /* Verify input action list */ ret = dpaa2_dev_verify_actions(actions); if (ret < 0) { - DPAA2_PMD_ERR( - "Invalid action list is given"); + DPAA2_PMD_ERR("Invalid action list is given"); rte_flow_error_set(error, EPERM, - RTE_FLOW_ERROR_TYPE_ACTION, - actions, "invalid"); + RTE_FLOW_ERROR_TYPE_ACTION, + actions, "invalid"); goto not_valid_params; } not_valid_params: return ret; } -static -struct rte_flow *dpaa2_flow_create(struct rte_eth_dev *dev, - const struct rte_flow_attr *attr, - const struct rte_flow_item pattern[], - const struct rte_flow_action actions[], - struct rte_flow_error *error) +static struct rte_flow * +dpaa2_flow_create(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, + const struct rte_flow_item pattern[], + const struct rte_flow_action actions[], + struct rte_flow_error *error) { - struct rte_flow *flow = NULL; - size_t key_iova = 0, mask_iova = 0; + struct dpaa2_dev_flow *flow = NULL; + struct dpaa2_dev_priv *priv = dev->data->dev_private; int ret; + uint64_t iova; dpaa2_flow_control_log = getenv("DPAA2_FLOW_CONTROL_LOG"); if (getenv("DPAA2_FLOW_CONTROL_MISS_FLOW")) { - struct dpaa2_dev_priv *priv = dev->data->dev_private; - dpaa2_flow_miss_flow_id = - atoi(getenv("DPAA2_FLOW_CONTROL_MISS_FLOW")); + (uint16_t)atoi(getenv("DPAA2_FLOW_CONTROL_MISS_FLOW")); if (dpaa2_flow_miss_flow_id >= priv->dist_queues) { - DPAA2_PMD_ERR( - "The missed flow ID %d exceeds the max flow ID %d", - dpaa2_flow_miss_flow_id, - priv->dist_queues - 1); + DPAA2_PMD_ERR("Missed flow ID %d >= dist size(%d)", + dpaa2_flow_miss_flow_id, + priv->dist_queues); return NULL; } } - flow = rte_zmalloc(NULL, sizeof(struct rte_flow), RTE_CACHE_LINE_SIZE); + flow = rte_zmalloc(NULL, sizeof(struct dpaa2_dev_flow), + RTE_CACHE_LINE_SIZE); if (!flow) { DPAA2_PMD_ERR("Failure to allocate memory for flow"); goto mem_failure; } - /* Allocate DMA'ble memory to write the rules */ - key_iova = (size_t)rte_zmalloc(NULL, 256, 64); - if (!key_iova) { - DPAA2_PMD_ERR( - "Memory allocation failure for rule configuration"); + + /* Allocate DMA'ble memory to write the qos rules */ + flow->qos_key_addr = rte_zmalloc(NULL, + DPAA2_EXTRACT_ALLOC_KEY_MAX_SIZE, RTE_CACHE_LINE_SIZE); + if (!flow->qos_key_addr) { + DPAA2_PMD_ERR("Memory allocation failed"); goto mem_failure; } - mask_iova = (size_t)rte_zmalloc(NULL, 256, 64); - if (!mask_iova) { - DPAA2_PMD_ERR( - "Memory allocation failure for rule configuration"); + iova = DPAA2_VADDR_TO_IOVA_AND_CHECK(flow->qos_key_addr, + DPAA2_EXTRACT_ALLOC_KEY_MAX_SIZE); + if (iova == RTE_BAD_IOVA) { + DPAA2_PMD_ERR("%s: No IOMMU map for qos key(%p)", + __func__, flow->qos_key_addr); goto mem_failure; } + flow->qos_rule.key_iova 
= iova; - flow->qos_rule.key_iova = key_iova; - flow->qos_rule.mask_iova = mask_iova; + flow->qos_mask_addr = rte_zmalloc(NULL, + DPAA2_EXTRACT_ALLOC_KEY_MAX_SIZE, RTE_CACHE_LINE_SIZE); + if (!flow->qos_mask_addr) { + DPAA2_PMD_ERR("Memory allocation failed"); + goto mem_failure; + } + iova = DPAA2_VADDR_TO_IOVA_AND_CHECK(flow->qos_mask_addr, + DPAA2_EXTRACT_ALLOC_KEY_MAX_SIZE); + if (iova == RTE_BAD_IOVA) { + DPAA2_PMD_ERR("%s: No IOMMU map for qos mask(%p)", + __func__, flow->qos_mask_addr); + goto mem_failure; + } + flow->qos_rule.mask_iova = iova; - /* Allocate DMA'ble memory to write the rules */ - key_iova = (size_t)rte_zmalloc(NULL, 256, 64); - if (!key_iova) { - DPAA2_PMD_ERR( - "Memory allocation failure for rule configuration"); + /* Allocate DMA'ble memory to write the FS rules */ + flow->fs_key_addr = rte_zmalloc(NULL, + DPAA2_EXTRACT_ALLOC_KEY_MAX_SIZE, RTE_CACHE_LINE_SIZE); + if (!flow->fs_key_addr) { + DPAA2_PMD_ERR("Memory allocation failed"); goto mem_failure; } - mask_iova = (size_t)rte_zmalloc(NULL, 256, 64); - if (!mask_iova) { - DPAA2_PMD_ERR( - "Memory allocation failure for rule configuration"); + iova = DPAA2_VADDR_TO_IOVA_AND_CHECK(flow->fs_key_addr, + DPAA2_EXTRACT_ALLOC_KEY_MAX_SIZE); + if (iova == RTE_BAD_IOVA) { + DPAA2_PMD_ERR("%s: No IOMMU map for fs key(%p)", + __func__, flow->fs_key_addr); goto mem_failure; } + flow->fs_rule.key_iova = iova; - flow->fs_rule.key_iova = key_iova; - flow->fs_rule.mask_iova = mask_iova; + flow->fs_mask_addr = rte_zmalloc(NULL, + DPAA2_EXTRACT_ALLOC_KEY_MAX_SIZE, RTE_CACHE_LINE_SIZE); + if (!flow->fs_mask_addr) { + DPAA2_PMD_ERR("Memory allocation failed"); + goto mem_failure; + } + iova = DPAA2_VADDR_TO_IOVA_AND_CHECK(flow->fs_mask_addr, + DPAA2_EXTRACT_ALLOC_KEY_MAX_SIZE); + if (iova == RTE_BAD_IOVA) { + DPAA2_PMD_ERR("%s: No IOMMU map for fs mask(%p)", + __func__, flow->fs_mask_addr); + goto mem_failure; + } + flow->fs_rule.mask_iova = iova; - flow->ipaddr_rule.ipaddr_type = FLOW_NONE_IPADDR; - flow->ipaddr_rule.qos_ipsrc_offset = - IP_ADDRESS_OFFSET_INVALID; - flow->ipaddr_rule.qos_ipdst_offset = - IP_ADDRESS_OFFSET_INVALID; - flow->ipaddr_rule.fs_ipsrc_offset = - IP_ADDRESS_OFFSET_INVALID; - flow->ipaddr_rule.fs_ipdst_offset = - IP_ADDRESS_OFFSET_INVALID; + priv->curr = flow; - ret = dpaa2_generic_flow_set(flow, dev, attr, pattern, - actions, error); + ret = dpaa2_generic_flow_set(flow, dev, attr, pattern, actions, error); if (ret < 0) { if (error && error->type > RTE_FLOW_ERROR_TYPE_ACTION) rte_flow_error_set(error, EPERM, - RTE_FLOW_ERROR_TYPE_UNSPECIFIED, - attr, "unknown"); - DPAA2_PMD_ERR("Failure to create flow, return code (%d)", ret); + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + attr, "unknown"); + DPAA2_PMD_ERR("Create flow failed (%d)", ret); goto creation_error; } - return flow; + priv->curr = NULL; + return (struct rte_flow *)flow; + mem_failure: - rte_flow_error_set(error, EPERM, - RTE_FLOW_ERROR_TYPE_UNSPECIFIED, - NULL, "memory alloc"); + rte_flow_error_set(error, EPERM, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, + "memory alloc"); + creation_error: - rte_free((void *)flow); - rte_free((void *)key_iova); - rte_free((void *)mask_iova); + if (flow) { + if (flow->qos_key_addr) + rte_free(flow->qos_key_addr); + if (flow->qos_mask_addr) + rte_free(flow->qos_mask_addr); + if (flow->fs_key_addr) + rte_free(flow->fs_key_addr); + if (flow->fs_mask_addr) + rte_free(flow->fs_mask_addr); + rte_free(flow); + } + priv->curr = NULL; return NULL; } -static -int dpaa2_flow_destroy(struct rte_eth_dev *dev, - struct rte_flow *flow, - 
struct rte_flow_error *error) +static int +dpaa2_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *_flow, + struct rte_flow_error *error) { int ret = 0; + struct dpaa2_dev_flow *flow; struct dpaa2_dev_priv *priv = dev->data->dev_private; - struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw; + struct fsl_mc_io *dpni = priv->hw; + + flow = (struct dpaa2_dev_flow *)_flow; - switch (flow->action) { + switch (flow->action_type) { case RTE_FLOW_ACTION_TYPE_QUEUE: case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT: case RTE_FLOW_ACTION_TYPE_PORT_ID: if (priv->num_rx_tc > 1) { /* Remove entry from QoS table first */ - ret = dpni_remove_qos_entry(dpni, CMD_PRI_LOW, priv->token, - &flow->qos_rule); + ret = dpni_remove_qos_entry(dpni, CMD_PRI_LOW, + priv->token, + &flow->qos_rule); if (ret < 0) { - DPAA2_PMD_ERR( - "Error in removing entry from QoS table(%d)", ret); + DPAA2_PMD_ERR("Remove FS QoS entry failed"); + dpaa2_flow_qos_entry_log("Delete failed", flow, + -1); + abort(); goto error; } } @@ -4102,34 +5105,37 @@ int dpaa2_flow_destroy(struct rte_eth_dev *dev, ret = dpni_remove_fs_entry(dpni, CMD_PRI_LOW, priv->token, flow->tc_id, &flow->fs_rule); if (ret < 0) { - DPAA2_PMD_ERR( - "Error in removing entry from FS table(%d)", ret); + DPAA2_PMD_ERR("Remove entry from FS[%d] failed", + flow->tc_id); goto error; } break; case RTE_FLOW_ACTION_TYPE_RSS: if (priv->num_rx_tc > 1) { - ret = dpni_remove_qos_entry(dpni, CMD_PRI_LOW, priv->token, - &flow->qos_rule); + ret = dpni_remove_qos_entry(dpni, CMD_PRI_LOW, + priv->token, + &flow->qos_rule); if (ret < 0) { - DPAA2_PMD_ERR( - "Error in entry addition in QoS table(%d)", ret); + DPAA2_PMD_ERR("Remove RSS QoS entry failed"); goto error; } } break; default: - DPAA2_PMD_ERR( - "Action type (%d) is not supported", flow->action); + DPAA2_PMD_ERR("Action(%d) not supported", flow->action_type); ret = -ENOTSUP; break; } LIST_REMOVE(flow, next); - rte_free((void *)(size_t)flow->qos_rule.key_iova); - rte_free((void *)(size_t)flow->qos_rule.mask_iova); - rte_free((void *)(size_t)flow->fs_rule.key_iova); - rte_free((void *)(size_t)flow->fs_rule.mask_iova); + if (flow->qos_key_addr) + rte_free(flow->qos_key_addr); + if (flow->qos_mask_addr) + rte_free(flow->qos_mask_addr); + if (flow->fs_key_addr) + rte_free(flow->fs_key_addr); + if (flow->fs_mask_addr) + rte_free(flow->fs_mask_addr); /* Now free the flow */ rte_free(flow); @@ -4154,12 +5160,12 @@ dpaa2_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error) { struct dpaa2_dev_priv *priv = dev->data->dev_private; - struct rte_flow *flow = LIST_FIRST(&priv->flows); + struct dpaa2_dev_flow *flow = LIST_FIRST(&priv->flows); while (flow) { - struct rte_flow *next = LIST_NEXT(flow, next); + struct dpaa2_dev_flow *next = LIST_NEXT(flow, next); - dpaa2_flow_destroy(dev, flow, error); + dpaa2_flow_destroy(dev, (struct rte_flow *)flow, error); flow = next; } return 0; @@ -4167,10 +5173,10 @@ dpaa2_flow_flush(struct rte_eth_dev *dev, static int dpaa2_flow_query(struct rte_eth_dev *dev __rte_unused, - struct rte_flow *flow __rte_unused, - const struct rte_flow_action *actions __rte_unused, - void *data __rte_unused, - struct rte_flow_error *error __rte_unused) + struct rte_flow *_flow __rte_unused, + const struct rte_flow_action *actions __rte_unused, + void *data __rte_unused, + struct rte_flow_error *error __rte_unused) { return 0; } @@ -4187,11 +5193,11 @@ dpaa2_flow_query(struct rte_eth_dev *dev __rte_unused, void dpaa2_flow_clean(struct rte_eth_dev *dev) { - struct rte_flow *flow; + struct dpaa2_dev_flow *flow; 
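
From the application's perspective, the teardown path above is reached through the standard API: rte_flow_destroy() unwinds one rule (its QoS entry first when multiple Rx TCs are in use, then its FS entry, then the per-rule key/mask buffers), and rte_flow_flush() repeats that for every rule on the port. A hedged usage sketch, assuming the rules were created earlier on the same port:

#include <stdio.h>
#include <rte_flow.h>

/* Illustrative teardown of previously created DPAA2 flow rules. */
static void
example_teardown(uint16_t port_id, struct rte_flow *single_rule)
{
	struct rte_flow_error error;

	/* Remove one rule; the PMD deletes its QoS and FS entries
	 * before freeing the per-rule key/mask buffers.
	 */
	if (single_rule && rte_flow_destroy(port_id, single_rule, &error))
		printf("flow destroy failed: %s\n",
		       error.message ? error.message : "unknown");

	/* Or drop every remaining rule on the port in one call. */
	rte_flow_flush(port_id, &error);
}
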
struct dpaa2_dev_priv *priv = dev->data->dev_private; while ((flow = LIST_FIRST(&priv->flows))) - dpaa2_flow_destroy(dev, flow, NULL); + dpaa2_flow_destroy(dev, (struct rte_flow *)flow, NULL); } const struct rte_flow_ops dpaa2_flow_ops = { diff --git a/drivers/net/dpaa2/dpaa2_mux.c b/drivers/net/dpaa2/dpaa2_mux.c index 7dd5a60966..e9d48a81a8 100644 --- a/drivers/net/dpaa2/dpaa2_mux.c +++ b/drivers/net/dpaa2/dpaa2_mux.c @@ -1,5 +1,5 @@ /* SPDX-License-Identifier: BSD-3-Clause - * Copyright 2018-2021 NXP + * Copyright 2018-2021,2023 NXP */ #include @@ -32,8 +32,9 @@ struct dpaa2_dpdmux_dev { uint8_t num_ifs; /* Number of interfaces in DPDMUX */ }; -struct rte_flow { - struct dpdmux_rule_cfg rule; +#define DPAA2_MUX_FLOW_MAX_RULE_NUM 8 +struct dpaa2_mux_flow { + struct dpdmux_rule_cfg rule[DPAA2_MUX_FLOW_MAX_RULE_NUM]; }; TAILQ_HEAD(dpdmux_dev_list, dpaa2_dpdmux_dev); @@ -44,7 +45,7 @@ static struct dpaa2_dpdmux_dev *get_dpdmux_from_id(uint32_t dpdmux_id) { struct dpaa2_dpdmux_dev *dpdmux_dev = NULL; - /* Get DPBP dev handle from list using index */ + /* Get DPDMUX dev handle from list using index */ TAILQ_FOREACH(dpdmux_dev, &dpdmux_dev_list, next) { if (dpdmux_dev->dpdmux_id == dpdmux_id) break; @@ -53,184 +54,316 @@ static struct dpaa2_dpdmux_dev *get_dpdmux_from_id(uint32_t dpdmux_id) return dpdmux_dev; } -struct rte_flow * +int rte_pmd_dpaa2_mux_flow_create(uint32_t dpdmux_id, - struct rte_flow_item *pattern[], - struct rte_flow_action *actions[]) + struct rte_flow_item pattern[], + struct rte_flow_action actions[]) { struct dpaa2_dpdmux_dev *dpdmux_dev; + static struct dpkg_profile_cfg s_kg_cfg; struct dpkg_profile_cfg kg_cfg; const struct rte_flow_action_vf *vf_conf; struct dpdmux_cls_action dpdmux_action; - struct rte_flow *flow = NULL; - void *key_iova, *mask_iova, *key_cfg_iova = NULL; + uint8_t *key_va = NULL, *mask_va = NULL; + void *key_cfg_va = NULL; + uint64_t key_iova, mask_iova, key_cfg_iova; uint8_t key_size = 0; - int ret; - static int i; + int ret = 0, loop = 0; + static int s_i; + struct dpkg_extract *extract; + struct dpdmux_rule_cfg rule; - if (!pattern || !actions || !pattern[0] || !actions[0]) - return NULL; + memset(&kg_cfg, 0, sizeof(struct dpkg_profile_cfg)); /* Find the DPDMUX from dpdmux_id in our list */ dpdmux_dev = get_dpdmux_from_id(dpdmux_id); if (!dpdmux_dev) { DPAA2_PMD_ERR("Invalid dpdmux_id: %d", dpdmux_id); - return NULL; + ret = -ENODEV; + goto creation_error; + } + + key_cfg_va = rte_zmalloc(NULL, DIST_PARAM_IOVA_SIZE, + RTE_CACHE_LINE_SIZE); + if (!key_cfg_va) { + DPAA2_PMD_ERR("Unable to allocate key configure buffer"); + ret = -ENOMEM; + goto creation_error; + } + + key_cfg_iova = DPAA2_VADDR_TO_IOVA_AND_CHECK(key_cfg_va, + DIST_PARAM_IOVA_SIZE); + if (key_cfg_iova == RTE_BAD_IOVA) { + DPAA2_PMD_ERR("%s: No IOMMU map for key cfg(%p)", + __func__, key_cfg_va); + ret = -ENOBUFS; + goto creation_error; } - key_cfg_iova = rte_zmalloc(NULL, DIST_PARAM_IOVA_SIZE, - RTE_CACHE_LINE_SIZE); - if (!key_cfg_iova) { - DPAA2_PMD_ERR("Unable to allocate flow-dist parameters"); - return NULL; + key_va = rte_zmalloc(NULL, (2 * DIST_PARAM_IOVA_SIZE), + RTE_CACHE_LINE_SIZE); + if (!key_va) { + DPAA2_PMD_ERR("Unable to allocate flow dist parameter"); + ret = -ENOMEM; + goto creation_error; } - flow = rte_zmalloc(NULL, sizeof(struct rte_flow) + - (2 * DIST_PARAM_IOVA_SIZE), RTE_CACHE_LINE_SIZE); - if (!flow) { - DPAA2_PMD_ERR( - "Memory allocation failure for rule configuration"); + + key_iova = DPAA2_VADDR_TO_IOVA_AND_CHECK(key_va, + (2 * DIST_PARAM_IOVA_SIZE)); + if 
(key_iova == RTE_BAD_IOVA) { + DPAA2_PMD_ERR("%s: No IOMMU mapping for address(%p)", + __func__, key_va); + ret = -ENOBUFS; goto creation_error; } - key_iova = (void *)((size_t)flow + sizeof(struct rte_flow)); - mask_iova = (void *)((size_t)key_iova + DIST_PARAM_IOVA_SIZE); + + mask_va = key_va + DIST_PARAM_IOVA_SIZE; + mask_iova = key_iova + DIST_PARAM_IOVA_SIZE; /* Currently taking only IP protocol as an extract type. * This can be extended to other fields using pattern->type. */ memset(&kg_cfg, 0, sizeof(struct dpkg_profile_cfg)); - switch (pattern[0]->type) { - case RTE_FLOW_ITEM_TYPE_IPV4: - { - const struct rte_flow_item_ipv4 *spec; - - kg_cfg.extracts[0].extract.from_hdr.prot = NET_PROT_IP; - kg_cfg.extracts[0].extract.from_hdr.field = NH_FLD_IP_PROTO; - kg_cfg.extracts[0].type = DPKG_EXTRACT_FROM_HDR; - kg_cfg.extracts[0].extract.from_hdr.type = DPKG_FULL_FIELD; - kg_cfg.num_extracts = 1; - - spec = (const struct rte_flow_item_ipv4 *)pattern[0]->spec; - memcpy(key_iova, (const void *)(&spec->hdr.next_proto_id), - sizeof(uint8_t)); - memcpy(mask_iova, pattern[0]->mask, sizeof(uint8_t)); - key_size = sizeof(uint8_t); - } - break; - - case RTE_FLOW_ITEM_TYPE_UDP: - { - const struct rte_flow_item_udp *spec; - uint16_t udp_dst_port; - - kg_cfg.extracts[0].extract.from_hdr.prot = NET_PROT_UDP; - kg_cfg.extracts[0].extract.from_hdr.field = NH_FLD_UDP_PORT_DST; - kg_cfg.extracts[0].type = DPKG_EXTRACT_FROM_HDR; - kg_cfg.extracts[0].extract.from_hdr.type = DPKG_FULL_FIELD; - kg_cfg.num_extracts = 1; - - spec = (const struct rte_flow_item_udp *)pattern[0]->spec; - udp_dst_port = rte_constant_bswap16(spec->hdr.dst_port); - memcpy((void *)key_iova, (const void *)&udp_dst_port, - sizeof(rte_be16_t)); - memcpy(mask_iova, pattern[0]->mask, sizeof(uint16_t)); - key_size = sizeof(uint16_t); - } - break; - - case RTE_FLOW_ITEM_TYPE_ETH: - { - const struct rte_flow_item_eth *spec; - uint16_t eth_type; - - kg_cfg.extracts[0].extract.from_hdr.prot = NET_PROT_ETH; - kg_cfg.extracts[0].extract.from_hdr.field = NH_FLD_ETH_TYPE; - kg_cfg.extracts[0].type = DPKG_EXTRACT_FROM_HDR; - kg_cfg.extracts[0].extract.from_hdr.type = DPKG_FULL_FIELD; - kg_cfg.num_extracts = 1; - - spec = (const struct rte_flow_item_eth *)pattern[0]->spec; - eth_type = rte_constant_bswap16(spec->hdr.ether_type); - memcpy((void *)key_iova, (const void *)ð_type, - sizeof(rte_be16_t)); - memcpy(mask_iova, pattern[0]->mask, sizeof(uint16_t)); - key_size = sizeof(uint16_t); - } - break; - - case RTE_FLOW_ITEM_TYPE_RAW: - { - const struct rte_flow_item_raw *spec; - - spec = (const struct rte_flow_item_raw *)pattern[0]->spec; - kg_cfg.extracts[0].extract.from_data.offset = spec->offset; - kg_cfg.extracts[0].extract.from_data.size = spec->length; - kg_cfg.extracts[0].type = DPKG_EXTRACT_FROM_DATA; - kg_cfg.num_extracts = 1; - memcpy((void *)key_iova, (const void *)spec->pattern, - spec->length); - memcpy(mask_iova, pattern[0]->mask, spec->length); - - key_size = spec->length; - } - break; + while (pattern[loop].type != RTE_FLOW_ITEM_TYPE_END) { + if (kg_cfg.num_extracts >= DPKG_MAX_NUM_OF_EXTRACTS) { + DPAA2_PMD_ERR("Too many extracts(%d)", + kg_cfg.num_extracts); + ret = -ENOTSUP; + goto creation_error; + } + switch (pattern[loop].type) { + case RTE_FLOW_ITEM_TYPE_IPV4: + { + const struct rte_flow_item_ipv4 *spec; + const struct rte_flow_item_ipv4 *mask; + + extract = &kg_cfg.extracts[kg_cfg.num_extracts]; + extract->type = DPKG_EXTRACT_FROM_HDR; + extract->extract.from_hdr.prot = NET_PROT_IP; + extract->extract.from_hdr.field = 
NH_FLD_IP_PROTO; + extract->extract.from_hdr.type = DPKG_FULL_FIELD; + + kg_cfg.num_extracts++; + + spec = pattern[loop].spec; + mask = pattern[loop].mask; + rte_memcpy(&key_va[key_size], + &spec->hdr.next_proto_id, sizeof(uint8_t)); + if (mask) { + rte_memcpy(&mask_va[key_size], + &mask->hdr.next_proto_id, + sizeof(uint8_t)); + } else { + mask_va[key_size] = 0xff; + } + key_size += sizeof(uint8_t); + } + break; + + case RTE_FLOW_ITEM_TYPE_VLAN: + { + const struct rte_flow_item_vlan *spec; + const struct rte_flow_item_vlan *mask; + + extract = &kg_cfg.extracts[kg_cfg.num_extracts]; + extract->type = DPKG_EXTRACT_FROM_HDR; + extract->extract.from_hdr.prot = NET_PROT_VLAN; + extract->extract.from_hdr.field = NH_FLD_VLAN_TCI; + extract->extract.from_hdr.type = DPKG_FULL_FIELD; + + kg_cfg.num_extracts++; + + spec = pattern[loop].spec; + mask = pattern[loop].mask; + rte_memcpy(&key_va[key_size], + &spec->tci, sizeof(uint16_t)); + if (mask) { + rte_memcpy(&mask_va[key_size], + &mask->tci, sizeof(uint16_t)); + } else { + memset(&mask_va[key_size], 0xff, + sizeof(rte_be16_t)); + } + key_size += sizeof(uint16_t); + } + break; + + case RTE_FLOW_ITEM_TYPE_UDP: + { + const struct rte_flow_item_udp *spec; + const struct rte_flow_item_udp *mask; + + extract = &kg_cfg.extracts[kg_cfg.num_extracts]; + extract->type = DPKG_EXTRACT_FROM_HDR; + extract->extract.from_hdr.prot = NET_PROT_UDP; + extract->extract.from_hdr.type = DPKG_FULL_FIELD; + extract->extract.from_hdr.field = NH_FLD_UDP_PORT_DST; + kg_cfg.num_extracts++; + + spec = pattern[loop].spec; + mask = pattern[loop].mask; + rte_memcpy(&key_va[key_size], + &spec->hdr.dst_port, sizeof(rte_be16_t)); + if (mask) { + rte_memcpy(&mask_va[key_size], + &mask->hdr.dst_port, + sizeof(rte_be16_t)); + } else { + memset(&mask_va[key_size], 0xff, + sizeof(rte_be16_t)); + } + key_size += sizeof(rte_be16_t); + } + break; + + case RTE_FLOW_ITEM_TYPE_ETH: + { + const struct rte_flow_item_eth *spec; + const struct rte_flow_item_eth *mask; + + extract = &kg_cfg.extracts[kg_cfg.num_extracts]; + extract->type = DPKG_EXTRACT_FROM_HDR; + extract->extract.from_hdr.prot = NET_PROT_ETH; + extract->extract.from_hdr.type = DPKG_FULL_FIELD; + extract->extract.from_hdr.field = NH_FLD_ETH_TYPE; + kg_cfg.num_extracts++; + + spec = pattern[loop].spec; + mask = pattern[loop].mask; + rte_memcpy(&key_va[key_size], + &spec->type, sizeof(rte_be16_t)); + if (mask) { + rte_memcpy(&mask_va[key_size], + &mask->type, sizeof(rte_be16_t)); + } else { + memset(&mask_va[key_size], 0xff, + sizeof(rte_be16_t)); + } + key_size += sizeof(rte_be16_t); + } + break; + + case RTE_FLOW_ITEM_TYPE_RAW: + { + const struct rte_flow_item_raw *spec; + const struct rte_flow_item_raw *mask; + + spec = pattern[loop].spec; + mask = pattern[loop].mask; + extract = &kg_cfg.extracts[kg_cfg.num_extracts]; + extract->type = DPKG_EXTRACT_FROM_DATA; + extract->extract.from_data.offset = spec->offset; + extract->extract.from_data.size = spec->length; + kg_cfg.num_extracts++; + + rte_memcpy(&key_va[key_size], + spec->pattern, spec->length); + if (mask && mask->pattern) { + rte_memcpy(&mask_va[key_size], + mask->pattern, spec->length); + } else { + memset(&mask_va[key_size], 0xff, spec->length); + } + + key_size += spec->length; + } + break; - default: - DPAA2_PMD_ERR("Not supported pattern type: %d", - pattern[0]->type); - goto creation_error; + default: + DPAA2_PMD_ERR("Not supported pattern[%d] type: %d", + loop, pattern[loop].type); + ret = -ENOTSUP; + goto creation_error; + } + loop++; } - ret = 
dpkg_prepare_key_cfg(&kg_cfg, key_cfg_iova); + ret = dpkg_prepare_key_cfg(&kg_cfg, key_cfg_va); if (ret) { DPAA2_PMD_ERR("dpkg_prepare_key_cfg failed: err(%d)", ret); goto creation_error; } - /* Multiple rules with same DPKG extracts (kg_cfg.extracts) like same - * offset and length values in raw is supported right now. Different - * values of kg_cfg may not work. - */ - if (i == 0) { - ret = dpdmux_set_custom_key(&dpdmux_dev->dpdmux, CMD_PRI_LOW, - dpdmux_dev->token, - (uint64_t)(DPAA2_VADDR_TO_IOVA(key_cfg_iova))); + if (!s_i) { + ret = dpdmux_set_custom_key(&dpdmux_dev->dpdmux, + CMD_PRI_LOW, dpdmux_dev->token, key_cfg_iova); if (ret) { DPAA2_PMD_ERR("dpdmux_set_custom_key failed: err(%d)", - ret); + ret); + goto creation_error; + } + rte_memcpy(&s_kg_cfg, &kg_cfg, sizeof(struct dpkg_profile_cfg)); + } else { + if (memcmp(&s_kg_cfg, &kg_cfg, + sizeof(struct dpkg_profile_cfg))) { + DPAA2_PMD_ERR("%s: Single flow support only.", + __func__); + ret = -ENOTSUP; goto creation_error; } } - /* As now our key extract parameters are set, let us configure - * the rule. - */ - flow->rule.key_iova = (uint64_t)(DPAA2_VADDR_TO_IOVA(key_iova)); - flow->rule.mask_iova = (uint64_t)(DPAA2_VADDR_TO_IOVA(mask_iova)); - flow->rule.key_size = key_size; - flow->rule.entry_index = i++; - vf_conf = (const struct rte_flow_action_vf *)(actions[0]->conf); + vf_conf = actions[0].conf; if (vf_conf->id == 0 || vf_conf->id > dpdmux_dev->num_ifs) { - DPAA2_PMD_ERR("Invalid destination id"); + DPAA2_PMD_ERR("Invalid destination id(%d)", vf_conf->id); goto creation_error; } dpdmux_action.dest_if = vf_conf->id; - ret = dpdmux_add_custom_cls_entry(&dpdmux_dev->dpdmux, CMD_PRI_LOW, - dpdmux_dev->token, &flow->rule, - &dpdmux_action); + rule.key_iova = key_iova; + rule.mask_iova = mask_iova; + rule.key_size = key_size; + rule.entry_index = s_i; + s_i++; + + /* As now our key extract parameters are set, let us configure + * the rule. 
+ */ + ret = dpdmux_add_custom_cls_entry(&dpdmux_dev->dpdmux, + CMD_PRI_LOW, dpdmux_dev->token, + &rule, &dpdmux_action); if (ret) { - DPAA2_PMD_ERR("dpdmux_add_custom_cls_entry failed: err(%d)", - ret); + DPAA2_PMD_ERR("Add classification entry failed:err(%d)", ret); goto creation_error; } - return flow; - creation_error: - rte_free((void *)key_cfg_iova); - rte_free((void *)flow); - return NULL; + if (key_cfg_va) + rte_free(key_cfg_va); + if (key_va) + rte_free(key_va); + + return ret; +} + +int +rte_pmd_dpaa2_mux_flow_l2(uint32_t dpdmux_id, + uint8_t mac_addr[6], uint16_t vlan_id, int dest_if) +{ + struct dpaa2_dpdmux_dev *dpdmux_dev; + struct dpdmux_l2_rule rule; + int ret, i; + + /* Find the DPDMUX from dpdmux_id in our list */ + dpdmux_dev = get_dpdmux_from_id(dpdmux_id); + if (!dpdmux_dev) { + DPAA2_PMD_ERR("Invalid dpdmux_id: %d", dpdmux_id); + return -ENODEV; + } + + for (i = 0; i < 6; i++) + rule.mac_addr[i] = mac_addr[i]; + rule.vlan_id = vlan_id; + + ret = dpdmux_if_add_l2_rule(&dpdmux_dev->dpdmux, CMD_PRI_LOW, + dpdmux_dev->token, dest_if, &rule); + if (ret) { + DPAA2_PMD_ERR("dpdmux_if_add_l2_rule failed:err(%d)", ret); + return ret; + } + + return 0; } int @@ -259,24 +392,110 @@ rte_pmd_dpaa2_mux_rx_frame_len(uint32_t dpdmux_id, uint16_t max_rx_frame_len) return ret; } +/* dump the status of the dpaa2_mux counters on the console */ +void +rte_pmd_dpaa2_mux_dump_counter(FILE *f, uint32_t dpdmux_id, int num_if) +{ + struct dpaa2_dpdmux_dev *dpdmux; + uint64_t counter; + int ret; + int if_id; + + /* Find the DPDMUX from dpdmux_id in our list */ + dpdmux = get_dpdmux_from_id(dpdmux_id); + if (!dpdmux) { + DPAA2_PMD_ERR("Invalid dpdmux_id: %d", dpdmux_id); + return; + } + + for (if_id = 0; if_id < num_if; if_id++) { + fprintf(f, "dpdmux.%d\n", if_id); + + ret = dpdmux_if_get_counter(&dpdmux->dpdmux, CMD_PRI_LOW, + dpdmux->token, if_id, DPDMUX_CNT_ING_FRAME, &counter); + if (!ret) + fprintf(f, "DPDMUX_CNT_ING_FRAME %" PRIu64 "\n", + counter); + ret = dpdmux_if_get_counter(&dpdmux->dpdmux, CMD_PRI_LOW, + dpdmux->token, if_id, DPDMUX_CNT_ING_BYTE, &counter); + if (!ret) + fprintf(f, "DPDMUX_CNT_ING_BYTE %" PRIu64 "\n", + counter); + ret = dpdmux_if_get_counter(&dpdmux->dpdmux, CMD_PRI_LOW, + dpdmux->token, if_id, DPDMUX_CNT_ING_FLTR_FRAME, + &counter); + if (!ret) + fprintf(f, "DPDMUX_CNT_ING_FLTR_FRAME %" PRIu64 "\n", + counter); + ret = dpdmux_if_get_counter(&dpdmux->dpdmux, CMD_PRI_LOW, + dpdmux->token, if_id, DPDMUX_CNT_ING_FRAME_DISCARD, + &counter); + if (!ret) + fprintf(f, "DPDMUX_CNT_ING_FRAME_DISCARD %" PRIu64 "\n", + counter); + ret = dpdmux_if_get_counter(&dpdmux->dpdmux, CMD_PRI_LOW, + dpdmux->token, if_id, DPDMUX_CNT_ING_MCAST_FRAME, + &counter); + if (!ret) + fprintf(f, "DPDMUX_CNT_ING_MCAST_FRAME %" PRIu64 "\n", + counter); + ret = dpdmux_if_get_counter(&dpdmux->dpdmux, CMD_PRI_LOW, + dpdmux->token, if_id, DPDMUX_CNT_ING_MCAST_BYTE, + &counter); + if (!ret) + fprintf(f, "DPDMUX_CNT_ING_MCAST_BYTE %" PRIu64 "\n", + counter); + ret = dpdmux_if_get_counter(&dpdmux->dpdmux, CMD_PRI_LOW, + dpdmux->token, if_id, DPDMUX_CNT_ING_BCAST_FRAME, + &counter); + if (!ret) + fprintf(f, "DPDMUX_CNT_ING_BCAST_FRAME %" PRIu64 "\n", + counter); + ret = dpdmux_if_get_counter(&dpdmux->dpdmux, CMD_PRI_LOW, + dpdmux->token, if_id, DPDMUX_CNT_ING_BCAST_BYTES, + &counter); + if (!ret) + fprintf(f, "DPDMUX_CNT_ING_BCAST_BYTES %" PRIu64 "\n", + counter); + ret = dpdmux_if_get_counter(&dpdmux->dpdmux, CMD_PRI_LOW, + dpdmux->token, if_id, DPDMUX_CNT_EGR_FRAME, &counter); + if (!ret) + fprintf(f, 
"DPDMUX_CNT_EGR_FRAME %" PRIu64 "\n", + counter); + ret = dpdmux_if_get_counter(&dpdmux->dpdmux, CMD_PRI_LOW, + dpdmux->token, if_id, DPDMUX_CNT_EGR_BYTE, &counter); + if (!ret) + fprintf(f, "DPDMUX_CNT_EGR_BYTE %" PRIu64 "\n", + counter); + ret = dpdmux_if_get_counter(&dpdmux->dpdmux, CMD_PRI_LOW, + dpdmux->token, if_id, DPDMUX_CNT_EGR_FRAME_DISCARD, + &counter); + if (!ret) + fprintf(f, "DPDMUX_CNT_EGR_FRAME_DISCARD %" PRIu64 "\n", + counter); + } +} + static int dpaa2_create_dpdmux_device(int vdev_fd __rte_unused, - struct vfio_device_info *obj_info __rte_unused, - int dpdmux_id) + struct vfio_device_info *obj_info __rte_unused, + struct rte_dpaa2_device *obj) { struct dpaa2_dpdmux_dev *dpdmux_dev; struct dpdmux_attr attr; - int ret; + int ret, dpdmux_id = obj->object_id; uint16_t maj_ver; uint16_t min_ver; + uint8_t skip_reset_flags; PMD_INIT_FUNC_TRACE(); /* Allocate DPAA2 dpdmux handle */ - dpdmux_dev = rte_malloc(NULL, sizeof(struct dpaa2_dpdmux_dev), 0); + dpdmux_dev = rte_zmalloc(NULL, + sizeof(struct dpaa2_dpdmux_dev), RTE_CACHE_LINE_SIZE); if (!dpdmux_dev) { DPAA2_PMD_ERR("Memory allocation failed for DPDMUX Device"); - return -1; + return -ENOMEM; } /* Open the dpdmux object */ @@ -295,12 +514,18 @@ dpaa2_create_dpdmux_device(int vdev_fd __rte_unused, goto init_err; } - ret = dpdmux_if_set_default(&dpdmux_dev->dpdmux, CMD_PRI_LOW, - dpdmux_dev->token, attr.default_if); - if (ret) { - DPAA2_PMD_ERR("setting default interface failed in %s", - __func__); - goto init_err; + if (attr.method != DPDMUX_METHOD_C_VLAN_MAC) { + ret = dpdmux_if_set_default(&dpdmux_dev->dpdmux, CMD_PRI_LOW, + dpdmux_dev->token, attr.default_if); + if (ret) { + DPAA2_PMD_ERR("setting default interface failed in %s", + __func__); + goto init_err; + } + skip_reset_flags = DPDMUX_SKIP_DEFAULT_INTERFACE + | DPDMUX_SKIP_UNICAST_RULES | DPDMUX_SKIP_MULTICAST_RULES; + } else { + skip_reset_flags = DPDMUX_SKIP_DEFAULT_INTERFACE; } ret = dpdmux_get_api_version(&dpdmux_dev->dpdmux, CMD_PRI_LOW, @@ -316,10 +541,7 @@ dpaa2_create_dpdmux_device(int vdev_fd __rte_unused, */ if (maj_ver >= 6 && min_ver >= 6) { ret = dpdmux_set_resetable(&dpdmux_dev->dpdmux, CMD_PRI_LOW, - dpdmux_dev->token, - DPDMUX_SKIP_DEFAULT_INTERFACE | - DPDMUX_SKIP_UNICAST_RULES | - DPDMUX_SKIP_MULTICAST_RULES); + dpdmux_dev->token, skip_reset_flags); if (ret) { DPAA2_PMD_ERR("setting default interface failed in %s", __func__); @@ -331,8 +553,11 @@ dpaa2_create_dpdmux_device(int vdev_fd __rte_unused, struct dpdmux_error_cfg mux_err_cfg; memset(&mux_err_cfg, 0, sizeof(mux_err_cfg)); + /* Note: Discarded flag(DPDMUX_ERROR_DISC) has effect only when + * ERROR_ACTION is set to DPNI_ERROR_ACTION_SEND_TO_ERROR_QUEUE. 
+ */ + mux_err_cfg.errors = DPDMUX_ALL_ERRORS; mux_err_cfg.error_action = DPDMUX_ERROR_ACTION_CONTINUE; - mux_err_cfg.errors = DPDMUX_ERROR_DISC; ret = dpdmux_if_set_errors_behavior(&dpdmux_dev->dpdmux, CMD_PRI_LOW, @@ -358,9 +583,25 @@ dpaa2_create_dpdmux_device(int vdev_fd __rte_unused, return -1; } +static void +dpaa2_close_dpdmux_device(int object_id) +{ + struct dpaa2_dpdmux_dev *dpdmux_dev; + + dpdmux_dev = get_dpdmux_from_id((uint32_t)object_id); + + if (dpdmux_dev) { + dpdmux_close(&dpdmux_dev->dpdmux, CMD_PRI_LOW, + dpdmux_dev->token); + TAILQ_REMOVE(&dpdmux_dev_list, dpdmux_dev, next); + rte_free(dpdmux_dev); + } +} + static struct rte_dpaa2_object rte_dpaa2_dpdmux_obj = { .dev_type = DPAA2_MUX, .create = dpaa2_create_dpdmux_device, + .close = dpaa2_close_dpdmux_device, }; RTE_PMD_REGISTER_DPAA2_OBJECT(dpdmux, rte_dpaa2_dpdmux_obj); diff --git a/drivers/net/dpaa2/dpaa2_parse_dump.h b/drivers/net/dpaa2/dpaa2_parse_dump.h new file mode 100644 index 0000000000..78fd3b768c --- /dev/null +++ b/drivers/net/dpaa2/dpaa2_parse_dump.h @@ -0,0 +1,250 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * + * Copyright 2022 NXP + * + */ + +#ifndef _DPAA2_PARSE_DUMP_H +#define _DPAA2_PARSE_DUMP_H + +#include +#include + +#include +#include "dpaa2_tm.h" + +#include +#include + +#include "base/dpaa2_hw_dpni_annot.h" + +#define DPAA2_PR_PRINT printf + +struct dpaa2_faf_bit_info { + const char *name; + int position; +}; + +struct dpaa2_fapr_field_info { + const char *name; + uint16_t value; +}; + +struct dpaa2_fapr_array { + union { + uint64_t pr_64[DPAA2_FAPR_SIZE / 8]; + uint8_t pr[DPAA2_FAPR_SIZE]; + }; +}; + +#define NEXT_HEADER_NAME "Next Header" +#define ETH_OFF_NAME "ETH OFFSET" +#define VLAN_TCI_OFF_NAME "VLAN TCI OFFSET" +#define LAST_ENTRY_OFF_NAME "LAST ETYPE Offset" +#define L3_OFF_NAME "L3 Offset" +#define L4_OFF_NAME "L4 Offset" +#define L5_OFF_NAME "L5 Offset" +#define NEXT_HEADER_OFF_NAME "Next Header Offset" + +static const +struct dpaa2_fapr_field_info support_dump_fields[] = { + { + .name = NEXT_HEADER_NAME, + }, + { + .name = ETH_OFF_NAME, + }, + { + .name = VLAN_TCI_OFF_NAME, + }, + { + .name = LAST_ENTRY_OFF_NAME, + }, + { + .name = L3_OFF_NAME, + }, + { + .name = L4_OFF_NAME, + }, + { + .name = L5_OFF_NAME, + }, + { + .name = NEXT_HEADER_OFF_NAME, + } +}; + +static inline void +dpaa2_print_faf(struct dpaa2_fapr_array *fapr) +{ + const int faf_bit_len = DPAA2_FAF_TOTAL_SIZE * 8; + struct dpaa2_faf_bit_info faf_bits[faf_bit_len]; + int i, byte_pos, bit_pos, vxlan = 0, vxlan_vlan = 0; + struct rte_ether_hdr vxlan_in_eth; + uint16_t vxlan_vlan_tci; + + for (i = 0; i < faf_bit_len; i++) { + faf_bits[i].position = i; + if (i == FAFE_VXLAN_IN_VLAN_FRAM) + faf_bits[i].name = "VXLAN VLAN Present"; + else if (i == FAFE_VXLAN_IN_IPV4_FRAM) + faf_bits[i].name = "VXLAN IPV4 Present"; + else if (i == FAFE_VXLAN_IN_IPV6_FRAM) + faf_bits[i].name = "VXLAN IPV6 Present"; + else if (i == FAFE_VXLAN_IN_UDP_FRAM) + faf_bits[i].name = "VXLAN UDP Present"; + else if (i == FAFE_VXLAN_IN_TCP_FRAM) + faf_bits[i].name = "VXLAN TCP Present"; + else if (i == FAF_VXLAN_FRAM) + faf_bits[i].name = "VXLAN Present"; + else if (i == FAF_ETH_FRAM) + faf_bits[i].name = "Ethernet MAC Present"; + else if (i == FAF_VLAN_FRAM) + faf_bits[i].name = "VLAN 1 Present"; + else if (i == FAF_IPV4_FRAM) + faf_bits[i].name = "IPv4 1 Present"; + else if (i == FAF_IPV6_FRAM) + faf_bits[i].name = "IPv6 1 Present"; + else if (i == FAF_IP_FRAG_FRAM) + faf_bits[i].name = "IP fragment Present"; + else if (i == FAF_UDP_FRAM) + 
faf_bits[i].name = "UDP Present"; + else if (i == FAF_TCP_FRAM) + faf_bits[i].name = "TCP Present"; + else + faf_bits[i].name = "Check RM for this unusual frame"; + } + + DPAA2_PR_PRINT("Frame Annotation Flags:\r\n"); + for (i = 0; i < faf_bit_len; i++) { + byte_pos = i / 8 + DPAA2_FAFE_PSR_OFFSET; + bit_pos = i % 8; + if (fapr->pr[byte_pos] & (1 << (7 - bit_pos))) { + DPAA2_PR_PRINT("FAF bit %d : %s\r\n", + faf_bits[i].position, faf_bits[i].name); + if (i == FAF_VXLAN_FRAM) + vxlan = 1; + } + } + + if (vxlan) { + vxlan_in_eth.dst_addr.addr_bytes[0] = + fapr->pr[DPAA2_VXLAN_IN_DADDR0_OFFSET]; + vxlan_in_eth.dst_addr.addr_bytes[1] = + fapr->pr[DPAA2_VXLAN_IN_DADDR1_OFFSET]; + vxlan_in_eth.dst_addr.addr_bytes[2] = + fapr->pr[DPAA2_VXLAN_IN_DADDR2_OFFSET]; + vxlan_in_eth.dst_addr.addr_bytes[3] = + fapr->pr[DPAA2_VXLAN_IN_DADDR3_OFFSET]; + vxlan_in_eth.dst_addr.addr_bytes[4] = + fapr->pr[DPAA2_VXLAN_IN_DADDR4_OFFSET]; + vxlan_in_eth.dst_addr.addr_bytes[5] = + fapr->pr[DPAA2_VXLAN_IN_DADDR5_OFFSET]; + + vxlan_in_eth.src_addr.addr_bytes[0] = + fapr->pr[DPAA2_VXLAN_IN_SADDR0_OFFSET]; + vxlan_in_eth.src_addr.addr_bytes[1] = + fapr->pr[DPAA2_VXLAN_IN_SADDR1_OFFSET]; + vxlan_in_eth.src_addr.addr_bytes[2] = + fapr->pr[DPAA2_VXLAN_IN_SADDR2_OFFSET]; + vxlan_in_eth.src_addr.addr_bytes[3] = + fapr->pr[DPAA2_VXLAN_IN_SADDR3_OFFSET]; + vxlan_in_eth.src_addr.addr_bytes[4] = + fapr->pr[DPAA2_VXLAN_IN_SADDR4_OFFSET]; + vxlan_in_eth.src_addr.addr_bytes[5] = + fapr->pr[DPAA2_VXLAN_IN_SADDR5_OFFSET]; + + vxlan_in_eth.ether_type = + fapr->pr[DPAA2_VXLAN_IN_TYPE_OFFSET]; + vxlan_in_eth.ether_type = + vxlan_in_eth.ether_type << 8; + vxlan_in_eth.ether_type |= + fapr->pr[DPAA2_VXLAN_IN_TYPE_OFFSET + 1]; + + if (vxlan_in_eth.ether_type == RTE_ETHER_TYPE_VLAN) + vxlan_vlan = 1; + DPAA2_PR_PRINT("VXLAN inner eth:\r\n"); + DPAA2_PR_PRINT("dst addr: "); + for (i = 0; i < RTE_ETHER_ADDR_LEN; i++) { + if (i != 0) + DPAA2_PR_PRINT(":"); + DPAA2_PR_PRINT("%02x", + vxlan_in_eth.dst_addr.addr_bytes[i]); + } + DPAA2_PR_PRINT("\r\n"); + DPAA2_PR_PRINT("src addr: "); + for (i = 0; i < RTE_ETHER_ADDR_LEN; i++) { + if (i != 0) + DPAA2_PR_PRINT(":"); + DPAA2_PR_PRINT("%02x", + vxlan_in_eth.src_addr.addr_bytes[i]); + } + DPAA2_PR_PRINT("\r\n"); + DPAA2_PR_PRINT("type: 0x%04x\r\n", + vxlan_in_eth.ether_type); + if (vxlan_vlan) { + vxlan_vlan_tci = fapr->pr[DPAA2_VXLAN_IN_TCI_OFFSET]; + vxlan_vlan_tci = vxlan_vlan_tci << 8; + vxlan_vlan_tci |= + fapr->pr[DPAA2_VXLAN_IN_TCI_OFFSET + 1]; + + DPAA2_PR_PRINT("vlan tci: 0x%04x\r\n", + vxlan_vlan_tci); + } + } +} + +static inline void +dpaa2_print_parse_result(struct dpaa2_annot_hdr *annotation) +{ + struct dpaa2_fapr_array fapr; + struct dpaa2_fapr_field_info + fapr_fields[sizeof(support_dump_fields) / + sizeof(struct dpaa2_fapr_field_info)]; + uint64_t len, i; + + memcpy(&fapr, &annotation->word3, DPAA2_FAPR_SIZE); + for (i = 0; i < (DPAA2_FAPR_SIZE / 8); i++) + fapr.pr_64[i] = rte_cpu_to_be_64(fapr.pr_64[i]); + + memcpy(fapr_fields, support_dump_fields, + sizeof(support_dump_fields)); + + for (i = 0; + i < sizeof(fapr_fields) / + sizeof(struct dpaa2_fapr_field_info); + i++) { + if (!strcmp(fapr_fields[i].name, NEXT_HEADER_NAME)) { + fapr_fields[i].value = fapr.pr[DPAA2_PR_NXTHDR_OFFSET]; + fapr_fields[i].value = fapr_fields[i].value << 8; + fapr_fields[i].value |= + fapr.pr[DPAA2_PR_NXTHDR_OFFSET + 1]; + } else if (!strcmp(fapr_fields[i].name, ETH_OFF_NAME)) { + fapr_fields[i].value = fapr.pr[DPAA2_PR_ETH_OFF_OFFSET]; + } else if (!strcmp(fapr_fields[i].name, VLAN_TCI_OFF_NAME)) { + 
fapr_fields[i].value = fapr.pr[DPAA2_PR_TCI_OFF_OFFSET]; + } else if (!strcmp(fapr_fields[i].name, LAST_ENTRY_OFF_NAME)) { + fapr_fields[i].value = + fapr.pr[DPAA2_PR_LAST_ETYPE_OFFSET]; + } else if (!strcmp(fapr_fields[i].name, L3_OFF_NAME)) { + fapr_fields[i].value = fapr.pr[DPAA2_PR_L3_OFF_OFFSET]; + } else if (!strcmp(fapr_fields[i].name, L4_OFF_NAME)) { + fapr_fields[i].value = fapr.pr[DPAA2_PR_L4_OFF_OFFSET]; + } else if (!strcmp(fapr_fields[i].name, L5_OFF_NAME)) { + fapr_fields[i].value = fapr.pr[DPAA2_PR_L5_OFF_OFFSET]; + } else if (!strcmp(fapr_fields[i].name, NEXT_HEADER_OFF_NAME)) { + fapr_fields[i].value = + fapr.pr[DPAA2_PR_NXTHDR_OFF_OFFSET]; + } + } + + len = sizeof(fapr_fields) / sizeof(struct dpaa2_fapr_field_info); + DPAA2_PR_PRINT("Parse Result:\r\n"); + for (i = 0; i < len; i++) { + DPAA2_PR_PRINT("%21s : 0x%02x\r\n", + fapr_fields[i].name, fapr_fields[i].value); + } + dpaa2_print_faf(&fapr); +} + +#endif diff --git a/drivers/net/dpaa2/dpaa2_ptp.c b/drivers/net/dpaa2/dpaa2_ptp.c index c08aa0f3bf..751e558c73 100644 --- a/drivers/net/dpaa2/dpaa2_ptp.c +++ b/drivers/net/dpaa2/dpaa2_ptp.c @@ -1,5 +1,5 @@ /* SPDX-License-Identifier: BSD-3-Clause - * Copyright 2019 NXP + * Copyright 2019, 2023 NXP */ #include @@ -134,11 +134,11 @@ int dpaa2_timesync_read_rx_timestamp(struct rte_eth_dev *dev, #if defined(RTE_LIBRTE_IEEE1588) static int dpaa2_create_dprtc_device(int vdev_fd __rte_unused, - struct vfio_device_info *obj_info __rte_unused, - int dprtc_id) + struct vfio_device_info *obj_info __rte_unused, + struct rte_dpaa2_device *obj) { struct dprtc_attr attr; - int ret; + int ret, dprtc_id = obj->object_id; PMD_INIT_FUNC_TRACE(); diff --git a/drivers/net/dpaa2/dpaa2_rxtx.c b/drivers/net/dpaa2/dpaa2_rxtx.c index 92e9dd40dc..e3b6c7e460 100644 --- a/drivers/net/dpaa2/dpaa2_rxtx.c +++ b/drivers/net/dpaa2/dpaa2_rxtx.c @@ -1,7 +1,7 @@ /* SPDX-License-Identifier: BSD-3-Clause * * Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved. 
- * Copyright 2016-2021 NXP + * Copyright 2016-2024 NXP * */ @@ -25,6 +25,7 @@ #include "dpaa2_pmd_logs.h" #include "dpaa2_ethdev.h" #include "base/dpaa2_hw_dpni_annot.h" +#include "dpaa2_parse_dump.h" static inline uint32_t __rte_hot dpaa2_dev_rx_parse_slow(struct rte_mbuf *mbuf, @@ -57,6 +58,9 @@ dpaa2_dev_rx_parse_new(struct rte_mbuf *m, const struct qbman_fd *fd, struct dpaa2_annot_hdr *annotation = (struct dpaa2_annot_hdr *)hw_annot_addr; + if (unlikely(dpaa2_print_parser_result)) + dpaa2_print_parse_result(annotation); + m->packet_type = RTE_PTYPE_UNKNOWN; switch (frc) { case DPAA2_PKT_TYPE_ETHER: @@ -252,6 +256,9 @@ dpaa2_dev_rx_parse(struct rte_mbuf *mbuf, void *hw_annot_addr) else mbuf->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD; + if (unlikely(dpaa2_print_parser_result)) + dpaa2_print_parse_result(annotation); + if (dpaa2_enable_ts[mbuf->port]) { *dpaa2_timestamp_dynfield(mbuf) = annotation->word2; mbuf->ol_flags |= dpaa2_timestamp_rx_dynflag; @@ -381,6 +388,7 @@ eth_fd_to_mbuf(const struct qbman_fd *fd, mbuf->pkt_len = mbuf->data_len; mbuf->port = port_id; mbuf->next = NULL; + mbuf->hash.sched.color = DPAA2_GET_FD_DROPP(fd); rte_mbuf_refcnt_set(mbuf, 1); #ifdef RTE_LIBRTE_MEMPOOL_DEBUG rte_mempool_check_cookies(rte_mempool_from_obj((void *)mbuf), @@ -647,7 +655,7 @@ dump_err_pkts(struct dpaa2_queue *dpaa2_q) } swp = DPAA2_PER_LCORE_PORTAL; - dq_storage = dpaa2_q->q_storage[lcore_id].dq_storage[0]; + dq_storage = dpaa2_q->q_storage[lcore_id]->dq_storage[0]; qbman_pull_desc_clear(&pulldesc); qbman_pull_desc_set_fq(&pulldesc, fqid); qbman_pull_desc_set_storage(&pulldesc, dq_storage, @@ -716,7 +724,7 @@ uint16_t dpaa2_dev_prefetch_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts) { /* Function receive frames for a given device and VQ*/ - struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)queue; + struct dpaa2_queue *dpaa2_q = queue; struct qbman_result *dq_storage, *dq_storage1 = NULL; uint32_t fqid = dpaa2_q->fqid; int ret, num_rx = 0, pull_size; @@ -724,10 +732,12 @@ dpaa2_dev_prefetch_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts) struct qbman_swp *swp; const struct qbman_fd *fd; struct qbman_pull_desc pulldesc; - struct queue_storage_info_t *q_storage = dpaa2_q->q_storage; + struct queue_storage_info_t *q_storage; struct rte_eth_dev_data *eth_data = dpaa2_q->eth_data; struct dpaa2_dev_priv *priv = eth_data->dev_private; + q_storage = dpaa2_q->q_storage[rte_lcore_id()]; + if (unlikely(dpaa2_enable_err_queue)) dump_err_pkts(priv->rx_err_vq); @@ -958,7 +968,7 @@ uint16_t dpaa2_dev_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts) { /* Function receive frames for a given device and VQ */ - struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)queue; + struct dpaa2_queue *dpaa2_q = queue; struct qbman_result *dq_storage; uint32_t fqid = dpaa2_q->fqid; int ret, num_rx = 0, next_pull = nb_pkts, num_pulled; @@ -984,7 +994,7 @@ dpaa2_dev_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts) swp = DPAA2_PER_LCORE_PORTAL; do { - dq_storage = dpaa2_q->q_storage->dq_storage[0]; + dq_storage = dpaa2_q->q_storage[0]->dq_storage[0]; qbman_pull_desc_clear(&pulldesc); qbman_pull_desc_set_fq(&pulldesc, fqid); qbman_pull_desc_set_storage(&pulldesc, dq_storage, @@ -1115,7 +1125,7 @@ uint16_t dpaa2_dev_tx_conf(void *queue) swp = DPAA2_PER_LCORE_PORTAL; do { - dq_storage = dpaa2_q->q_storage->dq_storage[0]; + dq_storage = dpaa2_q->q_storage[0]->dq_storage[0]; qbman_pull_desc_clear(&pulldesc); qbman_pull_desc_set_fq(&pulldesc, fqid); qbman_pull_desc_set_storage(&pulldesc, 
dq_storage, @@ -1290,8 +1300,11 @@ dpaa2_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts) while (qbman_result_SCN_state(dpaa2_q->cscn)) { retry_count++; /* Retry for some time before giving up */ - if (retry_count > CONG_RETRY_COUNT) + if (retry_count > CONG_RETRY_COUNT) { + if (dpaa2_q->tm_sw_td) + goto sw_td; goto skip_tx; + } } frames_to_send = (nb_pkts > dpaa2_eqcr_size) ? @@ -1483,6 +1496,25 @@ dpaa2_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts) rte_pktmbuf_free_seg(buf_to_free[loop].seg); } + return num_tx; +sw_td: + loop = 0; + while (loop < num_tx) { + if (unlikely(RTE_MBUF_HAS_EXTBUF(*bufs))) + rte_pktmbuf_free(*bufs); + bufs++; + loop++; + } + + /* free the pending buffers */ + while (nb_pkts) { + rte_pktmbuf_free(*bufs); + bufs++; + nb_pkts--; + num_tx++; + } + dpaa2_q->tx_pkts += num_tx; + return num_tx; } @@ -1954,12 +1986,13 @@ dpaa2_dev_loopback_rx(void *queue, struct qbman_fd *fd[DPAA2_LX2_DQRR_RING_SIZE]; struct qbman_pull_desc pulldesc; struct qbman_eq_desc eqdesc; - struct queue_storage_info_t *q_storage = dpaa2_q->q_storage; + struct queue_storage_info_t *q_storage; struct rte_eth_dev_data *eth_data = dpaa2_q->eth_data; struct dpaa2_dev_priv *priv = eth_data->dev_private; struct dpaa2_queue *tx_q = priv->tx_vq[0]; /* todo - currently we are using 1st TX queue only for loopback*/ + q_storage = dpaa2_q->q_storage[rte_lcore_id()]; if (unlikely(!DPAA2_PER_LCORE_ETHRX_DPIO)) { ret = dpaa2_affine_qbman_ethrx_swp(); if (ret) { diff --git a/drivers/net/dpaa2/dpaa2_sparser.c b/drivers/net/dpaa2/dpaa2_sparser.c index 59f7a172c6..265c9b5c57 100644 --- a/drivers/net/dpaa2/dpaa2_sparser.c +++ b/drivers/net/dpaa2/dpaa2_sparser.c @@ -1,5 +1,5 @@ /* SPDX-License-Identifier: BSD-3-Clause - * Copyright 2018-2019 NXP + * Copyright 2018-2023 NXP */ #include @@ -170,7 +170,14 @@ int dpaa2_eth_load_wriop_soft_parser(struct dpaa2_dev_priv *priv, } memcpy(addr, sp_param.byte_code, sp_param.size); - cfg.ss_iova = (uint64_t)(DPAA2_VADDR_TO_IOVA(addr)); + cfg.ss_iova = DPAA2_VADDR_TO_IOVA_AND_CHECK(addr, sp_param.size); + if (cfg.ss_iova == RTE_BAD_IOVA) { + DPAA2_PMD_ERR("No IOMMU map for soft sequence(%p), size=%d", + addr, sp_param.size); + rte_free(addr); + + return -ENOBUFS; + } ret = dpni_load_sw_sequence(dpni, CMD_PRI_LOW, priv->token, &cfg); if (ret) { @@ -179,7 +186,7 @@ int dpaa2_eth_load_wriop_soft_parser(struct dpaa2_dev_priv *priv, return ret; } - priv->ss_iova = (uint64_t)(DPAA2_VADDR_TO_IOVA(addr)); + priv->ss_iova = cfg.ss_iova; priv->ss_offset += sp_param.size; DPAA2_PMD_INFO("Soft parser loaded for dpni@%d", priv->hw_id); @@ -219,7 +226,15 @@ int dpaa2_eth_enable_wriop_soft_parser(struct dpaa2_dev_priv *priv, } memcpy(param_addr, sp_param.param_array, cfg.param_size); - cfg.param_iova = (uint64_t)(DPAA2_VADDR_TO_IOVA(param_addr)); + cfg.param_iova = DPAA2_VADDR_TO_IOVA_AND_CHECK(param_addr, + cfg.param_size); + if (cfg.param_iova == RTE_BAD_IOVA) { + DPAA2_PMD_ERR("%s: No IOMMU map for %p, size=%d", + __func__, param_addr, cfg.param_size); + rte_free(param_addr); + + return -ENOBUFS; + } priv->ss_param_iova = cfg.param_iova; } else { cfg.param_iova = 0; @@ -227,7 +242,7 @@ int dpaa2_eth_enable_wriop_soft_parser(struct dpaa2_dev_priv *priv, ret = dpni_enable_sw_sequence(dpni, CMD_PRI_LOW, priv->token, &cfg); if (ret) { - DPAA2_PMD_ERR("dpni_enable_sw_sequence failed for dpni%d", + DPAA2_PMD_ERR("Soft parser enabled for dpni@%d failed", priv->hw_id); rte_free(param_addr); return ret; diff --git a/drivers/net/dpaa2/dpaa2_tm.c 
b/drivers/net/dpaa2/dpaa2_tm.c index fb8c384ca4..f91392b092 100644 --- a/drivers/net/dpaa2/dpaa2_tm.c +++ b/drivers/net/dpaa2/dpaa2_tm.c @@ -1,5 +1,5 @@ /* SPDX-License-Identifier: BSD-3-Clause - * Copyright 2020-2021 NXP + * Copyright 2020-2023 NXP */ #include @@ -572,41 +572,42 @@ dpaa2_tm_configure_queue(struct rte_eth_dev *dev, struct dpaa2_tm_node *node) struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private; struct dpaa2_dev_priv *priv = dev->data->dev_private; struct dpaa2_queue *dpaa2_q; + uint64_t iova; memset(&tx_flow_cfg, 0, sizeof(struct dpni_queue)); - dpaa2_q = (struct dpaa2_queue *)dev->data->tx_queues[node->id]; + dpaa2_q = (struct dpaa2_queue *)dev->data->tx_queues[node->id]; tc_id = node->parent->tc_id; node->parent->tc_id++; flow_id = 0; - if (dpaa2_q == NULL) { - DPAA2_PMD_ERR("Queue is not configured for node = %d", node->id); - return -1; + if (!dpaa2_q) { + DPAA2_PMD_ERR("Queue is not configured for node = %d", + node->id); + return -ENOMEM; } DPAA2_PMD_DEBUG("tc_id = %d, channel = %d", tc_id, node->parent->channel_id); ret = dpni_set_queue(dpni, CMD_PRI_LOW, priv->token, DPNI_QUEUE_TX, - ((node->parent->channel_id << 8) | tc_id), - flow_id, options, &tx_flow_cfg); + ((node->parent->channel_id << 8) | tc_id), + flow_id, options, &tx_flow_cfg); if (ret) { - DPAA2_PMD_ERR("Error in setting the tx flow: " - "channel id = %d tc_id= %d, param = 0x%x " - "flow=%d err=%d", node->parent->channel_id, tc_id, - ((node->parent->channel_id << 8) | tc_id), flow_id, - ret); - return -1; + DPAA2_PMD_ERR("Set the TC[%d].ch[%d].TX flow[%d] (err=%d)", + tc_id, node->parent->channel_id, flow_id, + ret); + return ret; } dpaa2_q->flow_id = flow_id; dpaa2_q->tc_index = tc_id; ret = dpni_get_queue(dpni, CMD_PRI_LOW, priv->token, - DPNI_QUEUE_TX, ((node->parent->channel_id << 8) | dpaa2_q->tc_index), - dpaa2_q->flow_id, &tx_flow_cfg, &qid); + DPNI_QUEUE_TX, + ((node->parent->channel_id << 8) | dpaa2_q->tc_index), + dpaa2_q->flow_id, &tx_flow_cfg, &qid); if (ret) { DPAA2_PMD_ERR("Error in getting LFQID err=%d", ret); - return -1; + return ret; } dpaa2_q->fqid = qid.fqid; @@ -621,8 +622,13 @@ dpaa2_tm_configure_queue(struct rte_eth_dev *dev, struct dpaa2_tm_node *node) */ cong_notif_cfg.threshold_exit = (dpaa2_q->nb_desc * 9) / 10; cong_notif_cfg.message_ctx = 0; - cong_notif_cfg.message_iova = - (size_t)DPAA2_VADDR_TO_IOVA(dpaa2_q->cscn); + iova = DPAA2_VADDR_TO_IOVA_AND_CHECK(dpaa2_q->cscn, + sizeof(struct qbman_result)); + if (iova == RTE_BAD_IOVA) { + DPAA2_PMD_ERR("No IOMMU map for cscn(%p)", dpaa2_q->cscn); + return -ENOBUFS; + } + cong_notif_cfg.message_iova = iova; cong_notif_cfg.dest_cfg.dest_type = DPNI_DEST_NONE; cong_notif_cfg.notification_mode = DPNI_CONG_OPT_WRITE_MEM_ON_ENTER | @@ -641,6 +647,7 @@ dpaa2_tm_configure_queue(struct rte_eth_dev *dev, struct dpaa2_tm_node *node) return -ret; } } + dpaa2_q->tm_sw_td = true; return 0; } @@ -684,6 +691,7 @@ dpaa2_hierarchy_commit(struct rte_eth_dev *dev, int clear_on_fail, struct dpaa2_tm_node *leaf_node, *temp_leaf_node, *channel_node; struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private; int ret, t; + bool conf_schedule = false; /* Populate TCs */ LIST_FOREACH(channel_node, &priv->nodes, next) { @@ -757,7 +765,7 @@ dpaa2_hierarchy_commit(struct rte_eth_dev *dev, int clear_on_fail, } LIST_FOREACH(channel_node, &priv->nodes, next) { - int wfq_grp = 0, is_wfq_grp = 0, conf[DPNI_MAX_TC]; + int wfq_grp = 0, is_wfq_grp = 0, conf[priv->nb_tx_queues]; struct dpni_tx_priorities_cfg prio_cfg; memset(&prio_cfg, 0, 
sizeof(prio_cfg)); @@ -767,6 +775,7 @@ dpaa2_hierarchy_commit(struct rte_eth_dev *dev, int clear_on_fail, if (channel_node->level_id != CHANNEL_LEVEL) continue; + conf_schedule = false; LIST_FOREACH(leaf_node, &priv->nodes, next) { struct dpaa2_queue *leaf_dpaa2_q; uint8_t leaf_tc_id; @@ -789,6 +798,7 @@ dpaa2_hierarchy_commit(struct rte_eth_dev *dev, int clear_on_fail, if (leaf_node->parent != channel_node) continue; + conf_schedule = true; leaf_dpaa2_q = (struct dpaa2_queue *)dev->data->tx_queues[leaf_node->id]; leaf_tc_id = leaf_dpaa2_q->tc_index; /* Process sibling leaf nodes */ @@ -829,8 +839,8 @@ dpaa2_hierarchy_commit(struct rte_eth_dev *dev, int clear_on_fail, goto out; } is_wfq_grp = 1; - conf[temp_leaf_node->id] = 1; } + conf[temp_leaf_node->id] = 1; } if (is_wfq_grp) { if (wfq_grp == 0) { @@ -851,6 +861,9 @@ dpaa2_hierarchy_commit(struct rte_eth_dev *dev, int clear_on_fail, } conf[leaf_node->id] = 1; } + if (!conf_schedule) + continue; + if (wfq_grp > 1) { prio_cfg.separate_groups = 1; if (prio_cfg.prio_group_B < prio_cfg.prio_group_A) { @@ -864,6 +877,16 @@ dpaa2_hierarchy_commit(struct rte_eth_dev *dev, int clear_on_fail, prio_cfg.prio_group_A = 1; prio_cfg.channel_idx = channel_node->channel_id; + DPAA2_PMD_DEBUG("########################################"); + DPAA2_PMD_DEBUG("Channel idx = %d", prio_cfg.channel_idx); + for (t = 0; t < DPNI_MAX_TC; t++) + DPAA2_PMD_DEBUG("tc = %d mode = %d, delta = %d", t, + prio_cfg.tc_sched[t].mode, + prio_cfg.tc_sched[t].delta_bandwidth); + + DPAA2_PMD_DEBUG("prioritya = %d, priorityb = %d, separate grps" + " = %d", prio_cfg.prio_group_A, + prio_cfg.prio_group_B, prio_cfg.separate_groups); ret = dpni_set_tx_priorities(dpni, 0, priv->token, &prio_cfg); if (ret) { ret = -rte_tm_error_set(error, EINVAL, @@ -871,15 +894,6 @@ dpaa2_hierarchy_commit(struct rte_eth_dev *dev, int clear_on_fail, "Scheduling Failed\n"); goto out; } - DPAA2_PMD_DEBUG("########################################"); - DPAA2_PMD_DEBUG("Channel idx = %d", prio_cfg.channel_idx); - for (t = 0; t < DPNI_MAX_TC; t++) { - DPAA2_PMD_DEBUG("tc = %d mode = %d ", t, prio_cfg.tc_sched[t].mode); - DPAA2_PMD_DEBUG("delta = %d", prio_cfg.tc_sched[t].delta_bandwidth); - } - DPAA2_PMD_DEBUG("prioritya = %d", prio_cfg.prio_group_A); - DPAA2_PMD_DEBUG("priorityb = %d", prio_cfg.prio_group_B); - DPAA2_PMD_DEBUG("separate grps = %d", prio_cfg.separate_groups); } return 0; diff --git a/drivers/net/dpaa2/mc/dpdmux.c b/drivers/net/dpaa2/mc/dpdmux.c index 1bb153cad7..f4feef3840 100644 --- a/drivers/net/dpaa2/mc/dpdmux.c +++ b/drivers/net/dpaa2/mc/dpdmux.c @@ -1,7 +1,7 @@ /* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) * * Copyright 2013-2016 Freescale Semiconductor Inc. - * Copyright 2018-2021 NXP + * Copyright 2018-2023 NXP * */ #include @@ -287,15 +287,19 @@ int dpdmux_reset(struct fsl_mc_io *mc_io, * @token: Token of DPDMUX object * @skip_reset_flags: By default all are 0. * By setting 1 will deactivate the reset. - * The flags are: - * DPDMUX_SKIP_DEFAULT_INTERFACE 0x01 - * DPDMUX_SKIP_UNICAST_RULES 0x02 - * DPDMUX_SKIP_MULTICAST_RULES 0x04 + * The flags are: + * DPDMUX_SKIP_MODIFY_DEFAULT_INTERFACE 0x01 + * DPDMUX_SKIP_UNICAST_RULES 0x02 + * DPDMUX_SKIP_MULTICAST_RULES 0x04 + * DPDMUX_SKIP_RESET_DEFAULT_INTERFACE 0x08 * * For example, by default, through DPDMUX_RESET the default * interface will be restored with the one from create. - * By setting DPDMUX_SKIP_DEFAULT_INTERFACE flag, - * through DPDMUX_RESET the default interface will not be modified. 
+ * By setting DPDMUX_SKIP_MODIFY_DEFAULT_INTERFACE flag, + * through DPDMUX_RESET the default interface will not be modified after reset. + * By setting DPDMUX_SKIP_RESET_DEFAULT_INTERFACE flag, + * through DPDMUX_RESET the default interface will not be reset + * and will continue to be functional during reset procedure. * * Return: '0' on Success; Error code otherwise. */ @@ -327,10 +331,11 @@ int dpdmux_set_resetable(struct fsl_mc_io *mc_io, * @token: Token of DPDMUX object * @skip_reset_flags: Get the reset flags. * - * The flags are: - * DPDMUX_SKIP_DEFAULT_INTERFACE 0x01 - * DPDMUX_SKIP_UNICAST_RULES 0x02 - * DPDMUX_SKIP_MULTICAST_RULES 0x04 + * The flags are: + * DPDMUX_SKIP_MODIFY_DEFAULT_INTERFACE 0x01 + * DPDMUX_SKIP_UNICAST_RULES 0x02 + * DPDMUX_SKIP_MULTICAST_RULES 0x04 + * DPDMUX_SKIP_RESET_DEFAULT_INTERFACE 0x08 * * Return: '0' on Success; Error code otherwise. */ @@ -1064,6 +1069,127 @@ int dpdmux_get_api_version(struct fsl_mc_io *mc_io, return 0; } +/** + * dpdmux_if_set_taildrop() - enable taildrop for egress interface queues. + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPDMUX object + * @if_id: Interface Identifier + * @cfg: Taildrop configuration + */ +int dpdmux_if_set_taildrop(struct fsl_mc_io *mc_io, uint32_t cmd_flags, uint16_t token, + uint16_t if_id, struct dpdmux_taildrop_cfg *cfg) +{ + struct mc_command cmd = { 0 }; + struct dpdmux_cmd_set_taildrop *cmd_params; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_IF_SET_TAILDROP, + cmd_flags, + token); + cmd_params = (struct dpdmux_cmd_set_taildrop *)cmd.params; + cmd_params->if_id = cpu_to_le16(if_id); + cmd_params->units = cfg->units; + cmd_params->threshold = cpu_to_le32(cfg->threshold); + dpdmux_set_field(cmd_params->oal_en, ENABLE, (!!cfg->enable)); + + return mc_send_command(mc_io, &cmd); +} + +/** + * dpdmux_if_get_taildrop() - get current taildrop configuration. + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPDMUX object + * @if_id: Interface Identifier + * @cfg: Taildrop configuration + */ +int dpdmux_if_get_taildrop(struct fsl_mc_io *mc_io, uint32_t cmd_flags, uint16_t token, + uint16_t if_id, struct dpdmux_taildrop_cfg *cfg) +{ + struct mc_command cmd = {0}; + struct dpdmux_cmd_get_taildrop *cmd_params; + struct dpdmux_rsp_get_taildrop *rsp_params; + int err = 0; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_IF_GET_TAILDROP, + cmd_flags, + token); + cmd_params = (struct dpdmux_cmd_get_taildrop *)cmd.params; + cmd_params->if_id = cpu_to_le16(if_id); + + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + /* retrieve response parameters */ + rsp_params = (struct dpdmux_rsp_get_taildrop *)cmd.params; + cfg->threshold = le32_to_cpu(rsp_params->threshold); + cfg->units = rsp_params->units; + cfg->enable = dpdmux_get_field(rsp_params->oal_en, ENABLE); + + return err; +} + +/** + * dpdmux_dump_table() - Dump the content of table_type table into memory. 
+ * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPSW object + * @table_type: The type of the table to dump + * - DPDMUX_DMAT_TABLE + * - DPDMUX_MISS_TABLE + * - DPDMUX_PRUNE_TABLE + * @table_index: The index of the table to dump in case of more than one table + * if table_type == DPDMUX_DMAT_TABLE + * - DPDMUX_HMAP_UNICAST + * - DPDMUX_HMAP_MULTICAST + * else 0 + * @iova_addr: The snapshot will be stored in this variable as an header of struct dump_table_header + * followed by an array of struct dump_table_entry + * @iova_size: Memory size allocated for iova_addr + * @num_entries: Number of entries written in iova_addr + * + * Return: Completion status. '0' on Success; Error code otherwise. + * + * The memory allocated at iova_addr must be zeroed before command execution. + * If the table content exceeds the memory size provided the dump will be truncated. + */ +int dpdmux_dump_table(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + uint16_t table_type, + uint16_t table_index, + uint64_t iova_addr, + uint32_t iova_size, + uint16_t *num_entries) +{ + struct mc_command cmd = { 0 }; + int err; + struct dpdmux_cmd_dump_table *cmd_params; + struct dpdmux_rsp_dump_table *rsp_params; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_DUMP_TABLE, cmd_flags, token); + cmd_params = (struct dpdmux_cmd_dump_table *)cmd.params; + cmd_params->table_type = cpu_to_le16(table_type); + cmd_params->table_index = cpu_to_le16(table_index); + cmd_params->iova_addr = cpu_to_le64(iova_addr); + cmd_params->iova_size = cpu_to_le32(iova_size); + + /* send command to mc*/ + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + rsp_params = (struct dpdmux_rsp_dump_table *)cmd.params; + *num_entries = le16_to_cpu(rsp_params->num_entries); + + return 0; +} + + /** * dpdmux_if_set_errors_behavior() - Set errors behavior * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' @@ -1100,3 +1226,60 @@ int dpdmux_if_set_errors_behavior(struct fsl_mc_io *mc_io, uint32_t cmd_flags, /* send command to mc*/ return mc_send_command(mc_io, &cmd); } + +/* Sets up a Soft Parser Profile on this DPDMUX + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPDMUX object + * @sp_profile: Soft Parser Profile name (must a valid name for a defined profile) + * Maximum allowed length for this string is 8 characters long + * If this parameter is empty string (all zeros) + * then the Default SP Profile is set on this dpdmux + * @type: one of the SP Profile types defined above: Ingress or Egress (or both using bitwise OR) + */ +int dpdmux_set_sp_profile(struct fsl_mc_io *mc_io, uint32_t cmd_flags, uint16_t token, + uint8_t sp_profile[], uint8_t type) +{ + struct dpdmux_cmd_set_sp_profile *cmd_params; + struct mc_command cmd = { 0 }; + int i; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_SET_SP_PROFILE, + cmd_flags, token); + + cmd_params = (struct dpdmux_cmd_set_sp_profile *)cmd.params; + for (i = 0; i < MAX_SP_PROFILE_ID_SIZE && sp_profile[i]; i++) + cmd_params->sp_profile[i] = sp_profile[i]; + cmd_params->type = type; + + /* send command to MC */ + return mc_send_command(mc_io, &cmd); +} + +/* Enable/Disable Soft Parser on this DPDMUX interface + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPDMUX object + * @if_id: interface id + 
* @type: one of the SP Profile types defined above: Ingress or Egress (or both using bitwise OR) + * @en: 1 to enable or 0 to disable + */ +int dpdmux_sp_enable(struct fsl_mc_io *mc_io, uint32_t cmd_flags, uint16_t token, + uint16_t if_id, uint8_t type, uint8_t en) +{ + struct dpdmux_cmd_sp_enable *cmd_params; + struct mc_command cmd = { 0 }; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_SP_ENABLE, + cmd_flags, token); + + cmd_params = (struct dpdmux_cmd_sp_enable *)cmd.params; + cmd_params->if_id = if_id; + cmd_params->type = type; + cmd_params->en = en; + + /* send command to MC */ + return mc_send_command(mc_io, &cmd); +} diff --git a/drivers/net/dpaa2/mc/dpkg.c b/drivers/net/dpaa2/mc/dpkg.c index 4789976b7d..5db3d092c1 100644 --- a/drivers/net/dpaa2/mc/dpkg.c +++ b/drivers/net/dpaa2/mc/dpkg.c @@ -1,16 +1,18 @@ /* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) * - * Copyright 2017-2021 NXP + * Copyright 2017-2021, 2023 NXP * */ #include #include #include +#include /** * dpkg_prepare_key_cfg() - function prepare extract parameters * @cfg: defining a full Key Generation profile (rule) - * @key_cfg_buf: Zeroed 256 bytes of memory before mapping it to DMA + * @key_cfg_buf: Zeroed memory whose size is sizeo of + * "struct dpni_ext_set_rx_tc_dist" before mapping it to DMA * * This function has to be called before the following functions: * - dpni_set_rx_tc_dist() @@ -18,7 +20,8 @@ * - dpkg_prepare_key_cfg() */ int -dpkg_prepare_key_cfg(const struct dpkg_profile_cfg *cfg, uint8_t *key_cfg_buf) +dpkg_prepare_key_cfg(const struct dpkg_profile_cfg *cfg, + void *key_cfg_buf) { int i, j; struct dpni_ext_set_rx_tc_dist *dpni_ext; @@ -27,11 +30,12 @@ dpkg_prepare_key_cfg(const struct dpkg_profile_cfg *cfg, uint8_t *key_cfg_buf) if (cfg->num_extracts > DPKG_MAX_NUM_OF_EXTRACTS) return -EINVAL; - dpni_ext = (struct dpni_ext_set_rx_tc_dist *)key_cfg_buf; + dpni_ext = key_cfg_buf; dpni_ext->num_extracts = cfg->num_extracts; for (i = 0; i < cfg->num_extracts; i++) { extr = &dpni_ext->extracts[i]; + memset(extr, 0, sizeof(struct dpni_dist_extract)); switch (cfg->extracts[i].type) { case DPKG_EXTRACT_FROM_HDR: diff --git a/drivers/net/dpaa2/mc/dpni.c b/drivers/net/dpaa2/mc/dpni.c index 4d97b98939..558f08dc69 100644 --- a/drivers/net/dpaa2/mc/dpni.c +++ b/drivers/net/dpaa2/mc/dpni.c @@ -1,7 +1,7 @@ /* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) * * Copyright 2013-2016 Freescale Semiconductor Inc. - * Copyright 2016-2022 NXP + * Copyright 2016-2023 NXP * */ #include @@ -852,6 +852,92 @@ int dpni_get_qdid(struct fsl_mc_io *mc_io, return 0; } +/** + * dpni_get_qdid_ex() - Extension for the function to get the Queuing Destination ID (QDID) + * that should be used for enqueue operations. + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @qtype: Type of queue to receive QDID for + * @qdid: Array of virtual QDID value that should be used as an argument + * in all enqueue operations. + * + * Return: '0' on Success; Error code otherwise. + * + * This function must be used when dpni is created using multiple Tx channels to return one + * qdid for each channel. 
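A minimal caller-side sketch (not part of the patch) of the reworked dpkg_prepare_key_cfg() contract shown above: the buffer is now a void * that must be zeroed and at least sizeof(struct dpni_ext_set_rx_tc_dist) before DMA mapping. The extraction selectors are placeholders and includes are omitted.

static int prepare_example_key(void *key_cfg_buf /* zeroed, DMA-able */)
{
	struct dpkg_profile_cfg kg_cfg;

	memset(&kg_cfg, 0, sizeof(kg_cfg));
	kg_cfg.num_extracts = 1;
	kg_cfg.extracts[0].type = DPKG_EXTRACT_FROM_HDR;
	/* header/field selectors omitted; fill extracts[0].extract as needed */

	/* buffer must cover struct dpni_ext_set_rx_tc_dist and start zeroed */
	memset(key_cfg_buf, 0, sizeof(struct dpni_ext_set_rx_tc_dist));
	return dpkg_prepare_key_cfg(&kg_cfg, key_cfg_buf);
}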
+ */ +int dpni_get_qdid_ex(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + enum dpni_queue_type qtype, + uint16_t *qdid) +{ + struct mc_command cmd = { 0 }; + struct dpni_cmd_get_qdid *cmd_params; + struct dpni_rsp_get_qdid_ex *rsp_params; + int i; + int err; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_QDID_EX, + cmd_flags, + token); + cmd_params = (struct dpni_cmd_get_qdid *)cmd.params; + cmd_params->qtype = qtype; + + /* send command to mc*/ + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + /* retrieve response parameters */ + rsp_params = (struct dpni_rsp_get_qdid_ex *)cmd.params; + for (i = 0; i < DPNI_MAX_CHANNELS; i++) + qdid[i] = le16_to_cpu(rsp_params->qdid[i]); + + return 0; +} + +/** + * dpni_get_sp_info() - Get the AIOP storage profile IDs associated + * with the DPNI + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @sp_info: Returned AIOP storage-profile information + * + * Return: '0' on Success; Error code otherwise. + * + * @warning Only relevant for DPNI that belongs to AIOP container. + */ +int dpni_get_sp_info(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + struct dpni_sp_info *sp_info) +{ + struct dpni_rsp_get_sp_info *rsp_params; + struct mc_command cmd = { 0 }; + int err, i; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_SP_INFO, + cmd_flags, + token); + + /* send command to mc*/ + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + /* retrieve response parameters */ + rsp_params = (struct dpni_rsp_get_sp_info *)cmd.params; + for (i = 0; i < DPNI_MAX_SP; i++) + sp_info->spids[i] = le16_to_cpu(rsp_params->spids[i]); + + return 0; +} + /** * dpni_get_tx_data_offset() - Get the Tx data offset (from start of buffer) * @mc_io: Pointer to MC portal's I/O object @@ -1684,6 +1770,7 @@ int dpni_set_rx_tc_dist(struct fsl_mc_io *mc_io, * @mc_io: Pointer to MC portal's I/O object * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' * @token: Token of DPNI object + * @ceetm_ch_idx: ceetm channel index * @mode: Tx confirmation mode * * This function is useful only when 'DPNI_OPT_TX_CONF_DISABLED' is not @@ -1701,6 +1788,7 @@ int dpni_set_rx_tc_dist(struct fsl_mc_io *mc_io, int dpni_set_tx_confirmation_mode(struct fsl_mc_io *mc_io, uint32_t cmd_flags, uint16_t token, + uint8_t ceetm_ch_idx, enum dpni_confirmation_mode mode) { struct dpni_tx_confirmation_mode *cmd_params; @@ -1711,6 +1799,7 @@ int dpni_set_tx_confirmation_mode(struct fsl_mc_io *mc_io, cmd_flags, token); cmd_params = (struct dpni_tx_confirmation_mode *)cmd.params; + cmd_params->ceetm_ch_idx = ceetm_ch_idx; cmd_params->confirmation_mode = mode; /* send command to mc*/ @@ -1722,6 +1811,7 @@ int dpni_set_tx_confirmation_mode(struct fsl_mc_io *mc_io, * @mc_io: Pointer to MC portal's I/O object * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' * @token: Token of DPNI object + * @ceetm_ch_idx: ceetm channel index * @mode: Tx confirmation mode * * Return: '0' on Success; Error code otherwise. 
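A hedged usage sketch of the new dpni_get_qdid_ex() above (not part of the patch): one virtual QDID is returned per Tx channel, so the caller provides an array of DPNI_MAX_CHANNELS entries; mc_io and token are assumed to come from the PMD's private data, and the existing DPNI_QUEUE_TX enumerator is assumed.

static int example_get_tx_qdids(struct fsl_mc_io *mc_io, uint16_t token,
				uint16_t qdid[DPNI_MAX_CHANNELS])
{
	/* cmd_flags 0: no MC_CMD_FLAG_ options requested */
	return dpni_get_qdid_ex(mc_io, 0, token, DPNI_QUEUE_TX, qdid);
}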
@@ -1729,8 +1819,10 @@ int dpni_set_tx_confirmation_mode(struct fsl_mc_io *mc_io, int dpni_get_tx_confirmation_mode(struct fsl_mc_io *mc_io, uint32_t cmd_flags, uint16_t token, + uint8_t ceetm_ch_idx, enum dpni_confirmation_mode *mode) { + struct dpni_tx_confirmation_mode *cmd_params; struct dpni_tx_confirmation_mode *rsp_params; struct mc_command cmd = { 0 }; int err; @@ -1738,6 +1830,8 @@ int dpni_get_tx_confirmation_mode(struct fsl_mc_io *mc_io, cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_TX_CONFIRMATION_MODE, cmd_flags, token); + cmd_params = (struct dpni_tx_confirmation_mode *)cmd.params; + cmd_params->ceetm_ch_idx = ceetm_ch_idx; err = mc_send_command(mc_io, &cmd); if (err) @@ -1749,6 +1843,78 @@ int dpni_get_tx_confirmation_mode(struct fsl_mc_io *mc_io, return 0; } +/** + * dpni_set_queue_tx_confirmation_mode() - Set Tx confirmation mode + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @ceetm_ch_idx: ceetm channel index + * @index: queue index + * @mode: Tx confirmation mode + * + * Return: '0' on Success; Error code otherwise. + */ +int dpni_set_queue_tx_confirmation_mode(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + uint8_t ceetm_ch_idx, uint8_t index, + enum dpni_confirmation_mode mode) +{ + struct dpni_queue_tx_confirmation_mode *cmd_params; + struct mc_command cmd = { 0 }; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_QUEUE_TX_CONFIRMATION_MODE, + cmd_flags, + token); + cmd_params = (struct dpni_queue_tx_confirmation_mode *)cmd.params; + cmd_params->ceetm_ch_idx = ceetm_ch_idx; + cmd_params->index = index; + cmd_params->confirmation_mode = mode; + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpni_get_queue_tx_confirmation_mode() - Get Tx confirmation mode + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @ceetm_ch_idx: ceetm channel index + * @index: queue index + * @mode: Tx confirmation mode + * + * Return: '0' on Success; Error code otherwise. + */ +int dpni_get_queue_tx_confirmation_mode(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + uint8_t ceetm_ch_idx, uint8_t index, + enum dpni_confirmation_mode *mode) +{ + struct dpni_queue_tx_confirmation_mode *cmd_params; + struct dpni_queue_tx_confirmation_mode *rsp_params; + struct mc_command cmd = { 0 }; + int err; + + cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_QUEUE_TX_CONFIRMATION_MODE, + cmd_flags, + token); + cmd_params = (struct dpni_queue_tx_confirmation_mode *)cmd.params; + cmd_params->ceetm_ch_idx = ceetm_ch_idx; + cmd_params->index = index; + + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + rsp_params = (struct dpni_queue_tx_confirmation_mode *)cmd.params; + *mode = rsp_params->confirmation_mode; + + return 0; +} + /** * dpni_set_qos_table() - Set QoS mapping table * @mc_io: Pointer to MC portal's I/O object @@ -2291,8 +2457,7 @@ int dpni_set_congestion_notification(struct fsl_mc_io *mc_io, * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' * @token: Token of DPNI object * @qtype: Type of queue - Rx, Tx and Tx confirm types are supported - * @param: Traffic class and channel. Bits[0-7] contain traaffic class, - * byte[8-15] contains channel id + * @tc_id: Traffic class selection (0-7) * @cfg: congestion notification configuration * * Return: '0' on Success; error code otherwise. 
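An illustrative sketch (not part of the patch) of the new per-queue Tx confirmation mode API introduced above: read the mode of queue 0 on ceetm channel 0 and write it back, using only the signatures added in this patch; channel and queue indices are placeholders.

static int example_sync_queue_conf_mode(struct fsl_mc_io *mc_io, uint16_t token)
{
	enum dpni_confirmation_mode mode;
	int err;

	/* read the confirmation mode of queue 0 on ceetm channel 0 */
	err = dpni_get_queue_tx_confirmation_mode(mc_io, 0, token, 0, 0, &mode);
	if (err)
		return err;

	/* write the same mode back, now scoped to a single queue */
	return dpni_set_queue_tx_confirmation_mode(mc_io, 0, token, 0, 0, mode);
}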
@@ -3114,8 +3279,216 @@ int dpni_set_port_cfg(struct fsl_mc_io *mc_io, uint32_t cmd_flags, cmd_params = (struct dpni_cmd_set_port_cfg *)cmd.params; cmd_params->flags = cpu_to_le32(flags); - dpni_set_field(cmd_params->bit_params, PORT_LOOPBACK_EN, - !!port_cfg->loopback_en); + dpni_set_field(cmd_params->bit_params, PORT_LOOPBACK_EN, !!port_cfg->loopback_en); + + /* send command to MC */ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpni_get_single_step_cfg() - return current configuration for single step PTP + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @ptp_cfg: ptp single step configuration + * + * Return: '0' on Success; Error code otherwise. + * + */ +int dpni_get_single_step_cfg(struct fsl_mc_io *mc_io, uint32_t cmd_flags, uint16_t token, + struct dpni_single_step_cfg *ptp_cfg) +{ + struct dpni_rsp_single_step_cfg *rsp_params; + struct mc_command cmd = { 0 }; + int err; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_SINGLE_STEP_CFG, + cmd_flags, + token); + /* send command to mc*/ + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + /* read command response */ + rsp_params = (struct dpni_rsp_single_step_cfg *)cmd.params; + ptp_cfg->offset = le16_to_cpu(rsp_params->offset); + ptp_cfg->en = dpni_get_field(rsp_params->flags, PTP_ENABLE); + ptp_cfg->ch_update = dpni_get_field(rsp_params->flags, PTP_CH_UPDATE); + ptp_cfg->peer_delay = le32_to_cpu(rsp_params->peer_delay); + ptp_cfg->ptp_onestep_reg_base = + le32_to_cpu(rsp_params->ptp_onestep_reg_base); + + return err; +} + +/** + * dpni_get_port_cfg() - return configuration from physical port. The command has effect only if + * dpni is connected to a mac object + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @port_cfg: Configuration data + * The command can be called only when dpni is connected to a dpmac object. + * If the dpni is unconnected or the endpoint is not a dpni it will return error; + */ +int dpni_get_port_cfg(struct fsl_mc_io *mc_io, uint32_t cmd_flags, uint16_t token, + struct dpni_port_cfg *port_cfg) +{ + struct dpni_rsp_get_port_cfg *rsp_params; + struct mc_command cmd = { 0 }; + int err; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_PORT_CFG, + cmd_flags, token); + + /* send command to MC */ + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + /* read command response */ + rsp_params = (struct dpni_rsp_get_port_cfg *)cmd.params; + port_cfg->loopback_en = dpni_get_field(rsp_params->bit_params, PORT_LOOPBACK_EN); + + return 0; +} + +/** + * dpni_set_single_step_cfg() - enable/disable and configure single step PTP + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @ptp_cfg: ptp single step configuration + * + * Return: '0' on Success; Error code otherwise. + * + * The function has effect only when dpni object is connected to a dpmac object. If the + * dpni is not connected to a dpmac the configuration will be stored inside and applied + * when connection is made. 
+ */ +int dpni_set_single_step_cfg(struct fsl_mc_io *mc_io, uint32_t cmd_flags, uint16_t token, + struct dpni_single_step_cfg *ptp_cfg) +{ + struct dpni_cmd_single_step_cfg *cmd_params; + struct mc_command cmd = { 0 }; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_SINGLE_STEP_CFG, + cmd_flags, + token); + cmd_params = (struct dpni_cmd_single_step_cfg *)cmd.params; + cmd_params->offset = cpu_to_le16(ptp_cfg->offset); + cmd_params->peer_delay = cpu_to_le32(ptp_cfg->peer_delay); + dpni_set_field(cmd_params->flags, PTP_ENABLE, !!ptp_cfg->en); + dpni_set_field(cmd_params->flags, PTP_CH_UPDATE, !!ptp_cfg->ch_update); + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpni_dump_table() - Dump the content of table_type table into memory. + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPSW object + * @table_type: The type of the table to dump + * @table_index: The index of the table to dump in case of more than one table + * @iova_addr: The snapshot will be stored in this variable as an header of struct dump_table_header + * followed by an array of struct dump_table_entry + * @iova_size: Memory size allocated for iova_addr + * @num_entries: Number of entries written in iova_addr + * + * Return: Completion status. '0' on Success; Error code otherwise. + * + * The memory allocated at iova_addr must be zeroed before command execution. + * If the table content exceeds the memory size provided the dump will be truncated. + */ +int dpni_dump_table(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + uint16_t table_type, + uint16_t table_index, + uint64_t iova_addr, + uint32_t iova_size, + uint16_t *num_entries) +{ + struct mc_command cmd = { 0 }; + int err; + struct dpni_cmd_dump_table *cmd_params; + struct dpni_rsp_dump_table *rsp_params; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_DUMP_TABLE, cmd_flags, token); + cmd_params = (struct dpni_cmd_dump_table *)cmd.params; + cmd_params->table_type = cpu_to_le16(table_type); + cmd_params->table_index = cpu_to_le16(table_index); + cmd_params->iova_addr = cpu_to_le64(iova_addr); + cmd_params->iova_size = cpu_to_le32(iova_size); + + /* send command to mc*/ + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + rsp_params = (struct dpni_rsp_dump_table *)cmd.params; + *num_entries = le16_to_cpu(rsp_params->num_entries); + + return 0; +} + +/* Sets up a Soft Parser Profile on this DPNI + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @sp_profile: Soft Parser Profile name (must a valid name for a defined profile) + * Maximum allowed length for this string is 8 characters long + * If this parameter is empty string (all zeros) + * then the Default SP Profile is set on this dpni + * @type: one of the SP Profile types defined above: Ingress or Egress (or both using bitwise OR) + */ +int dpni_set_sp_profile(struct fsl_mc_io *mc_io, uint32_t cmd_flags, uint16_t token, + uint8_t sp_profile[], uint8_t type) +{ + struct dpni_cmd_set_sp_profile *cmd_params; + struct mc_command cmd = { 0 }; + int i; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_SP_PROFILE, + cmd_flags, token); + + cmd_params = (struct dpni_cmd_set_sp_profile *)cmd.params; + for (i = 0; i < MAX_SP_PROFILE_ID_SIZE && sp_profile[i]; i++) + cmd_params->sp_profile[i] = sp_profile[i]; + 
cmd_params->type = type; + + /* send command to MC */ + return mc_send_command(mc_io, &cmd); +} + +/* Enable/Disable Soft Parser on this DPNI + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @type: one of the SP Profile types defined above: Ingress or Egress (or both using bitwise OR) + * @en: 1 to enable or 0 to disable + */ +int dpni_sp_enable(struct fsl_mc_io *mc_io, uint32_t cmd_flags, uint16_t token, + uint8_t type, uint8_t en) +{ + struct dpni_cmd_sp_enable *cmd_params; + struct mc_command cmd = { 0 }; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_SP_ENABLE, + cmd_flags, token); + + cmd_params = (struct dpni_cmd_sp_enable *)cmd.params; + cmd_params->type = type; + cmd_params->en = en; /* send command to MC */ return mc_send_command(mc_io, &cmd); diff --git a/drivers/net/dpaa2/mc/fsl_dpdmux.h b/drivers/net/dpaa2/mc/fsl_dpdmux.h index 4600ea94d4..97b09e59f9 100644 --- a/drivers/net/dpaa2/mc/fsl_dpdmux.h +++ b/drivers/net/dpaa2/mc/fsl_dpdmux.h @@ -1,7 +1,7 @@ /* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) * * Copyright 2013-2016 Freescale Semiconductor Inc. - * Copyright 2018-2021 NXP + * Copyright 2018-2023 NXP * */ #ifndef __FSL_DPDMUX_H @@ -154,6 +154,10 @@ int dpdmux_reset(struct fsl_mc_io *mc_io, *Setting 1 DPDMUX_RESET will not reset multicast rules */ #define DPDMUX_SKIP_MULTICAST_RULES 0x04 +/** + *Setting 4 DPDMUX_RESET will not reset default interface + */ +#define DPDMUX_SKIP_RESET_DEFAULT_INTERFACE 0x08 int dpdmux_set_resetable(struct fsl_mc_io *mc_io, uint32_t cmd_flags, @@ -464,10 +468,50 @@ int dpdmux_get_api_version(struct fsl_mc_io *mc_io, uint16_t *major_ver, uint16_t *minor_ver); +enum dpdmux_congestion_unit { + DPDMUX_TAIDLROP_DROP_UNIT_BYTE = 0, + DPDMUX_TAILDROP_DROP_UNIT_FRAMES, + DPDMUX_TAILDROP_DROP_UNIT_BUFFERS +}; + +/** + * struct dpdmux_taildrop_cfg - interface taildrop configuration + * @enable - enable (1 ) or disable (0) taildrop + * @units - taildrop units + * @threshold - taildtop threshold + */ +struct dpdmux_taildrop_cfg { + char enable; + enum dpdmux_congestion_unit units; + uint32_t threshold; +}; + +int dpdmux_if_set_taildrop(struct fsl_mc_io *mc_io, uint32_t cmd_flags, uint16_t token, + uint16_t if_id, struct dpdmux_taildrop_cfg *cfg); + +int dpdmux_if_get_taildrop(struct fsl_mc_io *mc_io, uint32_t cmd_flags, uint16_t token, + uint16_t if_id, struct dpdmux_taildrop_cfg *cfg); + +#define DPDMUX_MAX_KEY_SIZE 56 + +enum dpdmux_table_type { + DPDMUX_DMAT_TABLE = 1, + DPDMUX_MISS_TABLE = 2, + DPDMUX_PRUNE_TABLE = 3, +}; + +int dpdmux_dump_table(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + uint16_t table_type, + uint16_t table_index, + uint64_t iova_addr, + uint32_t iova_size, + uint16_t *num_entries); + /** - * Discard bit. This bit must be used together with other bits in - * DPDMUX_ERROR_ACTION_CONTINUE to disable discarding of frames containing - * errors + * Discard bit. 
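A minimal sketch (not part of the patch) of the new dpdmux taildrop interface declared above, using the struct and enum added in fsl_dpdmux.h; the threshold value and interface id are placeholders.

static int example_set_mux_taildrop(struct fsl_mc_io *mc_io, uint16_t token,
				    uint16_t if_id)
{
	struct dpdmux_taildrop_cfg cfg = {
		.enable = 1,
		.units = DPDMUX_TAILDROP_DROP_UNIT_FRAMES,
		.threshold = 512,	/* placeholder frame count */
	};

	return dpdmux_if_set_taildrop(mc_io, 0, token, if_id, &cfg);
}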
This bit must be used together with other bits in DPDMUX_ERROR_ACTION_CONTINUE + * to disable discarding of frames containing errors */ #define DPDMUX_ERROR_DISC 0x80000000 /** @@ -549,6 +593,22 @@ int dpdmux_get_api_version(struct fsl_mc_io *mc_io, */ #define DPDMUX__ERROR_L4CE 0x00000001 +#define DPDMUX_ALL_ERRORS (DPDMUX__ERROR_L4CE | \ + DPDMUX__ERROR_L4CV | \ + DPDMUX__ERROR_L3CE | \ + DPDMUX__ERROR_L3CV | \ + DPDMUX_ERROR_BLE | \ + DPDMUX_ERROR_PHE | \ + DPDMUX_ERROR_ISP | \ + DPDMUX_ERROR_PTE | \ + DPDMUX_ERROR_FPE | \ + DPDMUX_ERROR_FLE | \ + DPDMUX_ERROR_PIEE | \ + DPDMUX_ERROR_TIDE | \ + DPDMUX_ERROR_MNLE | \ + DPDMUX_ERROR_EOFHE | \ + DPDMUX_ERROR_KSE) + enum dpdmux_error_action { DPDMUX_ERROR_ACTION_DISCARD = 0, DPDMUX_ERROR_ACTION_CONTINUE = 1 @@ -567,4 +627,19 @@ struct dpdmux_error_cfg { int dpdmux_if_set_errors_behavior(struct fsl_mc_io *mc_io, uint32_t cmd_flags, uint16_t token, uint16_t if_id, struct dpdmux_error_cfg *cfg); +/** + * SP Profile on Ingress DPDMUX + */ +#define DPDMUX_SP_PROFILE_INGRESS 0x1 +/** + * SP Profile on Egress DPDMUX + */ +#define DPDMUX_SP_PROFILE_EGRESS 0x2 + +int dpdmux_set_sp_profile(struct fsl_mc_io *mc_io, uint32_t cmd_flags, uint16_t token, + uint8_t sp_profile[], uint8_t type); + +int dpdmux_sp_enable(struct fsl_mc_io *mc_io, uint32_t cmd_flags, uint16_t token, + uint16_t if_id, uint8_t type, uint8_t en); + #endif /* __FSL_DPDMUX_H */ diff --git a/drivers/net/dpaa2/mc/fsl_dpdmux_cmd.h b/drivers/net/dpaa2/mc/fsl_dpdmux_cmd.h index bf6b8a20d1..a94f1bf91a 100644 --- a/drivers/net/dpaa2/mc/fsl_dpdmux_cmd.h +++ b/drivers/net/dpaa2/mc/fsl_dpdmux_cmd.h @@ -1,7 +1,7 @@ /* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) * * Copyright 2013-2016 Freescale Semiconductor Inc. - * Copyright 2018-2021 NXP + * Copyright 2018-2023 NXP * */ #ifndef _FSL_DPDMUX_CMD_H @@ -9,7 +9,7 @@ /* DPDMUX Version */ #define DPDMUX_VER_MAJOR 6 -#define DPDMUX_VER_MINOR 9 +#define DPDMUX_VER_MINOR 10 #define DPDMUX_CMD_BASE_VERSION 1 #define DPDMUX_CMD_VERSION_2 2 @@ -63,8 +63,17 @@ #define DPDMUX_CMDID_SET_RESETABLE DPDMUX_CMD(0x0ba) #define DPDMUX_CMDID_GET_RESETABLE DPDMUX_CMD(0x0bb) + +#define DPDMUX_CMDID_IF_SET_TAILDROP DPDMUX_CMD(0x0bc) +#define DPDMUX_CMDID_IF_GET_TAILDROP DPDMUX_CMD(0x0bd) + +#define DPDMUX_CMDID_DUMP_TABLE DPDMUX_CMD(0x0be) + #define DPDMUX_CMDID_SET_ERRORS_BEHAVIOR DPDMUX_CMD(0x0bf) +#define DPDMUX_CMDID_SET_SP_PROFILE DPDMUX_CMD(0x0c0) +#define DPDMUX_CMDID_SP_ENABLE DPDMUX_CMD(0x0c1) + #define DPDMUX_MASK(field) \ GENMASK(DPDMUX_##field##_SHIFT + DPDMUX_##field##_SIZE - 1, \ DPDMUX_##field##_SHIFT) @@ -241,7 +250,7 @@ struct dpdmux_cmd_remove_custom_cls_entry { }; #define DPDMUX_SKIP_RESET_FLAGS_SHIFT 0 -#define DPDMUX_SKIP_RESET_FLAGS_SIZE 3 +#define DPDMUX_SKIP_RESET_FLAGS_SIZE 4 struct dpdmux_cmd_set_skip_reset_flags { uint8_t skip_reset_flags; @@ -251,6 +260,61 @@ struct dpdmux_rsp_get_skip_reset_flags { uint8_t skip_reset_flags; }; +struct dpdmux_cmd_set_taildrop { + uint32_t pad1; + uint16_t if_id; + uint16_t pad2; + uint16_t oal_en; + uint8_t units; + uint8_t pad3; + uint32_t threshold; +}; + +struct dpdmux_cmd_get_taildrop { + uint32_t pad1; + uint16_t if_id; +}; + +struct dpdmux_rsp_get_taildrop { + uint16_t pad1; + uint16_t pad2; + uint16_t if_id; + uint16_t pad3; + uint16_t oal_en; + uint8_t units; + uint8_t pad4; + uint32_t threshold; +}; + +struct dpdmux_cmd_dump_table { + uint16_t table_type; + uint16_t table_index; + uint32_t pad0; + uint64_t iova_addr; + uint32_t iova_size; +}; + +struct dpdmux_rsp_dump_table { + uint16_t 
num_entries; +}; + +struct dpdmux_dump_table_header { + uint16_t table_type; + uint16_t table_num_entries; + uint16_t table_max_entries; + uint8_t default_action; + uint8_t match_type; + uint8_t reserved[24]; +}; + +struct dpdmux_dump_table_entry { + uint8_t key[DPDMUX_MAX_KEY_SIZE]; + uint8_t mask[DPDMUX_MAX_KEY_SIZE]; + uint8_t key_action; + uint16_t result[3]; + uint8_t reserved[21]; +}; + #define DPDMUX_ERROR_ACTION_SHIFT 0 #define DPDMUX_ERROR_ACTION_SIZE 4 @@ -260,5 +324,18 @@ struct dpdmux_cmd_set_errors_behavior { uint16_t if_id; }; +#define MAX_SP_PROFILE_ID_SIZE 8 + +struct dpdmux_cmd_set_sp_profile { + uint8_t sp_profile[MAX_SP_PROFILE_ID_SIZE]; + uint8_t type; +}; + +struct dpdmux_cmd_sp_enable { + uint16_t if_id; + uint8_t type; + uint8_t en; +}; + #pragma pack(pop) #endif /* _FSL_DPDMUX_CMD_H */ diff --git a/drivers/net/dpaa2/mc/fsl_dpkg.h b/drivers/net/dpaa2/mc/fsl_dpkg.h index 70f2339ea5..834c765513 100644 --- a/drivers/net/dpaa2/mc/fsl_dpkg.h +++ b/drivers/net/dpaa2/mc/fsl_dpkg.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) * Copyright 2013-2015 Freescale Semiconductor Inc. - * Copyright 2016-2021 NXP + * Copyright 2016-2023 NXP * */ #ifndef __FSL_DPKG_H_ @@ -180,7 +180,8 @@ struct dpni_ext_set_rx_tc_dist { struct dpni_dist_extract extracts[DPKG_MAX_NUM_OF_EXTRACTS]; }; -int dpkg_prepare_key_cfg(const struct dpkg_profile_cfg *cfg, - uint8_t *key_cfg_buf); +int +dpkg_prepare_key_cfg(const struct dpkg_profile_cfg *cfg, + void *key_cfg_buf); #endif /* __FSL_DPKG_H_ */ diff --git a/drivers/net/dpaa2/mc/fsl_dpni.h b/drivers/net/dpaa2/mc/fsl_dpni.h index ce84f4265e..3a5fcfa8a5 100644 --- a/drivers/net/dpaa2/mc/fsl_dpni.h +++ b/drivers/net/dpaa2/mc/fsl_dpni.h @@ -1,7 +1,7 @@ /* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) * * Copyright 2013-2016 Freescale Semiconductor Inc. - * Copyright 2016-2021 NXP + * Copyright 2016-2023 NXP * */ #ifndef __FSL_DPNI_H @@ -116,6 +116,11 @@ struct fsl_mc_io; * Flow steering table is shared between all traffic classes */ #define DPNI_OPT_SHARED_FS 0x001000 +/* + * Fq frame data, context and annotations stashing disable. + * The stashing is enabled by default. + */ +#define DPNI_OPT_STASHING_DIS 0x002000 /** * Software sequence maximum layout size */ @@ -147,6 +152,7 @@ int dpni_close(struct fsl_mc_io *mc_io, * DPNI_OPT_HAS_KEY_MASKING * DPNI_OPT_NO_FS * DPNI_OPT_SINGLE_SENDER + * DPNI_OPT_STASHING_DIS * @fs_entries: Number of entries in the flow steering table. * This table is used to select the ingress queue for * ingress traffic, targeting a GPP core or another. @@ -335,6 +341,7 @@ int dpni_clear_irq_status(struct fsl_mc_io *mc_io, * DPNI_OPT_SHARED_CONGESTION * DPNI_OPT_HAS_KEY_MASKING * DPNI_OPT_NO_FS + * DPNI_OPT_STASHING_DIS * @num_queues: Number of Tx and Rx queues used for traffic distribution. * @num_rx_tcs: Number of RX traffic classes (TCs), reserved for the DPNI. * @num_tx_tcs: Number of TX traffic classes (TCs), reserved for the DPNI. @@ -394,7 +401,7 @@ int dpni_get_attributes(struct fsl_mc_io *mc_io, * error queue. To be used in dpni_set_errors_behavior() only if error_action * parameter is set to DPNI_ERROR_ACTION_SEND_TO_ERROR_QUEUE. 
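A hedged sketch (not part of the patch) of dpdmux_dump_table() as declared above: the snapshot buffer must be zeroed and DMA-able, and on success it starts with struct dpdmux_dump_table_header followed by an array of struct dpdmux_dump_table_entry. rte_zmalloc()/rte_mem_virt2iova() are used here for brevity; the dpaa2 driver would normally use its own DMA translation helpers, and the 4 KiB size is a placeholder.

static int example_dump_mux_miss_table(struct fsl_mc_io *mc_io, uint16_t token)
{
	const uint32_t size = 4096;	/* placeholder snapshot size */
	uint16_t num_entries = 0;
	void *buf;
	int err;

	buf = rte_zmalloc(NULL, size, RTE_CACHE_LINE_SIZE);	/* zeroed, DMA-able */
	if (buf == NULL)
		return -ENOMEM;

	err = dpdmux_dump_table(mc_io, 0, token, DPDMUX_MISS_TABLE, 0,
				rte_mem_virt2iova(buf), size, &num_entries);
	/* on success buf holds a dpdmux_dump_table_header followed by
	 * num_entries dpdmux_dump_table_entry records (truncated if too small) */
	rte_free(buf);
	return err;
}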
*/ -#define DPNI_ERROR_DISC 0x80000000 +#define DPNI_ERROR_DISC 0x80000000 /** * Extract out of frame header error @@ -576,6 +583,8 @@ enum dpni_offload { DPNI_OFF_TX_L3_CSUM, DPNI_OFF_TX_L4_CSUM, DPNI_FLCTYPE_HASH, + DPNI_HEADER_STASHING, + DPNI_PAYLOAD_STASHING, }; int dpni_set_offload(struct fsl_mc_io *mc_io, @@ -596,6 +605,26 @@ int dpni_get_qdid(struct fsl_mc_io *mc_io, enum dpni_queue_type qtype, uint16_t *qdid); +int dpni_get_qdid_ex(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + enum dpni_queue_type qtype, + uint16_t *qdid); + +/** + * struct dpni_sp_info - Structure representing DPNI storage-profile information + * (relevant only for DPNI owned by AIOP) + * @spids: array of storage-profiles + */ +struct dpni_sp_info { + uint16_t spids[DPNI_MAX_SP]; +}; + +int dpni_get_sp_info(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + struct dpni_sp_info *sp_info); + int dpni_get_tx_data_offset(struct fsl_mc_io *mc_io, uint32_t cmd_flags, uint16_t token, @@ -1443,11 +1472,25 @@ enum dpni_confirmation_mode { int dpni_set_tx_confirmation_mode(struct fsl_mc_io *mc_io, uint32_t cmd_flags, uint16_t token, + uint8_t ceetm_ch_idx, enum dpni_confirmation_mode mode); int dpni_get_tx_confirmation_mode(struct fsl_mc_io *mc_io, uint32_t cmd_flags, uint16_t token, + uint8_t ceetm_ch_idx, + enum dpni_confirmation_mode *mode); + +int dpni_set_queue_tx_confirmation_mode(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + uint8_t ceetm_ch_idx, uint8_t index, + enum dpni_confirmation_mode mode); + +int dpni_get_queue_tx_confirmation_mode(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + uint8_t ceetm_ch_idx, uint8_t index, enum dpni_confirmation_mode *mode); /** @@ -1841,6 +1884,60 @@ void dpni_extract_sw_sequence_layout(struct dpni_sw_sequence_layout *layout, const uint8_t *sw_sequence_layout_buf); /** + * When used for queue_idx in function dpni_set_rx_dist_default_queue will signal to dpni + * to drop all unclassified frames + */ +#define DPNI_FS_MISS_DROP ((uint16_t)-1) + +/** + * struct dpni_rx_dist_cfg - distribution configuration + * @dist_size: distribution size; supported values: 1,2,3,4,6,7,8, + * 12,14,16,24,28,32,48,56,64,96,112,128,192,224,256,384,448, + * 512,768,896,1024 + * @key_cfg_iova: I/O virtual address of 256 bytes DMA-able memory filled with + * the extractions to be used for the distribution key by calling + * dpkg_prepare_key_cfg() relevant only when enable!=0 otherwise it can be '0' + * @enable: enable/disable the distribution. + * @tc: TC id for which distribution is set + * @fs_miss_flow_id: when packet misses all rules from flow steering table and hash is + * disabled it will be put into this queue id; use DPNI_FS_MISS_DROP to drop + * frames. 
The value of this field is used only when flow steering distribution + * is enabled and hash distribution is disabled + */ +struct dpni_rx_dist_cfg { + uint16_t dist_size; + uint64_t key_cfg_iova; + uint8_t enable; + uint8_t tc; + uint16_t fs_miss_flow_id; +}; + +int dpni_set_rx_fs_dist(struct fsl_mc_io *mc_io, uint32_t cmd_flags, uint16_t token, + const struct dpni_rx_dist_cfg *cfg); + +int dpni_set_rx_hash_dist(struct fsl_mc_io *mc_io, uint32_t cmd_flags, uint16_t token, + const struct dpni_rx_dist_cfg *cfg); + +int dpni_add_custom_tpid(struct fsl_mc_io *mc_io, uint32_t cmd_flags, uint16_t token, + uint16_t tpid); + +int dpni_remove_custom_tpid(struct fsl_mc_io *mc_io, uint32_t cmd_flags, uint16_t token, + uint16_t tpid); + +/** + * struct dpni_custom_tpid_cfg - custom TPID configuration. Contains custom TPID values + * used in current dpni object to detect 802.1q frames. + * @tpid1: first tag. Not used if zero. + * @tpid2: second tag. Not used if zero. + */ +struct dpni_custom_tpid_cfg { + uint16_t tpid1; + uint16_t tpid2; +}; + +int dpni_get_custom_tpid(struct fsl_mc_io *mc_io, uint32_t cmd_flags, uint16_t token, + struct dpni_custom_tpid_cfg *tpid); +/* * struct dpni_ptp_cfg - configure single step PTP (IEEE 1588) * @en: enable single step PTP. When enabled the PTPv1 functionality will * not work. If the field is zero, offset and ch_update parameters @@ -1858,6 +1955,7 @@ struct dpni_single_step_cfg { uint8_t ch_update; uint16_t offset; uint32_t peer_delay; + uint32_t ptp_onestep_reg_base; }; int dpni_set_single_step_cfg(struct fsl_mc_io *mc_io, uint32_t cmd_flags, @@ -1885,61 +1983,35 @@ int dpni_set_port_cfg(struct fsl_mc_io *mc_io, uint32_t cmd_flags, int dpni_get_port_cfg(struct fsl_mc_io *mc_io, uint32_t cmd_flags, uint16_t token, struct dpni_port_cfg *port_cfg); -/** - * When used for queue_idx in function dpni_set_rx_dist_default_queue will - * signal to dpni to drop all unclassified frames - */ -#define DPNI_FS_MISS_DROP ((uint16_t)-1) - -/** - * struct dpni_rx_dist_cfg - distribution configuration - * @dist_size: distribution size; supported values: 1,2,3,4,6,7,8, - * 12,14,16,24,28,32,48,56,64,96,112,128,192,224,256,384,448, - * 512,768,896,1024 - * @key_cfg_iova: I/O virtual address of 256 bytes DMA-able memory filled with - * the extractions to be used for the distribution key by calling - * dpkg_prepare_key_cfg() relevant only when enable!=0 otherwise - * it can be '0' - * @enable: enable/disable the distribution. - * @tc: TC id for which distribution is set - * @fs_miss_flow_id: when packet misses all rules from flow steering table and - * hash is disabled it will be put into this queue id; use - * DPNI_FS_MISS_DROP to drop frames. 
The value of this field is - * used only when flow steering distribution is enabled and hash - * distribution is disabled - */ -struct dpni_rx_dist_cfg { - uint16_t dist_size; - uint64_t key_cfg_iova; - uint8_t enable; - uint8_t tc; - uint16_t fs_miss_flow_id; +enum dpni_table_type { + DPNI_FS_TABLE = 1, + DPNI_MAC_TABLE = 2, + DPNI_QOS_TABLE = 3, + DPNI_VLAN_TABLE = 4, }; -int dpni_set_rx_fs_dist(struct fsl_mc_io *mc_io, uint32_t cmd_flags, - uint16_t token, const struct dpni_rx_dist_cfg *cfg); - -int dpni_set_rx_hash_dist(struct fsl_mc_io *mc_io, uint32_t cmd_flags, - uint16_t token, const struct dpni_rx_dist_cfg *cfg); - -int dpni_add_custom_tpid(struct fsl_mc_io *mc_io, uint32_t cmd_flags, - uint16_t token, uint16_t tpid); - -int dpni_remove_custom_tpid(struct fsl_mc_io *mc_io, uint32_t cmd_flags, - uint16_t token, uint16_t tpid); +int dpni_dump_table(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + uint16_t table_type, + uint16_t table_index, + uint64_t iova_addr, + uint32_t iova_size, + uint16_t *num_entries); /** - * struct dpni_custom_tpid_cfg - custom TPID configuration. Contains custom TPID - * values used in current dpni object to detect 802.1q frames. - * @tpid1: first tag. Not used if zero. - * @tpid2: second tag. Not used if zero. + * SP Profile on Ingress DPNI */ -struct dpni_custom_tpid_cfg { - uint16_t tpid1; - uint16_t tpid2; -}; +#define DPNI_SP_PROFILE_INGRESS 0x1 +/** + * SP Profile on Egress DPNI + */ +#define DPNI_SP_PROFILE_EGRESS 0x2 + +int dpni_set_sp_profile(struct fsl_mc_io *mc_io, uint32_t cmd_flags, uint16_t token, + uint8_t sp_profile[], uint8_t type); -int dpni_get_custom_tpid(struct fsl_mc_io *mc_io, uint32_t cmd_flags, - uint16_t token, struct dpni_custom_tpid_cfg *tpid); +int dpni_sp_enable(struct fsl_mc_io *mc_io, uint32_t cmd_flags, uint16_t token, + uint8_t type, uint8_t en); #endif /* __FSL_DPNI_H */ diff --git a/drivers/net/dpaa2/mc/fsl_dpni_cmd.h b/drivers/net/dpaa2/mc/fsl_dpni_cmd.h index 781f936add..1152182e34 100644 --- a/drivers/net/dpaa2/mc/fsl_dpni_cmd.h +++ b/drivers/net/dpaa2/mc/fsl_dpni_cmd.h @@ -1,7 +1,7 @@ /* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) * * Copyright 2013-2016 Freescale Semiconductor Inc. 
- * Copyright 2016-2022 NXP + * Copyright 2016-2023 NXP * */ #ifndef _FSL_DPNI_CMD_H @@ -9,7 +9,7 @@ /* DPNI Version */ #define DPNI_VER_MAJOR 8 -#define DPNI_VER_MINOR 2 +#define DPNI_VER_MINOR 4 #define DPNI_CMD_BASE_VERSION 1 #define DPNI_CMD_VERSION_2 2 @@ -108,8 +108,8 @@ #define DPNI_CMDID_GET_EARLY_DROP DPNI_CMD_V3(0x26A) #define DPNI_CMDID_GET_OFFLOAD DPNI_CMD_V2(0x26B) #define DPNI_CMDID_SET_OFFLOAD DPNI_CMD_V2(0x26C) -#define DPNI_CMDID_SET_TX_CONFIRMATION_MODE DPNI_CMD(0x266) -#define DPNI_CMDID_GET_TX_CONFIRMATION_MODE DPNI_CMD(0x26D) +#define DPNI_CMDID_SET_TX_CONFIRMATION_MODE DPNI_CMD_V2(0x266) +#define DPNI_CMDID_GET_TX_CONFIRMATION_MODE DPNI_CMD_V2(0x26D) #define DPNI_CMDID_SET_OPR DPNI_CMD_V2(0x26e) #define DPNI_CMDID_GET_OPR DPNI_CMD_V2(0x26f) #define DPNI_CMDID_LOAD_SW_SEQUENCE DPNI_CMD(0x270) @@ -121,7 +121,16 @@ #define DPNI_CMDID_REMOVE_CUSTOM_TPID DPNI_CMD(0x276) #define DPNI_CMDID_GET_CUSTOM_TPID DPNI_CMD(0x277) #define DPNI_CMDID_GET_LINK_CFG DPNI_CMD(0x278) +#define DPNI_CMDID_SET_SINGLE_STEP_CFG DPNI_CMD(0x279) +#define DPNI_CMDID_GET_SINGLE_STEP_CFG DPNI_CMD_V2(0x27a) #define DPNI_CMDID_SET_PORT_CFG DPNI_CMD(0x27B) +#define DPNI_CMDID_GET_PORT_CFG DPNI_CMD(0x27C) +#define DPNI_CMDID_DUMP_TABLE DPNI_CMD(0x27D) +#define DPNI_CMDID_SET_SP_PROFILE DPNI_CMD(0x27E) +#define DPNI_CMDID_GET_QDID_EX DPNI_CMD(0x27F) +#define DPNI_CMDID_SP_ENABLE DPNI_CMD(0x280) +#define DPNI_CMDID_SET_QUEUE_TX_CONFIRMATION_MODE DPNI_CMD(0x281) +#define DPNI_CMDID_GET_QUEUE_TX_CONFIRMATION_MODE DPNI_CMD(0x282) /* Macros for accessing command fields smaller than 1byte */ #define DPNI_MASK(field) \ @@ -329,6 +338,10 @@ struct dpni_rsp_get_qdid { uint16_t qdid; }; +struct dpni_rsp_get_qdid_ex { + uint16_t qdid[16]; +}; + struct dpni_rsp_get_sp_info { uint16_t spids[2]; }; @@ -748,7 +761,16 @@ struct dpni_cmd_set_taildrop { }; struct dpni_tx_confirmation_mode { - uint32_t pad; + uint8_t ceetm_ch_idx; + uint8_t pad1; + uint16_t pad2; + uint8_t confirmation_mode; +}; + +struct dpni_queue_tx_confirmation_mode { + uint8_t ceetm_ch_idx; + uint8_t index; + uint16_t pad; uint8_t confirmation_mode; }; @@ -894,6 +916,42 @@ struct dpni_sw_sequence_layout_entry { uint16_t pad; }; +#define DPNI_RX_FS_DIST_ENABLE_SHIFT 0 +#define DPNI_RX_FS_DIST_ENABLE_SIZE 1 +struct dpni_cmd_set_rx_fs_dist { + uint16_t dist_size; + uint8_t enable; + uint8_t tc; + uint16_t miss_flow_id; + uint16_t pad1; + uint64_t key_cfg_iova; +}; + +#define DPNI_RX_HASH_DIST_ENABLE_SHIFT 0 +#define DPNI_RX_HASH_DIST_ENABLE_SIZE 1 +struct dpni_cmd_set_rx_hash_dist { + uint16_t dist_size; + uint8_t enable; + uint8_t tc_id; + uint32_t pad; + uint64_t key_cfg_iova; +}; + +struct dpni_cmd_add_custom_tpid { + uint16_t pad; + uint16_t tpid; +}; + +struct dpni_cmd_remove_custom_tpid { + uint16_t pad; + uint16_t tpid; +}; + +struct dpni_rsp_get_custom_tpid { + uint16_t tpid1; + uint16_t tpid2; +}; + #define DPNI_PTP_ENABLE_SHIFT 0 #define DPNI_PTP_ENABLE_SIZE 1 #define DPNI_PTP_CH_UPDATE_SHIFT 1 @@ -925,40 +983,45 @@ struct dpni_rsp_get_port_cfg { uint32_t bit_params; }; -#define DPNI_RX_FS_DIST_ENABLE_SHIFT 0 -#define DPNI_RX_FS_DIST_ENABLE_SIZE 1 -struct dpni_cmd_set_rx_fs_dist { - uint16_t dist_size; - uint8_t enable; - uint8_t tc; - uint16_t miss_flow_id; - uint16_t pad1; - uint64_t key_cfg_iova; +struct dpni_cmd_dump_table { + uint16_t table_type; + uint16_t table_index; + uint32_t pad0; + uint64_t iova_addr; + uint32_t iova_size; }; -#define DPNI_RX_HASH_DIST_ENABLE_SHIFT 0 -#define DPNI_RX_HASH_DIST_ENABLE_SIZE 1 -struct 
dpni_cmd_set_rx_hash_dist { - uint16_t dist_size; - uint8_t enable; - uint8_t tc_id; - uint32_t pad; - uint64_t key_cfg_iova; +struct dpni_rsp_dump_table { + uint16_t num_entries; }; -struct dpni_cmd_add_custom_tpid { - uint16_t pad; - uint16_t tpid; +struct dump_table_header { + uint16_t table_type; + uint16_t table_num_entries; + uint16_t table_max_entries; + uint8_t default_action; + uint8_t match_type; + uint8_t reserved[24]; }; -struct dpni_cmd_remove_custom_tpid { - uint16_t pad; - uint16_t tpid; +struct dump_table_entry { + uint8_t key[DPNI_MAX_KEY_SIZE]; + uint8_t mask[DPNI_MAX_KEY_SIZE]; + uint8_t key_action; + uint16_t result[3]; + uint8_t reserved[21]; }; -struct dpni_rsp_get_custom_tpid { - uint16_t tpid1; - uint16_t tpid2; +#define MAX_SP_PROFILE_ID_SIZE 8 + +struct dpni_cmd_set_sp_profile { + uint8_t sp_profile[MAX_SP_PROFILE_ID_SIZE]; + uint8_t type; +}; + +struct dpni_cmd_sp_enable { + uint8_t type; + uint8_t en; }; #pragma pack(pop) diff --git a/drivers/net/dpaa2/rte_pmd_dpaa2.h b/drivers/net/dpaa2/rte_pmd_dpaa2.h index a1152eb717..237c3cd6e7 100644 --- a/drivers/net/dpaa2/rte_pmd_dpaa2.h +++ b/drivers/net/dpaa2/rte_pmd_dpaa2.h @@ -1,5 +1,5 @@ /* SPDX-License-Identifier: BSD-3-Clause - * Copyright 2018-2021 NXP + * Copyright 2018-2024 NXP */ #ifndef _RTE_PMD_DPAA2_H @@ -26,12 +26,36 @@ * Associated actions. * * @return - * A valid handle in case of success, NULL otherwise. + * 0 in case of success, otherwise failure. */ -struct rte_flow * +int rte_pmd_dpaa2_mux_flow_create(uint32_t dpdmux_id, - struct rte_flow_item *pattern[], - struct rte_flow_action *actions[]); + struct rte_flow_item pattern[], + struct rte_flow_action actions[]); +int +rte_pmd_dpaa2_mux_flow_destroy(uint32_t dpdmux_id, + uint16_t entry_index); +int +rte_pmd_dpaa2_mux_flow_l2(uint32_t dpdmux_id, + uint8_t mac_addr[6], uint16_t vlan_id, int dest_if); + +/** + * @warning + * @b EXPERIMENTAL: this API may change, or be removed, without prior notice + * + * Dump demultiplex ethernet traffic counters + * + * @param f + * output stream + * @param dpdmux_id + * ID of the DPDMUX MC object. 
+ * @param num_if + * number of interface in dpdmux object + * + */ +__rte_experimental +void +rte_pmd_dpaa2_mux_dump_counter(FILE *f, uint32_t dpdmux_id, int num_if); /** * @warning @@ -102,4 +126,21 @@ rte_pmd_dpaa2_thread_init(void); __rte_experimental uint32_t rte_pmd_dpaa2_get_tlu_hash(uint8_t *key, int size); + +__rte_experimental +int +rte_pmd_dpaa2_dev_is_dpaa2(uint32_t eth_id); +__rte_experimental +const char * +rte_pmd_dpaa2_ep_name(uint32_t eth_id); + +#if defined(RTE_LIBRTE_IEEE1588) +__rte_experimental +int +rte_pmd_dpaa2_set_one_step_ts(uint16_t port_id, uint16_t offset, uint8_t ch_update); + +__rte_experimental +int +rte_pmd_dpaa2_get_one_step_ts(uint16_t port_id, bool mc_query); +#endif #endif /* _RTE_PMD_DPAA2_H */ diff --git a/drivers/net/dpaa2/version.map b/drivers/net/dpaa2/version.map index ba756d26bd..35815f7777 100644 --- a/drivers/net/dpaa2/version.map +++ b/drivers/net/dpaa2/version.map @@ -16,6 +16,12 @@ EXPERIMENTAL { rte_pmd_dpaa2_thread_init; # added in 21.11 rte_pmd_dpaa2_get_tlu_hash; + # added in 24.11 + rte_pmd_dpaa2_dev_is_dpaa2; + rte_pmd_dpaa2_ep_name; + rte_pmd_dpaa2_set_one_step_ts; + rte_pmd_dpaa2_get_one_step_ts; + rte_pmd_dpaa2_mux_dump_counter; }; INTERNAL { diff --git a/drivers/net/e1000/base/meson.build b/drivers/net/e1000/base/meson.build index 528a33f958..6d6048488f 100644 --- a/drivers/net/e1000/base/meson.build +++ b/drivers/net/e1000/base/meson.build @@ -22,9 +22,10 @@ sources = [ 'e1000_vf.c', ] -error_cflags = ['-Wno-uninitialized', '-Wno-unused-parameter', - '-Wno-unused-variable', '-Wno-misleading-indentation', - '-Wno-implicit-fallthrough'] +error_cflags = [ + '-Wno-unused-parameter', + '-Wno-implicit-fallthrough', +] c_args = cflags foreach flag: error_cflags if cc.has_argument(flag) diff --git a/drivers/net/e1000/igb_rxtx.c b/drivers/net/e1000/igb_rxtx.c index d61eaad2de..4276bb6d31 100644 --- a/drivers/net/e1000/igb_rxtx.c +++ b/drivers/net/e1000/igb_rxtx.c @@ -1868,6 +1868,7 @@ igb_dev_clear_queues(struct rte_eth_dev *dev) struct igb_rx_queue *rxq; for (i = 0; i < dev->data->nb_tx_queues; i++) { + __rte_assume(i < RTE_MAX_QUEUES_PER_PORT); txq = dev->data->tx_queues[i]; if (txq != NULL) { igb_tx_queue_release_mbufs(txq); @@ -1877,6 +1878,7 @@ igb_dev_clear_queues(struct rte_eth_dev *dev) } for (i = 0; i < dev->data->nb_rx_queues; i++) { + __rte_assume(i < RTE_MAX_QUEUES_PER_PORT); rxq = dev->data->rx_queues[i]; if (rxq != NULL) { igb_rx_queue_release_mbufs(rxq); diff --git a/drivers/net/ena/ena_ethdev.c b/drivers/net/ena/ena_ethdev.c index 1dec54fb58..bba48ea1cc 100644 --- a/drivers/net/ena/ena_ethdev.c +++ b/drivers/net/ena/ena_ethdev.c @@ -22,7 +22,7 @@ #include #define DRV_MODULE_VER_MAJOR 2 -#define DRV_MODULE_VER_MINOR 10 +#define DRV_MODULE_VER_MINOR 11 #define DRV_MODULE_VER_SUBMINOR 0 #define __MERGE_64B_H_L(h, l) (((uint64_t)h << 32) | l) @@ -162,6 +162,8 @@ static const struct ena_stats ena_stats_rx_strings[] = { ENA_STAT_RX_ENTRY(mbuf_alloc_fail), ENA_STAT_RX_ENTRY(bad_desc_num), ENA_STAT_RX_ENTRY(bad_req_id), + ENA_STAT_RX_ENTRY(bad_desc), + ENA_STAT_RX_ENTRY(unknown_error), }; #define ENA_STATS_ARRAY_GLOBAL ARRAY_SIZE(ena_stats_global_strings) @@ -1262,7 +1264,9 @@ static int ena_stats_get(struct rte_eth_dev *dev, stats->q_ibytes[i] = rx_stats->bytes; stats->q_ipackets[i] = rx_stats->cnt; stats->q_errors[i] = rx_stats->bad_desc_num + - rx_stats->bad_req_id; + rx_stats->bad_req_id + + rx_stats->bad_desc + + rx_stats->unknown_error; } max_rings_stats = RTE_MIN(dev->data->nb_tx_queues, @@ -2772,6 +2776,7 @@ static uint16_t 
eth_ena_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, struct ena_ring *rx_ring = (struct ena_ring *)(rx_queue); unsigned int free_queue_entries; uint16_t next_to_clean = rx_ring->next_to_clean; + enum ena_regs_reset_reason_types reset_reason; uint16_t descs_in_use; struct rte_mbuf *mbuf; uint16_t completed; @@ -2804,15 +2809,25 @@ static uint16_t eth_ena_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, PMD_RX_LOG_LINE(ERR, "Failed to get the packet from the device, rc: %d", rc); - if (rc == ENA_COM_NO_SPACE) { + switch (rc) { + case ENA_COM_NO_SPACE: ++rx_ring->rx_stats.bad_desc_num; - ena_trigger_reset(rx_ring->adapter, - ENA_REGS_RESET_TOO_MANY_RX_DESCS); - } else { + reset_reason = ENA_REGS_RESET_TOO_MANY_RX_DESCS; + break; + case ENA_COM_FAULT: + ++rx_ring->rx_stats.bad_desc; + reset_reason = ENA_REGS_RESET_RX_DESCRIPTOR_MALFORMED; + break; + case ENA_COM_EIO: ++rx_ring->rx_stats.bad_req_id; - ena_trigger_reset(rx_ring->adapter, - ENA_REGS_RESET_INV_RX_REQ_ID); + reset_reason = ENA_REGS_RESET_INV_RX_REQ_ID; + break; + default: + ++rx_ring->rx_stats.unknown_error; + reset_reason = ENA_REGS_RESET_DRIVER_INVALID_STATE; + break; } + ena_trigger_reset(rx_ring->adapter, reset_reason); return 0; } diff --git a/drivers/net/ena/ena_ethdev.h b/drivers/net/ena/ena_ethdev.h index fe7d4a2d65..b8aead8f46 100644 --- a/drivers/net/ena/ena_ethdev.h +++ b/drivers/net/ena/ena_ethdev.h @@ -140,6 +140,8 @@ struct ena_stats_rx { u64 mbuf_alloc_fail; u64 bad_desc_num; u64 bad_req_id; + u64 bad_desc; + u64 unknown_error; }; struct __rte_cache_aligned ena_ring { diff --git a/drivers/net/enetfec/enet_ethdev.c b/drivers/net/enetfec/enet_ethdev.c index 8c7067fbb5..91c0f60490 100644 --- a/drivers/net/enetfec/enet_ethdev.c +++ b/drivers/net/enetfec/enet_ethdev.c @@ -6,6 +6,7 @@ #include #include +#include #include #include "enet_pmd_logs.h" @@ -374,7 +375,7 @@ enetfec_tx_queue_setup(struct rte_eth_dev *dev, unsigned int size; unsigned int dsize = fep->bufdesc_ex ? sizeof(struct bufdesc_ex) : sizeof(struct bufdesc); - unsigned int dsize_log2 = fls64(dsize); + unsigned int dsize_log2 = rte_fls_u64(dsize) - 1; /* Tx deferred start is not supported */ if (tx_conf->tx_deferred_start) { @@ -453,7 +454,7 @@ enetfec_rx_queue_setup(struct rte_eth_dev *dev, unsigned int size; unsigned int dsize = fep->bufdesc_ex ? 
sizeof(struct bufdesc_ex) : sizeof(struct bufdesc); - unsigned int dsize_log2 = fls64(dsize); + unsigned int dsize_log2 = rte_fls_u64(dsize) - 1; /* Rx deferred start is not supported */ if (rx_conf->rx_deferred_start) { diff --git a/drivers/net/enetfec/enet_ethdev.h b/drivers/net/enetfec/enet_ethdev.h index 02a3397890..4e196b8552 100644 --- a/drivers/net/enetfec/enet_ethdev.h +++ b/drivers/net/enetfec/enet_ethdev.h @@ -125,12 +125,6 @@ bufdesc *enet_get_nextdesc(struct bufdesc *bdp, struct bufdesc_prop *bd) : (struct bufdesc *)(((uintptr_t)bdp) + bd->d_size); } -static inline int -fls64(unsigned long word) -{ - return (64 - __builtin_clzl(word)) - 1; -} - static inline struct bufdesc *enet_get_prevdesc(struct bufdesc *bdp, struct bufdesc_prop *bd) { diff --git a/drivers/net/failsafe/failsafe_ops.c b/drivers/net/failsafe/failsafe_ops.c index 9c013e0419..5321c3385c 100644 --- a/drivers/net/failsafe/failsafe_ops.c +++ b/drivers/net/failsafe/failsafe_ops.c @@ -111,12 +111,14 @@ fs_set_queues_state_start(struct rte_eth_dev *dev) uint16_t i; for (i = 0; i < dev->data->nb_rx_queues; i++) { + __rte_assume(i < RTE_MAX_QUEUES_PER_PORT); rxq = dev->data->rx_queues[i]; if (rxq != NULL && !rxq->info.conf.rx_deferred_start) dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; } for (i = 0; i < dev->data->nb_tx_queues; i++) { + __rte_assume(i < RTE_MAX_QUEUES_PER_PORT); txq = dev->data->tx_queues[i]; if (txq != NULL && !txq->info.conf.tx_deferred_start) dev->data->tx_queue_state[i] = @@ -176,14 +178,18 @@ fs_set_queues_state_stop(struct rte_eth_dev *dev) { uint16_t i; - for (i = 0; i < dev->data->nb_rx_queues; i++) + for (i = 0; i < dev->data->nb_rx_queues; i++) { + __rte_assume(i < RTE_MAX_QUEUES_PER_PORT); if (dev->data->rx_queues[i] != NULL) dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; - for (i = 0; i < dev->data->nb_tx_queues; i++) + } + for (i = 0; i < dev->data->nb_tx_queues; i++) { + __rte_assume(i < RTE_MAX_QUEUES_PER_PORT); if (dev->data->tx_queues[i] != NULL) dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; + } } static int diff --git a/drivers/net/fm10k/base/meson.build b/drivers/net/fm10k/base/meson.build index bd19df27f7..f24e453fd0 100644 --- a/drivers/net/fm10k/base/meson.build +++ b/drivers/net/fm10k/base/meson.build @@ -10,10 +10,10 @@ sources = [ 'fm10k_vf.c', ] -error_cflags = ['-Wno-unused-parameter', '-Wno-unused-value', - '-Wno-strict-aliasing', '-Wno-format-extra-args', - '-Wno-unused-variable', - '-Wno-implicit-fallthrough' +error_cflags = [ + '-Wno-unused-parameter', + '-Wno-unused-value', + '-Wno-implicit-fallthrough', ] c_args = cflags foreach flag: error_cflags diff --git a/drivers/net/hns3/hns3_cmd.c b/drivers/net/hns3/hns3_cmd.c index 3c5fdbef8f..146444e2fa 100644 --- a/drivers/net/hns3/hns3_cmd.c +++ b/drivers/net/hns3/hns3_cmd.c @@ -545,9 +545,7 @@ hns3_set_dcb_capability(struct hns3_hw *hw) if (device_id == HNS3_DEV_ID_25GE_RDMA || device_id == HNS3_DEV_ID_50GE_RDMA || device_id == HNS3_DEV_ID_100G_RDMA_MACSEC || - device_id == HNS3_DEV_ID_200G_RDMA || - device_id == HNS3_DEV_ID_100G_ROH || - device_id == HNS3_DEV_ID_200G_ROH) + device_id == HNS3_DEV_ID_200G_RDMA) hns3_set_bit(hw->capability, HNS3_DEV_SUPPORT_DCB_B, 1); } diff --git a/drivers/net/hns3/hns3_common.c b/drivers/net/hns3/hns3_common.c index 5e6cdfdaa0..25a45212be 100644 --- a/drivers/net/hns3/hns3_common.c +++ b/drivers/net/hns3/hns3_common.c @@ -272,6 +272,45 @@ hns3_parse_vlan_match_mode(const char *key, const char *value, void *args) return 0; } +static int 
+hns3_parse_fdir_tuple_config(const char *key, const char *value, void *args) +{ + enum hns3_fdir_tuple_config tuple_cfg; + + tuple_cfg = hns3_parse_tuple_config(value); + if (tuple_cfg == HNS3_FDIR_TUPLE_CONFIG_DEFAULT || + tuple_cfg == HNS3_FDIR_TUPLE_CONFIG_BUTT) { + PMD_INIT_LOG(WARNING, "invalid value:\"%s\" for key:\"%s\"", + value, key); + return -1; + } + + *(enum hns3_fdir_tuple_config *)args = tuple_cfg; + + return 0; +} + +static int +hns3_parse_fdir_index_config(const char *key, const char *value, void *args) +{ + enum hns3_fdir_index_config cfg; + + if (strcmp(value, "hash") == 0) { + cfg = HNS3_FDIR_INDEX_CONFIG_HASH; + } else if (strcmp(value, "priority") == 0) { + cfg = HNS3_FDIR_INDEX_CONFIG_PRIORITY; + } else { + PMD_INIT_LOG(WARNING, "invalid value:\"%s\" for key:\"%s\", " + "value must be 'hash' or 'priority'", + value, key); + return -1; + } + + *(enum hns3_fdir_index_config *)args = cfg; + + return 0; +} + void hns3_parse_devargs(struct rte_eth_dev *dev) { @@ -306,11 +345,20 @@ hns3_parse_devargs(struct rte_eth_dev *dev) &hns3_parse_dev_caps_mask, &dev_caps_mask); (void)rte_kvargs_process(kvlist, HNS3_DEVARG_MBX_TIME_LIMIT_MS, &hns3_parse_mbx_time_limit, &mbx_time_limit_ms); - if (!hns->is_vf) + if (!hns->is_vf) { (void)rte_kvargs_process(kvlist, - HNS3_DEVARG_FDIR_VALN_MATCH_MODE, + HNS3_DEVARG_FDIR_VLAN_MATCH_MODE, &hns3_parse_vlan_match_mode, &hns->pf.fdir.vlan_match_mode); + (void)rte_kvargs_process(kvlist, + HNS3_DEVARG_FDIR_TUPLE_CONFIG, + &hns3_parse_fdir_tuple_config, + &hns->pf.fdir.tuple_cfg); + (void)rte_kvargs_process(kvlist, + HNS3_DEVARG_FDIR_INDEX_CONFIG, + &hns3_parse_fdir_index_config, + &hns->pf.fdir.index_cfg); + } rte_kvargs_free(kvlist); diff --git a/drivers/net/hns3/hns3_common.h b/drivers/net/hns3/hns3_common.h index cf9593bd0c..7b3f96b01a 100644 --- a/drivers/net/hns3/hns3_common.h +++ b/drivers/net/hns3/hns3_common.h @@ -27,7 +27,9 @@ enum { #define HNS3_DEVARG_MBX_TIME_LIMIT_MS "mbx_time_limit_ms" -#define HNS3_DEVARG_FDIR_VALN_MATCH_MODE "fdir_vlan_match_mode" +#define HNS3_DEVARG_FDIR_VLAN_MATCH_MODE "fdir_vlan_match_mode" +#define HNS3_DEVARG_FDIR_TUPLE_CONFIG "fdir_tuple_config" +#define HNS3_DEVARG_FDIR_INDEX_CONFIG "fdir_index_config" #define MSEC_PER_SEC 1000L #define USEC_PER_MSEC 1000L diff --git a/drivers/net/hns3/hns3_dump.c b/drivers/net/hns3/hns3_dump.c index fff44b9514..738dcb0c42 100644 --- a/drivers/net/hns3/hns3_dump.c +++ b/drivers/net/hns3/hns3_dump.c @@ -169,6 +169,8 @@ hns3_get_fdir_basic_info(FILE *file, struct hns3_pf *pf) "\t -- mode=%u max_key_len=%u rule_num:%u cnt_num:%u\n" "\t -- key_sel=%u tuple_active=0x%x meta_data_active=0x%x\n" "\t -- ipv6_word_en: in_s=%u in_d=%u out_s=%u out_d=%u\n" + "\t -- index_cfg: %s\n" + "\t -- tuple_config: %s\n" "\t -- active_tuples:\n", fdcfg->fd_mode, fdcfg->max_key_length, fdcfg->rule_num[HNS3_FD_STAGE_1], @@ -179,7 +181,9 @@ hns3_get_fdir_basic_info(FILE *file, struct hns3_pf *pf) fdcfg->key_cfg[HNS3_FD_STAGE_1].inner_sipv6_word_en, fdcfg->key_cfg[HNS3_FD_STAGE_1].inner_dipv6_word_en, fdcfg->key_cfg[HNS3_FD_STAGE_1].outer_sipv6_word_en, - fdcfg->key_cfg[HNS3_FD_STAGE_1].outer_dipv6_word_en); + fdcfg->key_cfg[HNS3_FD_STAGE_1].outer_dipv6_word_en, + hns3_fdir_index_config_name(pf->fdir.index_cfg), + hns3_tuple_config_name(pf->fdir.tuple_cfg)); for (i = 0; i < MAX_TUPLE; i++) { if (!(fdcfg->key_cfg[HNS3_FD_STAGE_1].tuple_active & BIT(i))) diff --git a/drivers/net/hns3/hns3_ethdev.c b/drivers/net/hns3/hns3_ethdev.c index 8b43d731ac..72d1c30a7b 100644 --- 
a/drivers/net/hns3/hns3_ethdev.c +++ b/drivers/net/hns3/hns3_ethdev.c @@ -6651,8 +6651,6 @@ static const struct rte_pci_id pci_id_hns3_map[] = { { RTE_PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HNS3_DEV_ID_50GE_RDMA) }, { RTE_PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HNS3_DEV_ID_100G_RDMA_MACSEC) }, { RTE_PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HNS3_DEV_ID_200G_RDMA) }, - { RTE_PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HNS3_DEV_ID_100G_ROH) }, - { RTE_PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HNS3_DEV_ID_200G_ROH) }, { .vendor_id = 0, }, /* sentinel */ }; @@ -6670,7 +6668,12 @@ RTE_PMD_REGISTER_PARAM_STRING(net_hns3, HNS3_DEVARG_RX_FUNC_HINT "=vec|sve|simple|common " HNS3_DEVARG_TX_FUNC_HINT "=vec|sve|simple|common " HNS3_DEVARG_DEV_CAPS_MASK "=<1-65535> " - HNS3_DEVARG_MBX_TIME_LIMIT_MS "= "); + HNS3_DEVARG_MBX_TIME_LIMIT_MS "= " + HNS3_DEVARG_FDIR_VLAN_MATCH_MODE "=strict|nostrict " + HNS3_DEVARG_FDIR_TUPLE_CONFIG "=+outvlan-insmac|+outvlan-indmac|" + "+outvlan-insip|+outvlan-indip" + "+outvlan-sctptag|+outvlan-tunvni " + HNS3_DEVARG_FDIR_INDEX_CONFIG "=hash|priority "); RTE_LOG_REGISTER_SUFFIX(hns3_logtype_init, init, NOTICE); RTE_LOG_REGISTER_SUFFIX(hns3_logtype_driver, driver, NOTICE); #ifdef RTE_ETHDEV_DEBUG_RX diff --git a/drivers/net/hns3/hns3_ethdev.h b/drivers/net/hns3/hns3_ethdev.h index 799b61038a..7824503bb8 100644 --- a/drivers/net/hns3/hns3_ethdev.h +++ b/drivers/net/hns3/hns3_ethdev.h @@ -28,9 +28,7 @@ #define HNS3_DEV_ID_25GE_RDMA 0xA222 #define HNS3_DEV_ID_50GE_RDMA 0xA224 #define HNS3_DEV_ID_100G_RDMA_MACSEC 0xA226 -#define HNS3_DEV_ID_100G_ROH 0xA227 #define HNS3_DEV_ID_200G_RDMA 0xA228 -#define HNS3_DEV_ID_200G_ROH 0xA22C #define HNS3_DEV_ID_100G_VF 0xA22E #define HNS3_DEV_ID_100G_RDMA_PFC_VF 0xA22F diff --git a/drivers/net/hns3/hns3_fdir.c b/drivers/net/hns3/hns3_fdir.c index 4843066723..aacad40e61 100644 --- a/drivers/net/hns3/hns3_fdir.c +++ b/drivers/net/hns3/hns3_fdir.c @@ -300,6 +300,58 @@ static int hns3_set_fd_key_config(struct hns3_adapter *hns) return ret; } +static void hns3_set_tuple_config(struct hns3_adapter *hns, + struct hns3_fd_key_cfg *key_cfg) +{ + enum hns3_fdir_tuple_config tuple_cfg = hns->pf.fdir.tuple_cfg; + + if (tuple_cfg == HNS3_FDIR_TUPLE_CONFIG_DEFAULT) + return; + + if (hns->pf.fdir.fd_cfg.max_key_length != MAX_KEY_LENGTH) { + hns3_warn(&hns->hw, "fdir tuple config only valid with 400bit key!"); + return; + } + + switch (tuple_cfg) { + case HNS3_FDIR_TUPLE_OUTVLAN_REPLACE_INSMAC: + key_cfg->tuple_active &= ~BIT(INNER_SRC_MAC); + key_cfg->tuple_active |= BIT(OUTER_VLAN_TAG_FST); + key_cfg->tuple_active |= BIT(OUTER_VLAN_TAG_SEC); + break; + case HNS3_FDIR_TUPLE_OUTVLAN_REPLACE_INDMAC: + key_cfg->tuple_active &= ~BIT(INNER_DST_MAC); + key_cfg->tuple_active |= BIT(OUTER_VLAN_TAG_FST); + key_cfg->tuple_active |= BIT(OUTER_VLAN_TAG_SEC); + break; + case HNS3_FDIR_TUPLE_OUTVLAN_REPLACE_INSIP: + key_cfg->tuple_active &= ~BIT(INNER_SRC_IP); + key_cfg->tuple_active |= BIT(OUTER_VLAN_TAG_FST); + key_cfg->tuple_active |= BIT(OUTER_VLAN_TAG_SEC); + break; + case HNS3_FDIR_TUPLE_OUTVLAN_REPLACE_INDIP: + key_cfg->tuple_active &= ~BIT(INNER_DST_IP); + key_cfg->tuple_active |= BIT(OUTER_VLAN_TAG_FST); + key_cfg->tuple_active |= BIT(OUTER_VLAN_TAG_SEC); + break; + case HNS3_FDIR_TUPLE_OUTVLAN_REPLACE_SCTPTAG: + key_cfg->tuple_active &= ~BIT(INNER_SCTP_TAG); + key_cfg->tuple_active |= BIT(OUTER_VLAN_TAG_FST); + key_cfg->tuple_active |= BIT(OUTER_VLAN_TAG_SEC); + break; + case HNS3_FDIR_TUPLE_OUTVLAN_REPLACE_TUNVNI: + key_cfg->tuple_active &= ~BIT(OUTER_TUN_VNI); + key_cfg->tuple_active |= 
BIT(OUTER_VLAN_TAG_FST); + key_cfg->tuple_active |= BIT(OUTER_VLAN_TAG_SEC); + break; + default: + hns3_err(&hns->hw, "invalid fdir tuple config %u!", tuple_cfg); + return; + } + + hns3_info(&hns->hw, "fdir tuple config %s!", hns3_tuple_config_name(tuple_cfg)); +} + int hns3_init_fd_config(struct hns3_adapter *hns) { struct hns3_pf *pf = &hns->pf; @@ -352,6 +404,8 @@ int hns3_init_fd_config(struct hns3_adapter *hns) "l4_src_port l4_dst_port tun_vni tun_flow_id>"); } + hns3_set_tuple_config(hns, key_cfg); + /* roce_type is used to filter roce frames * dst_vport is used to specify the rule */ @@ -500,6 +554,14 @@ static void hns3_fd_convert_int16(uint32_t tuple, struct hns3_fdir_rule *rule, uint16_t key; switch (tuple) { + case OUTER_VLAN_TAG_FST: + key = rule->key_conf.spec.outer_vlan_tag1; + mask = rule->key_conf.mask.outer_vlan_tag1; + break; + case OUTER_VLAN_TAG_SEC: + key = rule->key_conf.spec.outer_vlan_tag2; + mask = rule->key_conf.mask.outer_vlan_tag2; + break; case OUTER_SRC_PORT: key = rule->key_conf.spec.outer_src_port; mask = rule->key_conf.mask.outer_src_port; @@ -575,6 +637,8 @@ static bool hns3_fd_convert_tuple(struct hns3_hw *hw, hns3_fd_convert_mac(key_conf->spec.src_mac, key_conf->mask.src_mac, key_x, key_y); break; + case OUTER_VLAN_TAG_FST: + case OUTER_VLAN_TAG_SEC: case OUTER_SRC_PORT: case OUTER_DST_PORT: case OUTER_ETH_TYPE: @@ -836,6 +900,7 @@ int hns3_fdir_filter_init(struct hns3_adapter *hns) .key_len = sizeof(struct hns3_fdir_key_conf), .hash_func = rte_hash_crc, .hash_func_init_val = 0, + .extra_flag = RTE_HASH_EXTRA_FLAGS_EXT_TABLE, }; int ret; @@ -917,39 +982,44 @@ static int hns3_insert_fdir_filter(struct hns3_hw *hw, { struct hns3_fdir_key_conf *key; hash_sig_t sig; - int ret; + int index; key = &fdir_filter->fdir_conf.key_conf; sig = rte_hash_crc(key, sizeof(*key), 0); - ret = rte_hash_add_key_with_hash(fdir_info->hash_handle, key, sig); - if (ret < 0) { - hns3_err(hw, "Hash table full? err:%d!", ret); - return ret; + index = rte_hash_add_key_with_hash(fdir_info->hash_handle, key, sig); + if (index < 0) { + hns3_err(hw, "Hash table full? 
err:%d!", index); + return index; } - fdir_info->hash_map[ret] = fdir_filter; + if (fdir_info->index_cfg == HNS3_FDIR_INDEX_CONFIG_PRIORITY) + index = fdir_filter->fdir_conf.location; + + fdir_info->hash_map[index] = fdir_filter; TAILQ_INSERT_TAIL(&fdir_info->fdir_list, fdir_filter, entries); - return ret; + return index; } static int hns3_remove_fdir_filter(struct hns3_hw *hw, struct hns3_fdir_info *fdir_info, - struct hns3_fdir_key_conf *key) + struct hns3_fdir_rule *rule) { struct hns3_fdir_rule_ele *fdir_filter; hash_sig_t sig; - int ret; + int index; - sig = rte_hash_crc(key, sizeof(*key), 0); - ret = rte_hash_del_key_with_hash(fdir_info->hash_handle, key, sig); - if (ret < 0) { - hns3_err(hw, "Delete hash key fail ret=%d", ret); - return ret; + sig = rte_hash_crc(&rule->key_conf, sizeof(rule->key_conf), 0); + index = rte_hash_del_key_with_hash(fdir_info->hash_handle, &rule->key_conf, sig); + if (index < 0) { + hns3_err(hw, "Delete hash key fail ret=%d", index); + return index; } - fdir_filter = fdir_info->hash_map[ret]; - fdir_info->hash_map[ret] = NULL; + if (fdir_info->index_cfg == HNS3_FDIR_INDEX_CONFIG_PRIORITY) + index = rule->location; + fdir_filter = fdir_info->hash_map[index]; + fdir_info->hash_map[index] = NULL; TAILQ_REMOVE(&fdir_info->fdir_list, fdir_filter, entries); rte_free(fdir_filter); @@ -978,7 +1048,7 @@ int hns3_fdir_filter_program(struct hns3_adapter *hns, rule->key_conf.spec.src_port, rule->key_conf.spec.dst_port, ret); else - ret = hns3_remove_fdir_filter(hw, fdir_info, &rule->key_conf); + ret = hns3_remove_fdir_filter(hw, fdir_info, rule); return ret; } @@ -1016,7 +1086,7 @@ int hns3_fdir_filter_program(struct hns3_adapter *hns, rule->key_conf.spec.dst_ip[IP_ADDR_KEY_ID], rule->key_conf.spec.src_port, rule->key_conf.spec.dst_port, ret); - (void)hns3_remove_fdir_filter(hw, fdir_info, &rule->key_conf); + (void)hns3_remove_fdir_filter(hw, fdir_info, rule); } return ret; @@ -1128,3 +1198,63 @@ int hns3_fd_get_count(struct hns3_hw *hw, uint32_t id, uint64_t *value) return ret; } + +static struct { + enum hns3_fdir_tuple_config tuple_cfg; + const char *name; +} tuple_config_map[] = { + { HNS3_FDIR_TUPLE_CONFIG_DEFAULT, "default" }, + { HNS3_FDIR_TUPLE_OUTVLAN_REPLACE_INSMAC, "+outvlan-insmac" }, + { HNS3_FDIR_TUPLE_OUTVLAN_REPLACE_INDMAC, "+outvlan-indmac" }, + { HNS3_FDIR_TUPLE_OUTVLAN_REPLACE_INSIP, "+outvlan-insip" }, + { HNS3_FDIR_TUPLE_OUTVLAN_REPLACE_INDIP, "+outvlan-indip" }, + { HNS3_FDIR_TUPLE_OUTVLAN_REPLACE_SCTPTAG, "+outvlan-sctptag" }, + { HNS3_FDIR_TUPLE_OUTVLAN_REPLACE_TUNVNI, "+outvlan-tunvni" } +}; + +enum hns3_fdir_tuple_config +hns3_parse_tuple_config(const char *name) +{ + uint32_t i; + + for (i = 0; i < RTE_DIM(tuple_config_map); i++) { + if (!strcmp(name, tuple_config_map[i].name)) + return tuple_config_map[i].tuple_cfg; + } + + return HNS3_FDIR_TUPLE_CONFIG_BUTT; +} + +const char * +hns3_tuple_config_name(enum hns3_fdir_tuple_config tuple_cfg) +{ + uint32_t i; + + for (i = 0; i < RTE_DIM(tuple_config_map); i++) { + if (tuple_cfg == tuple_config_map[i].tuple_cfg) + return tuple_config_map[i].name; + } + + return "unknown"; +} + +static struct { + enum hns3_fdir_index_config cfg; + const char *name; +} index_cfg_map[] = { + { HNS3_FDIR_INDEX_CONFIG_HASH, "hash"}, + { HNS3_FDIR_INDEX_CONFIG_PRIORITY, "priority"}, +}; + +const char * +hns3_fdir_index_config_name(enum hns3_fdir_index_config cfg) +{ + uint32_t i; + + for (i = 0; i < RTE_DIM(index_cfg_map); i++) { + if (cfg == index_cfg_map[i].cfg) + return index_cfg_map[i].name; + } + + return 
"unknown"; +} diff --git a/drivers/net/hns3/hns3_fdir.h b/drivers/net/hns3/hns3_fdir.h index 6ccd90a253..5ba7b5b60d 100644 --- a/drivers/net/hns3/hns3_fdir.h +++ b/drivers/net/hns3/hns3_fdir.h @@ -97,6 +97,8 @@ struct hns3_fd_rule_tuples { uint32_t sctp_tag; uint16_t outer_src_port; uint16_t tunnel_type; + uint16_t outer_vlan_tag1; + uint16_t outer_vlan_tag2; uint16_t outer_ether_type; uint8_t outer_proto; uint8_t outer_tun_vni[VNI_OR_TNI_LEN]; @@ -181,6 +183,59 @@ TAILQ_HEAD(hns3_fdir_rule_list, hns3_fdir_rule_ele); #define HNS3_FDIR_VLAN_STRICT_MATCH 1 #define HNS3_FDIR_VLAN_NOSTRICT_MATCH 0 +/* + * The hardware supports many tuples match (see @enum HNS3_FD_TUPLE), + * however, the width of hardware entries is limited, therefore, only part + * of tuples are enabled (see as @hns3_init_fd_config). + * + * We should replace the existing tuples if we want to enable other tuples + * because the width capacity is insufficient. + */ +enum hns3_fdir_tuple_config { + /* Default tuple config (see as @hns3_init_fd_config). */ + HNS3_FDIR_TUPLE_CONFIG_DEFAULT, + /* + * Based on the default tuple config, disable the inner src-mac tuple, + * and enable the outer VLAN tuple. + */ + HNS3_FDIR_TUPLE_OUTVLAN_REPLACE_INSMAC, + /* + * Based on the default tuple config, disable the inner dst-mac tuple, + * and enable the outer VLAN tuple. + */ + HNS3_FDIR_TUPLE_OUTVLAN_REPLACE_INDMAC, + /* + * Based on the default tuple config, disable the inner src-ip tuple, + * and enable the outer VLAN tuple. + */ + HNS3_FDIR_TUPLE_OUTVLAN_REPLACE_INSIP, + /* + * Based on the default tuple config, disable the inner dst-ip tuple, + * and enable the outer VLAN tuple. + */ + HNS3_FDIR_TUPLE_OUTVLAN_REPLACE_INDIP, + /* + * Based on the default tuple config, disable the sctp-tag tuple, + * and enable the outer VLAN tuple. + */ + HNS3_FDIR_TUPLE_OUTVLAN_REPLACE_SCTPTAG, + /* + * Based on the default tuple config, disable the tunnel vni tuple, + * and enable the outer VLAN tuple. + */ + HNS3_FDIR_TUPLE_OUTVLAN_REPLACE_TUNVNI, + + HNS3_FDIR_TUPLE_CONFIG_BUTT +}; + +enum hns3_fdir_index_config { + /* Generate the hardware flow director index based on rte_hash (Default) */ + HNS3_FDIR_INDEX_CONFIG_HASH, + + /* Use the rte_flow priority field as the hardware flow director index. */ + HNS3_FDIR_INDEX_CONFIG_PRIORITY +}; + /* * A structure used to define fields of a FDIR related info. 
*/ @@ -190,6 +245,8 @@ struct hns3_fdir_info { struct rte_hash *hash_handle; struct hns3_fd_cfg fd_cfg; uint8_t vlan_match_mode; + enum hns3_fdir_tuple_config tuple_cfg; + enum hns3_fdir_index_config index_cfg; }; struct hns3_adapter; @@ -204,4 +261,8 @@ int hns3_clear_all_fdir_filter(struct hns3_adapter *hns); int hns3_fd_get_count(struct hns3_hw *hw, uint32_t id, uint64_t *value); int hns3_restore_all_fdir_filter(struct hns3_adapter *hns); +enum hns3_fdir_tuple_config hns3_parse_tuple_config(const char *name); +const char *hns3_tuple_config_name(enum hns3_fdir_tuple_config tuple_cfg); +const char *hns3_fdir_index_config_name(enum hns3_fdir_index_config cfg); + #endif /* HNS3_FDIR_H */ diff --git a/drivers/net/hns3/hns3_flow.c b/drivers/net/hns3/hns3_flow.c index bf1eee506d..266934b45b 100644 --- a/drivers/net/hns3/hns3_flow.c +++ b/drivers/net/hns3/hns3_flow.c @@ -155,13 +155,15 @@ static enum rte_flow_item_type first_items[] = { RTE_FLOW_ITEM_TYPE_NVGRE, RTE_FLOW_ITEM_TYPE_VXLAN, RTE_FLOW_ITEM_TYPE_GENEVE, - RTE_FLOW_ITEM_TYPE_VXLAN_GPE + RTE_FLOW_ITEM_TYPE_VXLAN_GPE, + RTE_FLOW_ITEM_TYPE_PTYPE }; static enum rte_flow_item_type L2_next_items[] = { RTE_FLOW_ITEM_TYPE_VLAN, RTE_FLOW_ITEM_TYPE_IPV4, - RTE_FLOW_ITEM_TYPE_IPV6 + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_PTYPE }; static enum rte_flow_item_type L3_next_items[] = { @@ -169,7 +171,8 @@ static enum rte_flow_item_type L3_next_items[] = { RTE_FLOW_ITEM_TYPE_UDP, RTE_FLOW_ITEM_TYPE_SCTP, RTE_FLOW_ITEM_TYPE_NVGRE, - RTE_FLOW_ITEM_TYPE_ICMP + RTE_FLOW_ITEM_TYPE_ICMP, + RTE_FLOW_ITEM_TYPE_PTYPE }; static enum rte_flow_item_type L4_next_items[] = { @@ -283,7 +286,7 @@ hns3_counter_new(struct rte_eth_dev *dev, uint32_t indirect, uint32_t id, cnt = hns3_counter_lookup(dev, id); if (cnt) { if (!cnt->indirect || cnt->indirect != indirect) - return rte_flow_error_set(error, ENOTSUP, + return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION_CONF, cnt, "Counter id is used, indirect flag not match"); @@ -594,10 +597,6 @@ hns3_check_attr(const struct rte_flow_attr *attr, struct rte_flow_error *error) return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER, attr, "No support for transfer"); - if (attr->priority) - return rte_flow_error_set(error, ENOTSUP, - RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY, - attr, "Not support priority"); if (attr->group) return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ATTR_GROUP, @@ -605,6 +604,59 @@ hns3_check_attr(const struct rte_flow_attr *attr, struct rte_flow_error *error) return 0; } +static int +hns3_check_tuple(const struct rte_eth_dev *dev, const struct hns3_fdir_rule *rule, + struct rte_flow_error *error) +{ + const char * const err_msg[] = { + "Not support outer dst mac", + "Not support outer src mac", + "Not support outer vlan1 tag", + "Not support outer vlan2 tag", + "Not support outer eth type", + "Not support outer l2 rsv", + "Not support outer ip tos", + "Not support outer ip proto", + "Not support outer src ip", + "Not support outer dst ip", + "Not support outer l3 rsv", + "Not support outer src port", + "Not support outer dst port", + "Not support outer l4 rsv", + "Not support outer tun vni", + "Not support outer tun flow id", + "Not support inner dst mac", + "Not support inner src mac", + "Not support inner vlan tag1", + "Not support inner vlan tag2", + "Not support inner eth type", + "Not support inner l2 rsv", + "Not support inner ip tos", + "Not support inner ip proto", + "Not support inner src ip", + "Not support inner dst ip", + "Not support inner l3 
rsv", + "Not support inner src port", + "Not support inner dst port", + "Not support inner sctp tag", + }; + struct hns3_adapter *hns = dev->data->dev_private; + uint32_t tuple_active = hns->pf.fdir.fd_cfg.key_cfg[HNS3_FD_STAGE_1].tuple_active; + uint32_t i; + + for (i = 0; i < MAX_TUPLE; i++) { + if ((rule->input_set & BIT(i)) == 0) + continue; + if (tuple_active & BIT(i)) + continue; + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ITEM, + NULL, err_msg[i]); + } + + return 0; +} + static int hns3_parse_eth(const struct rte_flow_item *item, struct hns3_fdir_rule *rule, struct rte_flow_error *error __rte_unused) @@ -1026,12 +1078,22 @@ hns3_handle_tunnel(const struct rte_flow_item *item, rule->key_conf.mask.ether_type = 0; } - /* check vlan config */ - if (rule->input_set & (BIT(INNER_VLAN_TAG1) | BIT(INNER_VLAN_TAG2))) - return rte_flow_error_set(error, EINVAL, - RTE_FLOW_ERROR_TYPE_ITEM, - item, - "Outer vlan tags is unsupported"); + if (rule->input_set & BIT(INNER_VLAN_TAG1)) { + hns3_set_bit(rule->input_set, OUTER_VLAN_TAG_FST, 1); + hns3_set_bit(rule->input_set, INNER_VLAN_TAG1, 0); + rule->key_conf.spec.outer_vlan_tag1 = rule->key_conf.spec.vlan_tag1; + rule->key_conf.mask.outer_vlan_tag1 = rule->key_conf.mask.vlan_tag1; + rule->key_conf.spec.vlan_tag1 = 0; + rule->key_conf.mask.vlan_tag1 = 0; + } + if (rule->input_set & BIT(INNER_VLAN_TAG2)) { + hns3_set_bit(rule->input_set, OUTER_VLAN_TAG_SEC, 1); + hns3_set_bit(rule->input_set, INNER_VLAN_TAG2, 0); + rule->key_conf.spec.outer_vlan_tag2 = rule->key_conf.spec.vlan_tag2; + rule->key_conf.mask.outer_vlan_tag2 = rule->key_conf.mask.vlan_tag2; + rule->key_conf.spec.vlan_tag2 = 0; + rule->key_conf.mask.vlan_tag2 = 0; + } /* clear vlan_num for inner vlan select */ rule->key_conf.outer_vlan_num = rule->key_conf.vlan_num; @@ -1204,6 +1266,32 @@ hns3_parse_geneve(const struct rte_flow_item *item, struct hns3_fdir_rule *rule, return 0; } +static int +hns3_parse_ptype(const struct rte_flow_item *item, struct hns3_fdir_rule *rule, + struct rte_flow_error *error) +{ + const struct rte_flow_item_ptype *spec = item->spec; + const struct rte_flow_item_ptype *mask = item->mask; + + if (spec == NULL || mask == NULL) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "PTYPE must set spec and mask at the same time!"); + + if (spec->packet_type != RTE_PTYPE_TUNNEL_MASK || + (mask->packet_type & RTE_PTYPE_TUNNEL_MASK) != RTE_PTYPE_TUNNEL_MASK) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM_MASK, item, + "PTYPE only support general tunnel!"); + + /* + * Set tunnel_type to non-zero, so that meta-data's tunnel packet bit + * will be set, then hardware will match tunnel packet. 
+ */ + rule->key_conf.spec.tunnel_type = 1; + return 0; +} + static int hns3_parse_tunnel(const struct rte_flow_item *item, struct hns3_fdir_rule *rule, struct rte_flow_error *error) @@ -1221,6 +1309,11 @@ hns3_parse_tunnel(const struct rte_flow_item *item, struct hns3_fdir_rule *rule, "Tunnel packets must configure " "with mask"); + if (rule->key_conf.spec.tunnel_type != 0) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Too many tunnel headers!"); + switch (item->type) { case RTE_FLOW_ITEM_TYPE_VXLAN: case RTE_FLOW_ITEM_TYPE_VXLAN_GPE: @@ -1232,6 +1325,9 @@ hns3_parse_tunnel(const struct rte_flow_item *item, struct hns3_fdir_rule *rule, case RTE_FLOW_ITEM_TYPE_GENEVE: ret = hns3_parse_geneve(item, rule, error); break; + case RTE_FLOW_ITEM_TYPE_PTYPE: + ret = hns3_parse_ptype(item, rule, error); + break; default: return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, @@ -1331,11 +1427,50 @@ is_tunnel_packet(enum rte_flow_item_type type) if (type == RTE_FLOW_ITEM_TYPE_VXLAN_GPE || type == RTE_FLOW_ITEM_TYPE_VXLAN || type == RTE_FLOW_ITEM_TYPE_NVGRE || - type == RTE_FLOW_ITEM_TYPE_GENEVE) + type == RTE_FLOW_ITEM_TYPE_GENEVE || + /* + * Here treat PTYPE as tunnel type because driver only support PTYPE_TUNNEL, + * other PTYPE will return error in hns3_parse_ptype() later. + */ + type == RTE_FLOW_ITEM_TYPE_PTYPE) return true; return false; } +static int +hns3_handle_attributes(struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, + struct hns3_fdir_rule *rule, + struct rte_flow_error *error) +{ + struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private); + struct hns3_fdir_info fdir = pf->fdir; + uint32_t rule_num; + + if (fdir.index_cfg != HNS3_FDIR_INDEX_CONFIG_PRIORITY) { + if (attr->priority == 0) + return 0; + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY, + attr, "Not support priority"); + } + + rule_num = fdir.fd_cfg.rule_num[HNS3_FD_STAGE_1]; + if (attr->priority >= rule_num) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY, + attr, "Priority out of range"); + + if (fdir.hash_map[attr->priority] != NULL) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY, + attr, "Priority already exists"); + + rule->location = attr->priority; + + return 0; +} + /* * Parse the flow director rule. 
* The supported PATTERN: @@ -1363,6 +1498,7 @@ is_tunnel_packet(enum rte_flow_item_type type) */ static int hns3_parse_fdir_filter(struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, const struct rte_flow_item pattern[], const struct rte_flow_action actions[], struct hns3_fdir_rule *rule, @@ -1379,6 +1515,10 @@ hns3_parse_fdir_filter(struct rte_eth_dev *dev, RTE_FLOW_ERROR_TYPE_HANDLE, NULL, "Fdir not supported in VF"); + ret = hns3_handle_attributes(dev, attr, rule, error); + if (ret) + return ret; + step_mngr.items = first_items; step_mngr.count = RTE_DIM(first_items); for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) { @@ -1402,6 +1542,10 @@ hns3_parse_fdir_filter(struct rte_eth_dev *dev, } } + ret = hns3_check_tuple(dev, rule, error); + if (ret) + return ret; + return hns3_handle_actions(dev, actions, rule, error); } @@ -2139,7 +2283,7 @@ hns3_flow_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, return hns3_parse_rss_filter(dev, pattern, actions, &conf->rss_conf, error); - return hns3_parse_fdir_filter(dev, pattern, actions, + return hns3_parse_fdir_filter(dev, attr, pattern, actions, &conf->fdir_conf, error); } diff --git a/drivers/net/hns3/hns3_regs.c b/drivers/net/hns3/hns3_regs.c index e8bc7fbc66..8a6ddbfe8c 100644 --- a/drivers/net/hns3/hns3_regs.c +++ b/drivers/net/hns3/hns3_regs.c @@ -897,7 +897,7 @@ hns3_get_name_by_module(enum hns3_reg_modules module) size_t i; for (i = 0; i < RTE_DIM(hns3_module_name_map); i++) { - if (hns3_module_name_map[i].module && HNS3_MODULE_MASK(module) != 0) + if ((hns3_module_name_map[i].module & HNS3_MODULE_MASK(module)) != 0) return hns3_module_name_map[i].name; } return "unknown"; @@ -1178,9 +1178,9 @@ hns3_direct_access_tqp_regs_help(struct hns3_hw *hw, struct rte_dev_reg_info *re uint32_t modules, enum hns3_reg_modules idx) { const struct hns3_dirt_reg_entry *reg_list; - uint16_t tqp_num, reg_offset; + uint32_t reg_num, i, j, reg_offset; uint32_t *data = regs->data; - uint32_t reg_num, i, j; + uint16_t tqp_num; if ((modules & HNS3_MODULE_MASK(idx)) == 0) return; @@ -1274,6 +1274,7 @@ hns3_get_dfx_regs(struct hns3_hw *hw, struct rte_dev_reg_info *regs, uint32_t mo if (cmd_descs == NULL) return -ENOMEM; + data += regs->length; for (i = 0; i < opcode_num; i++) { opcode = hns3_dfx_reg_opcode_list[i]; bd_num = bd_num_list[i]; @@ -1285,7 +1286,6 @@ hns3_get_dfx_regs(struct hns3_hw *hw, struct rte_dev_reg_info *regs, uint32_t mo if (ret) break; - data += regs->length; regs_num = hns3_dfx_reg_fetch_data(cmd_descs, bd_num, data); if (regs_num != hns3_reg_lists[i].entry_num) { hns3_err(hw, "Query register number differ from the list for module %s!", @@ -1294,6 +1294,7 @@ hns3_get_dfx_regs(struct hns3_hw *hw, struct rte_dev_reg_info *regs, uint32_t mo } hns3_fill_dfx_regs_name(hw, regs, hns3_reg_lists[i].reg_list, regs_num); regs->length += regs_num; + data += regs_num; } rte_free(cmd_descs); diff --git a/drivers/net/hns3/hns3_rxtx.c b/drivers/net/hns3/hns3_rxtx.c index 5941b966e0..03bbbc435f 100644 --- a/drivers/net/hns3/hns3_rxtx.c +++ b/drivers/net/hns3/hns3_rxtx.c @@ -1309,6 +1309,7 @@ hns3_start_tqps(struct hns3_hw *hw) hns3_enable_all_queues(hw, true); for (i = 0; i < hw->data->nb_tx_queues; i++) { + __rte_assume(i < RTE_MAX_QUEUES_PER_PORT); txq = hw->data->tx_queues[i]; if (txq->enabled) hw->data->tx_queue_state[i] = @@ -1316,6 +1317,7 @@ hns3_start_tqps(struct hns3_hw *hw) } for (i = 0; i < hw->data->nb_rx_queues; i++) { + __rte_assume(i < RTE_MAX_QUEUES_PER_PORT); rxq = hw->data->rx_queues[i]; if 
(rxq->enabled) hw->data->rx_queue_state[i] = diff --git a/drivers/net/hns3/hns3_rxtx_vec_neon.h b/drivers/net/hns3/hns3_rxtx_vec_neon.h index 0dc6b9f0a2..bbb5478015 100644 --- a/drivers/net/hns3/hns3_rxtx_vec_neon.h +++ b/drivers/net/hns3/hns3_rxtx_vec_neon.h @@ -5,6 +5,8 @@ #ifndef HNS3_RXTX_VEC_NEON_H #define HNS3_RXTX_VEC_NEON_H +#include + #include #pragma GCC diagnostic ignored "-Wcast-qual" @@ -189,7 +191,7 @@ hns3_recv_burst_vec(struct hns3_rx_queue *__restrict rxq, if (likely(stat == 0)) bd_valid_num = HNS3_DEFAULT_DESCS_PER_LOOP; else - bd_valid_num = __builtin_ctzl(stat) / HNS3_UINT16_BIT; + bd_valid_num = rte_ctz64(stat) / HNS3_UINT16_BIT; if (bd_valid_num == 0) break; diff --git a/drivers/net/i40e/base/i40e_nvm.c b/drivers/net/i40e/base/i40e_nvm.c index 185af67817..3e16a0d997 100644 --- a/drivers/net/i40e/base/i40e_nvm.c +++ b/drivers/net/i40e/base/i40e_nvm.c @@ -79,7 +79,7 @@ enum i40e_status_code i40e_acquire_nvm(struct i40e_hw *hw, if (ret_code) i40e_debug(hw, I40E_DEBUG_NVM, - "NVM acquire type %d failed time_left=%" PRIu64 " ret=%d aq_err=%d\n", + "NVM acquire type %d failed time_left=%" PRIu32 " ret=%d aq_err=%d\n", access, time_left, ret_code, hw->aq.asq_last_status); if (ret_code && time_left) { @@ -101,7 +101,7 @@ enum i40e_status_code i40e_acquire_nvm(struct i40e_hw *hw, if (ret_code != I40E_SUCCESS) { hw->nvm.hw_semaphore_timeout = 0; i40e_debug(hw, I40E_DEBUG_NVM, - "NVM acquire timed out, wait %" PRIu64 " ms before trying again. status=%d aq_err=%d\n", + "NVM acquire timed out, wait %" PRIu32 " ms before trying again. status=%d aq_err=%d\n", time_left, ret_code, hw->aq.asq_last_status); } } diff --git a/drivers/net/i40e/base/meson.build b/drivers/net/i40e/base/meson.build index d94108629b..a0912b1788 100644 --- a/drivers/net/i40e/base/meson.build +++ b/drivers/net/i40e/base/meson.build @@ -11,10 +11,11 @@ sources = [ 'i40e_nvm.c', ] -error_cflags = ['-Wno-sign-compare', '-Wno-unused-value', - '-Wno-format', '-Wno-format-security', - '-Wno-format-nonliteral', - '-Wno-strict-aliasing', '-Wno-unused-but-set-variable', +error_cflags = [ + '-Wno-sign-compare', + '-Wno-unused-value', + '-Wno-strict-aliasing', + '-Wno-unused-but-set-variable', '-Wno-unused-parameter', ] c_args = cflags diff --git a/drivers/net/i40e/i40e_rxtx_vec_neon.c b/drivers/net/i40e/i40e_rxtx_vec_neon.c index 3a99137b5e..e1c5c7041b 100644 --- a/drivers/net/i40e/i40e_rxtx_vec_neon.c +++ b/drivers/net/i40e/i40e_rxtx_vec_neon.c @@ -4,7 +4,9 @@ */ #include + #include +#include #include #include @@ -558,7 +560,7 @@ _recv_raw_pkts_vec(struct i40e_rx_queue *__rte_restrict rxq, if (unlikely(stat == 0)) { nb_pkts_recd += RTE_I40E_DESCS_PER_LOOP; } else { - nb_pkts_recd += __builtin_ctzl(stat) / I40E_UINT16_BIT; + nb_pkts_recd += rte_ctz64(stat) / I40E_UINT16_BIT; break; } } diff --git a/drivers/net/iavf/iavf_rxtx_vec_neon.c b/drivers/net/iavf/iavf_rxtx_vec_neon.c index 20b656e899..04be574683 100644 --- a/drivers/net/iavf/iavf_rxtx_vec_neon.c +++ b/drivers/net/iavf/iavf_rxtx_vec_neon.c @@ -4,7 +4,9 @@ */ #include + #include +#include #include #include @@ -366,7 +368,7 @@ _recv_raw_pkts_vec(struct iavf_rx_queue *__rte_restrict rxq, if (unlikely(stat == 0)) { nb_pkts_recd += IAVF_VPMD_DESCS_PER_LOOP; } else { - nb_pkts_recd += __builtin_ctzl(stat) / IAVF_UINT16_BIT; + nb_pkts_recd += rte_ctz64(stat) / IAVF_UINT16_BIT; break; } } diff --git a/drivers/net/iavf/meson.build b/drivers/net/iavf/meson.build index 27d104cc12..b48bb83438 100644 --- a/drivers/net/iavf/meson.build +++ b/drivers/net/iavf/meson.build @@ -5,8 
+5,6 @@ if dpdk_conf.get('RTE_IOVA_IN_MBUF') == 0 subdir_done() endif -cflags += ['-Wno-strict-aliasing'] - includes += include_directories('../../common/iavf') testpmd_sources = files('iavf_testpmd.c') diff --git a/drivers/net/ice/base/ice_ddp.c b/drivers/net/ice/base/ice_ddp.c index c17a58eab8..850c722a3f 100644 --- a/drivers/net/ice/base/ice_ddp.c +++ b/drivers/net/ice/base/ice_ddp.c @@ -1333,7 +1333,7 @@ ice_fill_hw_ptype(struct ice_hw *hw) * ice_copy_and_init_pkg() instead of directly calling ice_init_pkg() in this * case. */ -enum ice_ddp_state ice_init_pkg(struct ice_hw *hw, u8 *buf, u32 len) +enum ice_ddp_state ice_init_pkg(struct ice_hw *hw, u8 *buf, u32 len, bool load_sched) { bool already_loaded = false; enum ice_ddp_state state; @@ -1351,6 +1351,20 @@ enum ice_ddp_state ice_init_pkg(struct ice_hw *hw, u8 *buf, u32 len) return state; } + if (load_sched) { + enum ice_status res = ice_cfg_tx_topo(hw, buf, len); + if (res != ICE_SUCCESS) { + ice_debug(hw, ICE_DBG_INIT, + "failed to apply sched topology (err: %d)\n", + res); + return ICE_DDP_PKG_ERR; + } + ice_debug(hw, ICE_DBG_INIT, + "Topology download successful, reinitializing device\n"); + ice_deinit_hw(hw); + ice_init_hw(hw); + } + /* initialize package info */ state = ice_init_pkg_info(hw, pkg); if (state) @@ -1423,7 +1437,7 @@ enum ice_ddp_state ice_init_pkg(struct ice_hw *hw, u8 *buf, u32 len) * related routines. */ enum ice_ddp_state -ice_copy_and_init_pkg(struct ice_hw *hw, const u8 *buf, u32 len) +ice_copy_and_init_pkg(struct ice_hw *hw, const u8 *buf, u32 len, bool load_sched) { enum ice_ddp_state state; u8 *buf_copy; @@ -1433,7 +1447,7 @@ ice_copy_and_init_pkg(struct ice_hw *hw, const u8 *buf, u32 len) buf_copy = (u8 *)ice_memdup(hw, buf, len, ICE_NONDMA_TO_NONDMA); - state = ice_init_pkg(hw, buf_copy, len); + state = ice_init_pkg(hw, buf_copy, len, load_sched); if (!ice_is_init_pkg_successful(state)) { /* Free the copy, since we failed to initialize the package */ ice_free(hw, buf_copy); diff --git a/drivers/net/ice/base/ice_ddp.h b/drivers/net/ice/base/ice_ddp.h index 5512669f44..d79cdee13a 100644 --- a/drivers/net/ice/base/ice_ddp.h +++ b/drivers/net/ice/base/ice_ddp.h @@ -454,9 +454,9 @@ ice_pkg_enum_entry(struct ice_seg *ice_seg, struct ice_pkg_enum *state, void * ice_pkg_enum_section(struct ice_seg *ice_seg, struct ice_pkg_enum *state, u32 sect_type); -enum ice_ddp_state ice_init_pkg(struct ice_hw *hw, u8 *buff, u32 len); +enum ice_ddp_state ice_init_pkg(struct ice_hw *hw, u8 *buff, u32 len, bool load_sched); enum ice_ddp_state -ice_copy_and_init_pkg(struct ice_hw *hw, const u8 *buf, u32 len); +ice_copy_and_init_pkg(struct ice_hw *hw, const u8 *buf, u32 len, bool load_sched); bool ice_is_init_pkg_successful(enum ice_ddp_state state); void ice_free_seg(struct ice_hw *hw); diff --git a/drivers/net/ice/base/ice_sched.c b/drivers/net/ice/base/ice_sched.c index 9608ac7c24..1f520bb7c0 100644 --- a/drivers/net/ice/base/ice_sched.c +++ b/drivers/net/ice/base/ice_sched.c @@ -570,7 +570,7 @@ ice_sched_suspend_resume_elems(struct ice_hw *hw, u8 num_nodes, u32 *node_teids, * @tc: TC number * @new_numqs: number of queues */ -static int +int ice_alloc_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 new_numqs) { struct ice_vsi_ctx *vsi_ctx; diff --git a/drivers/net/ice/base/ice_sched.h b/drivers/net/ice/base/ice_sched.h index 9f78516dfb..09d60d02f0 100644 --- a/drivers/net/ice/base/ice_sched.h +++ b/drivers/net/ice/base/ice_sched.h @@ -270,4 +270,7 @@ int ice_sched_replay_q_bw(struct ice_port_info *pi, struct ice_q_ctx 
*q_ctx); int ice_sched_cfg_node_bw_alloc(struct ice_hw *hw, struct ice_sched_node *node, enum ice_rl_type rl_type, u16 bw_alloc); + +int +ice_alloc_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 new_numqs); #endif /* _ICE_SCHED_H_ */ diff --git a/drivers/net/ice/base/meson.build b/drivers/net/ice/base/meson.build index 38ddde9e8c..addb922ac9 100644 --- a/drivers/net/ice/base/meson.build +++ b/drivers/net/ice/base/meson.build @@ -32,7 +32,6 @@ sources = [ ] error_cflags = [ - '-Wno-unused-value', '-Wno-unused-but-set-variable', '-Wno-unused-variable', '-Wno-unused-parameter', diff --git a/drivers/net/ice/ice_diagnose.c b/drivers/net/ice/ice_diagnose.c index c357554707..5bec9d00ad 100644 --- a/drivers/net/ice/ice_diagnose.c +++ b/drivers/net/ice/ice_diagnose.c @@ -510,7 +510,7 @@ int rte_pmd_ice_dump_switch(uint16_t port, uint8_t **buff, uint32_t *size) return ice_dump_switch(dev, buff, size); } -static void print_rl_profile(struct ice_aqc_rl_profile_elem *prof, +static void print_rl_profile(const struct ice_aqc_rl_profile_elem *prof, FILE *stream) { fprintf(stream, "\t\t\t\t\t\n"); @@ -545,29 +545,15 @@ static void print_rl_profile(struct ice_aqc_rl_profile_elem *prof, fprintf(stream, "\t\t\t\t\t\n"); } -static -void print_elem_type(FILE *stream, u8 type) +static const char * +get_elem_type(u8 type) { - switch (type) { - case 1: - fprintf(stream, "root"); - break; - case 2: - fprintf(stream, "tc"); - break; - case 3: - fprintf(stream, "se_generic"); - break; - case 4: - fprintf(stream, "entry_point"); - break; - case 5: - fprintf(stream, "leaf"); - break; - default: - fprintf(stream, "%d", type); - break; - } + static const char * const ice_sched_node_types[] = { + "Undefined", "Root", "TC", "SE Generic", "SW Entry", "Leaf" + }; + if (type < RTE_DIM(ice_sched_node_types)) + return ice_sched_node_types[type]; + return "*UNKNOWN*"; } static @@ -602,10 +588,11 @@ void print_priority_mode(FILE *stream, bool flag) } static -void print_node(struct ice_aqc_txsched_elem_data *data, - struct ice_aqc_rl_profile_elem *cir_prof, - struct ice_aqc_rl_profile_elem *eir_prof, - struct ice_aqc_rl_profile_elem *shared_prof, +void print_node(const struct rte_eth_dev_data *ethdata, + const struct ice_aqc_txsched_elem_data *data, + const struct ice_aqc_rl_profile_elem *cir_prof, + const struct ice_aqc_rl_profile_elem *eir_prof, + const struct ice_aqc_rl_profile_elem *shared_prof, bool detail, FILE *stream) { fprintf(stream, "\tNODE_%d [\n", data->node_teid); @@ -613,17 +600,18 @@ void print_node(struct ice_aqc_txsched_elem_data *data, fprintf(stream, "\t\t\t\n"); - fprintf(stream, "\t\t\t\t\n"); - fprintf(stream, "\t\t\t\t\t\n"); - fprintf(stream, "\t\t\t\t\t\n", data->node_teid); - fprintf(stream, "\t\t\t\t\n"); - - fprintf(stream, "\t\t\t\t\n"); - fprintf(stream, "\t\t\t\t\t\n"); - fprintf(stream, "\t\t\t\t\t\n"); - fprintf(stream, "\t\t\t\t\n"); + fprintf(stream, "\t\t\t\t\n", data->node_teid); + fprintf(stream, "\t\t\t\t\n", + get_elem_type(data->data.elem_type)); + if (data->data.elem_type == ICE_AQC_ELEM_TYPE_LEAF) { + for (uint16_t i = 0; i < ethdata->nb_tx_queues; i++) { + struct ice_tx_queue *q = ethdata->tx_queues[i]; + if (q->q_teid == data->node_teid) { + fprintf(stream, "\t\t\t\t\n", i); + break; + } + } + } if (!detail) goto brief; @@ -705,8 +693,6 @@ void print_node(struct ice_aqc_txsched_elem_data *data, fprintf(stream, "\t\tshape=plain\n"); fprintf(stream, "\t]\n"); - if (data->parent_teid != 0xFFFFFFFF) - fprintf(stream, "\tNODE_%d -> NODE_%d\n", data->parent_teid, data->node_teid); } 
static @@ -731,112 +717,92 @@ int query_rl_profile(struct ice_hw *hw, return 0; } -static -int query_node(struct ice_hw *hw, uint32_t child, uint32_t *parent, - uint8_t level, bool detail, FILE *stream) +static int +query_node(struct ice_hw *hw, struct rte_eth_dev_data *ethdata, + struct ice_sched_node *node, bool detail, FILE *stream) { - struct ice_aqc_txsched_elem_data data; + struct ice_aqc_txsched_elem_data *data = &node->info; struct ice_aqc_rl_profile_elem cir_prof; struct ice_aqc_rl_profile_elem eir_prof; struct ice_aqc_rl_profile_elem shared_prof; struct ice_aqc_rl_profile_elem *cp = NULL; struct ice_aqc_rl_profile_elem *ep = NULL; struct ice_aqc_rl_profile_elem *sp = NULL; - int status, ret; - - status = ice_sched_query_elem(hw, child, &data); - if (status != ICE_SUCCESS) { - if (level == hw->num_tx_sched_layers) { - /* ignore the error when a queue has been stopped. */ - PMD_DRV_LOG(WARNING, "Failed to query queue node %d.", child); - *parent = 0xffffffff; - return 0; - } - PMD_DRV_LOG(ERR, "Failed to query scheduling node %d.", child); - return -EINVAL; - } - - *parent = data.parent_teid; + u8 level = node->tx_sched_layer; + int ret; - if (data.data.cir_bw.bw_profile_idx != 0) { - ret = query_rl_profile(hw, level, 0, data.data.cir_bw.bw_profile_idx, &cir_prof); + if (data->data.cir_bw.bw_profile_idx != 0) { + ret = query_rl_profile(hw, level, 0, data->data.cir_bw.bw_profile_idx, &cir_prof); if (ret) return ret; cp = &cir_prof; } - if (data.data.eir_bw.bw_profile_idx != 0) { - ret = query_rl_profile(hw, level, 1, data.data.eir_bw.bw_profile_idx, &eir_prof); + if (data->data.eir_bw.bw_profile_idx != 0) { + ret = query_rl_profile(hw, level, 1, data->data.eir_bw.bw_profile_idx, &eir_prof); if (ret) return ret; ep = &eir_prof; } - if (data.data.srl_id != 0) { - ret = query_rl_profile(hw, level, 2, data.data.srl_id, &shared_prof); + if (data->data.srl_id != 0) { + ret = query_rl_profile(hw, level, 2, data->data.srl_id, &shared_prof); if (ret) return ret; sp = &shared_prof; } - print_node(&data, cp, ep, sp, detail, stream); + print_node(ethdata, data, cp, ep, sp, detail, stream); return 0; } -static -int query_nodes(struct ice_hw *hw, - uint32_t *children, int child_num, - uint32_t *parents, int *parent_num, - uint8_t level, bool detail, - FILE *stream) +static int +query_node_recursive(struct ice_hw *hw, struct rte_eth_dev_data *ethdata, + struct ice_sched_node *node, bool detail, FILE *stream) { - uint32_t parent; - int i; - int j; - - *parent_num = 0; - for (i = 0; i < child_num; i++) { - bool exist = false; - int ret; + bool close = false; + if (node->parent != NULL && node->vsi_handle != node->parent->vsi_handle) { + fprintf(stream, "subgraph cluster_%u {\n", node->vsi_handle); + fprintf(stream, "\tlabel = \"VSI %u\";\n", node->vsi_handle); + close = true; + } - ret = query_node(hw, children[i], &parent, level, detail, stream); - if (ret) - return -EINVAL; + int ret = query_node(hw, ethdata, node, detail, stream); + if (ret != 0) + return ret; - for (j = 0; j < *parent_num; j++) { - if (parents[j] == parent) { - exist = true; - break; - } + for (uint16_t i = 0; i < node->num_children; i++) { + ret = query_node_recursive(hw, ethdata, node->children[i], detail, stream); + if (ret != 0) + return ret; + /* if we have a lot of nodes, skip a bunch in the middle */ + if (node->num_children > 16 && i == 2) { + uint16_t inc = node->num_children - 5; + fprintf(stream, "\tn%d_children [label=\"... 
+%d child nodes ...\"];\n", + node->info.node_teid, inc); + fprintf(stream, "\tNODE_%d -> n%d_children;\n", + node->info.node_teid, node->info.node_teid); + i += inc; } - - if (!exist && parent != 0xFFFFFFFF) - parents[(*parent_num)++] = parent; } + if (close) + fprintf(stream, "}\n"); + if (node->info.parent_teid != 0xFFFFFFFF) + fprintf(stream, "\tNODE_%d -> NODE_%d\n", + node->info.parent_teid, node->info.node_teid); return 0; } -int rte_pmd_ice_dump_txsched(uint16_t port, bool detail, FILE *stream) +int +rte_pmd_ice_dump_txsched(uint16_t port, bool detail, FILE *stream) { struct rte_eth_dev *dev; struct ice_hw *hw; - struct ice_pf *pf; - struct ice_q_ctx *q_ctx; - uint16_t q_num; - uint16_t i; - struct ice_tx_queue *txq; - uint32_t buf1[256]; - uint32_t buf2[256]; - uint32_t *children = buf1; - uint32_t *parents = buf2; - int child_num = 0; - int parent_num = 0; - uint8_t level; RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV); @@ -846,35 +812,9 @@ int rte_pmd_ice_dump_txsched(uint16_t port, bool detail, FILE *stream) dev = &rte_eth_devices[port]; hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private); - pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private); - level = hw->num_tx_sched_layers; - - q_num = dev->data->nb_tx_queues; - - /* main vsi */ - for (i = 0; i < q_num; i++) { - txq = dev->data->tx_queues[i]; - q_ctx = ice_get_lan_q_ctx(hw, txq->vsi->idx, 0, i); - children[child_num++] = q_ctx->q_teid; - } - - /* fdir vsi */ - q_ctx = ice_get_lan_q_ctx(hw, pf->fdir.fdir_vsi->idx, 0, 0); - children[child_num++] = q_ctx->q_teid; fprintf(stream, "digraph tx_sched {\n"); - while (child_num > 0) { - int ret; - ret = query_nodes(hw, children, child_num, - parents, &parent_num, - level, detail, stream); - if (ret) - return ret; - - children = parents; - child_num = parent_num; - level--; - } + query_node_recursive(hw, dev->data, hw->port_info->root, detail, stream); fprintf(stream, "}\n"); return 0; diff --git a/drivers/net/ice/ice_ethdev.c b/drivers/net/ice/ice_ethdev.c index 70298ac330..93a6308a86 100644 --- a/drivers/net/ice/ice_ethdev.c +++ b/drivers/net/ice/ice_ethdev.c @@ -38,6 +38,9 @@ #define ICE_ONE_PPS_OUT_ARG "pps_out" #define ICE_RX_LOW_LATENCY_ARG "rx_low_latency" #define ICE_MBUF_CHECK_ARG "mbuf_check" +#define ICE_DDP_FILENAME_ARG "ddp_pkg_file" +#define ICE_DDP_LOAD_SCHED_ARG "ddp_load_sched_topo" +#define ICE_TM_LEVELS_ARG "tm_sched_levels" #define ICE_CYCLECOUNTER_MASK 0xffffffffffffffffULL @@ -54,6 +57,9 @@ static const char * const ice_valid_args[] = { ICE_RX_LOW_LATENCY_ARG, ICE_DEFAULT_MAC_DISABLE, ICE_MBUF_CHECK_ARG, + ICE_DDP_FILENAME_ARG, + ICE_DDP_LOAD_SCHED_ARG, + ICE_TM_LEVELS_ARG, NULL }; @@ -696,6 +702,18 @@ handle_field_name_arg(__rte_unused const char *key, const char *value, return 0; } +static int +handle_ddp_filename_arg(__rte_unused const char *key, const char *value, void *name_args) +{ + const char **filename = name_args; + if (strlen(value) >= ICE_MAX_PKG_FILENAME_SIZE) { + PMD_DRV_LOG(ERR, "The DDP package filename is too long : '%s'", value); + return -1; + } + *filename = strdup(value); + return 0; +} + static void ice_check_proto_xtr_support(struct ice_hw *hw) { @@ -901,7 +919,7 @@ ice_vsi_config_default_rss(struct ice_aqc_vsi_props *info) } static int -ice_vsi_config_tc_queue_mapping(struct ice_vsi *vsi, +ice_vsi_config_tc_queue_mapping(struct ice_hw *hw, struct ice_vsi *vsi, struct ice_aqc_vsi_props *info, uint8_t enabled_tcmap) { @@ -917,13 +935,28 @@ ice_vsi_config_tc_queue_mapping(struct ice_vsi *vsi, } /* vector 0 is reserved and 1 vector for ctrl vsi 
*/ - if (vsi->adapter->hw.func_caps.common_cap.num_msix_vectors < 2) + if (vsi->adapter->hw.func_caps.common_cap.num_msix_vectors < 2) { vsi->nb_qps = 0; - else + } else { vsi->nb_qps = RTE_MIN ((uint16_t)vsi->adapter->hw.func_caps.common_cap.num_msix_vectors - 2, RTE_MIN(vsi->nb_qps, ICE_MAX_Q_PER_TC)); + /* cap max QPs to what the HW reports as num-children for each layer. + * Multiply num_children for each layer from the entry_point layer to + * the qgroup, or second-last layer. + * Avoid any potential overflow by using uint32_t type and breaking loop + * once we have a number greater than the already configured max. + */ + uint32_t max_sched_vsi_nodes = 1; + for (uint8_t i = hw->sw_entry_point_layer; i < hw->num_tx_sched_layers - 1; i++) { + max_sched_vsi_nodes *= hw->max_children[i]; + if (max_sched_vsi_nodes >= vsi->nb_qps) + break; + } + vsi->nb_qps = RTE_MIN(vsi->nb_qps, max_sched_vsi_nodes); + } + /* nb_qps(hex) -> fls */ /* 0000 -> 0 */ /* 0001 -> 0 */ @@ -1695,7 +1728,7 @@ ice_setup_vsi(struct ice_pf *pf, enum ice_vsi_type type) rte_cpu_to_le_16(hw->func_caps.fd_fltr_best_effort); /* Enable VLAN/UP trip */ - ret = ice_vsi_config_tc_queue_mapping(vsi, + ret = ice_vsi_config_tc_queue_mapping(hw, vsi, &vsi_ctx.info, ICE_DEFAULT_TCMAP); if (ret) { @@ -1719,7 +1752,7 @@ ice_setup_vsi(struct ice_pf *pf, enum ice_vsi_type type) vsi_ctx.info.fd_options = rte_cpu_to_le_16(cfg); vsi_ctx.info.sw_id = hw->port_info->sw_id; vsi_ctx.info.sw_flags2 = ICE_AQ_VSI_SW_FLAG_LAN_ENA; - ret = ice_vsi_config_tc_queue_mapping(vsi, + ret = ice_vsi_config_tc_queue_mapping(hw, vsi, &vsi_ctx.info, ICE_DEFAULT_TCMAP); if (ret) { @@ -1823,6 +1856,7 @@ ice_send_driver_ver(struct ice_hw *hw) static int ice_pf_setup(struct ice_pf *pf) { + struct ice_adapter *ad = ICE_PF_TO_ADAPTER(pf); struct ice_hw *hw = ICE_PF_TO_HW(pf); struct ice_vsi *vsi; uint16_t unused; @@ -1847,6 +1881,28 @@ ice_pf_setup(struct ice_pf *pf) return -EINVAL; } + /* set the number of hidden Tx scheduler layers. If no devargs parameter to + * set the number of exposed levels, the default is to expose all levels, + * except the TC layer. + * + * If the number of exposed levels is set, we check that it's not greater + * than the HW can provide (in which case we do nothing except log a warning), + * and then set the hidden layers to be the total number of levels minus the + * requested visible number. 
+ */ + pf->tm_conf.hidden_layers = hw->port_info->has_tc; + if (ad->devargs.tm_exposed_levels != 0) { + const uint8_t avail_layers = hw->num_tx_sched_layers - hw->port_info->has_tc; + const uint8_t req_layers = ad->devargs.tm_exposed_levels; + if (req_layers > avail_layers) { + PMD_INIT_LOG(WARNING, "The number of TM scheduler exposed levels exceeds the number of supported levels (%u)", + avail_layers); + PMD_INIT_LOG(WARNING, "Setting scheduler layers to %u", avail_layers); + } else { + pf->tm_conf.hidden_layers = hw->num_tx_sched_layers - req_layers; + } + } + pf->main_vsi = vsi; rte_spinlock_init(&pf->link_lock); @@ -1888,18 +1944,14 @@ static int ice_read_customized_path(char *pkg_file, uint16_t buff_len) } n = read(fp, pkg_file, buff_len - 1); - if (n == 0) { - close(fp); - return -EIO; + if (n > 0) { + if (pkg_file[n - 1] == '\n') + n--; + pkg_file[n] = '\0'; } - if (pkg_file[n - 1] == '\n') - n--; - - pkg_file[n] = '\0'; - close(fp); - return 0; + return n; } int ice_load_pkg(struct ice_adapter *adapter, bool use_dsn, uint64_t dsn) @@ -1912,11 +1964,22 @@ int ice_load_pkg(struct ice_adapter *adapter, bool use_dsn, uint64_t dsn) size_t bufsz; int err; + /* first read any explicitly referenced DDP file*/ + if (adapter->devargs.ddp_filename != NULL) { + strlcpy(pkg_file, adapter->devargs.ddp_filename, sizeof(pkg_file)); + if (rte_firmware_read(pkg_file, &buf, &bufsz) == 0) { + goto load_fw; + } else { + PMD_INIT_LOG(ERR, "Cannot load DDP file: %s", pkg_file); + return -1; + } + } + memset(opt_ddp_filename, 0, ICE_MAX_PKG_FILENAME_SIZE); snprintf(opt_ddp_filename, ICE_MAX_PKG_FILENAME_SIZE, "ice-%016" PRIx64 ".pkg", dsn); - if (ice_read_customized_path(customized_path, ICE_MAX_PKG_FILENAME_SIZE) == 0) { + if (ice_read_customized_path(customized_path, ICE_MAX_PKG_FILENAME_SIZE) > 0) { if (use_dsn) { snprintf(pkg_file, RTE_DIM(pkg_file), "%s/%s", customized_path, opt_ddp_filename); @@ -1957,7 +2020,7 @@ int ice_load_pkg(struct ice_adapter *adapter, bool use_dsn, uint64_t dsn) load_fw: PMD_INIT_LOG(DEBUG, "DDP package name: %s", pkg_file); - err = ice_copy_and_init_pkg(hw, buf, bufsz); + err = ice_copy_and_init_pkg(hw, buf, bufsz, adapter->devargs.ddp_load_sched); if (!ice_is_init_pkg_successful(err)) { PMD_INIT_LOG(ERR, "ice_copy_and_init_hw failed: %d", err); free(buf); @@ -1989,20 +2052,19 @@ ice_base_queue_get(struct ice_pf *pf) static int parse_bool(const char *key, const char *value, void *args) { - int *i = (int *)args; - char *end; - int num; - - num = strtoul(value, &end, 10); + int *i = args; - if (num != 0 && num != 1) { - PMD_DRV_LOG(WARNING, "invalid value:\"%s\" for key:\"%s\", " - "value must be 0 or 1", + if (value == NULL || value[0] == '\0') { + PMD_DRV_LOG(WARNING, "key:\"%s\", requires a value, which must be 0 or 1", key); + return -1; + } + if (value[1] != '\0' || (value[0] != '0' && value[0] != '1')) { + PMD_DRV_LOG(WARNING, "invalid value:\"%s\" for key:\"%s\", value must be 0 or 1", value, key); return -1; } - *i = num; + *i = (value[0] == '1'); return 0; } @@ -2025,6 +2087,32 @@ parse_u64(const char *key, const char *value, void *args) return 0; } +static int +parse_tx_sched_levels(const char *key, const char *value, void *args) +{ + uint8_t *num = args; + long tmp; + char *endptr; + + errno = 0; + tmp = strtol(value, &endptr, 0); + /* the value needs two stage validation, since the actual number of available + * levels is not known at this point. Initially just validate that it is in + * the correct range, between 3 and 8. 
Later validation will check that the + * available layers on a particular port is higher than the value specified here. + */ + if (errno || *endptr != '\0' || + tmp < (ICE_VSI_LAYER_OFFSET - 1) || tmp >= ICE_TM_MAX_LAYERS) { + PMD_DRV_LOG(WARNING, "%s: Invalid value \"%s\", should be in range [%d, %d]", + key, value, ICE_VSI_LAYER_OFFSET - 1, ICE_TM_MAX_LAYERS - 1); + return -1; + } + + *num = tmp; + + return 0; +} + static int lookup_pps_type(const char *pps_name) { @@ -2259,6 +2347,23 @@ static int ice_parse_devargs(struct rte_eth_dev *dev) ret = rte_kvargs_process(kvlist, ICE_RX_LOW_LATENCY_ARG, &parse_bool, &ad->devargs.rx_low_latency); + if (ret) + goto bail; + + ret = rte_kvargs_process(kvlist, ICE_DDP_FILENAME_ARG, + &handle_ddp_filename_arg, &ad->devargs.ddp_filename); + if (ret) + goto bail; + + ret = rte_kvargs_process(kvlist, ICE_DDP_LOAD_SCHED_ARG, + &parse_bool, &ad->devargs.ddp_load_sched); + if (ret) + goto bail; + + ret = rte_kvargs_process(kvlist, ICE_TM_LEVELS_ARG, + &parse_tx_sched_levels, &ad->devargs.tm_exposed_levels); + if (ret) + goto bail; bail: rte_kvargs_free(kvlist); @@ -2762,6 +2867,8 @@ ice_dev_close(struct rte_eth_dev *dev) ice_free_hw_tbls(hw); rte_free(hw->port_info); hw->port_info = NULL; + free((void *)(uintptr_t)ad->devargs.ddp_filename); + ad->devargs.ddp_filename = NULL; ice_shutdown_all_ctrlq(hw, true); rte_free(pf->proto_xtr); pf->proto_xtr = NULL; @@ -3852,7 +3959,6 @@ ice_dev_start(struct rte_eth_dev *dev) int mask, ret; uint8_t timer = hw->func_caps.ts_func_info.tmr_index_owned; uint32_t pin_idx = ad->devargs.pin_idx; - struct rte_tm_error tm_err; ice_declare_bitmap(pmask, ICE_PROMISC_MAX); ice_zero_bitmap(pmask, ICE_PROMISC_MAX); @@ -3884,14 +3990,6 @@ ice_dev_start(struct rte_eth_dev *dev) } } - if (pf->tm_conf.committed) { - ret = ice_do_hierarchy_commit(dev, pf->tm_conf.clear_on_fail, &tm_err); - if (ret) { - PMD_DRV_LOG(ERR, "fail to commit Tx scheduler"); - goto rx_err; - } - } - ice_set_rx_function(dev); ice_set_tx_function(dev); @@ -4111,6 +4209,9 @@ ice_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) ICE_PHY_TYPE_SUPPORT_100G_HIGH(phy_type_high)) dev_info->speed_capa |= RTE_ETH_LINK_SPEED_100G; + if (ICE_PHY_TYPE_SUPPORT_200G_HIGH(phy_type_high)) + dev_info->speed_capa |= RTE_ETH_LINK_SPEED_200G; + dev_info->nb_rx_queues = dev->data->nb_rx_queues; dev_info->nb_tx_queues = dev->data->nb_tx_queues; @@ -4236,6 +4337,9 @@ ice_link_update(struct rte_eth_dev *dev, int wait_to_complete) case ICE_AQ_LINK_SPEED_100GB: link.link_speed = RTE_ETH_SPEED_NUM_100G; break; + case ICE_AQ_LINK_SPEED_200GB: + link.link_speed = RTE_ETH_SPEED_NUM_200G; + break; case ICE_AQ_LINK_SPEED_UNKNOWN: PMD_DRV_LOG(ERR, "Unknown link speed"); link.link_speed = RTE_ETH_SPEED_NUM_UNKNOWN; @@ -4262,6 +4366,8 @@ ice_parse_link_speeds(uint16_t link_speeds) { uint16_t link_speed = ICE_AQ_LINK_SPEED_UNKNOWN; + if (link_speeds & RTE_ETH_LINK_SPEED_200G) + link_speed |= ICE_AQ_LINK_SPEED_200GB; if (link_speeds & RTE_ETH_LINK_SPEED_100G) link_speed |= ICE_AQ_LINK_SPEED_100GB; if (link_speeds & RTE_ETH_LINK_SPEED_50G) @@ -4294,7 +4400,8 @@ ice_apply_link_speed(struct rte_eth_dev *dev) struct rte_eth_conf *conf = &dev->data->dev_conf; if (conf->link_speeds == RTE_ETH_LINK_SPEED_AUTONEG) { - conf->link_speeds = RTE_ETH_LINK_SPEED_100G | + conf->link_speeds = RTE_ETH_LINK_SPEED_200G | + RTE_ETH_LINK_SPEED_100G | RTE_ETH_LINK_SPEED_50G | RTE_ETH_LINK_SPEED_40G | RTE_ETH_LINK_SPEED_25G | @@ -6597,10 +6704,27 @@ ice_timesync_read_tx_timestamp(struct rte_eth_dev 
*dev, struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private); struct ice_adapter *ad = ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private); - uint64_t ts_ns, tstamp; + uint64_t ts_ns, tstamp, tstamp_ready = 0; + uint64_t end_time; const uint64_t mask = 0xFFFFFFFF; int ret; + /* Set the end time with a delay of 10 microseconds */ + end_time = rte_get_timer_cycles() + (rte_get_timer_hz() / 100000); + + do { + ret = ice_get_phy_tx_tstamp_ready(hw, ad->ptp_tx_block, &tstamp_ready); + if (ret) { + PMD_DRV_LOG(ERR, "Failed to get phy ready for timestamp"); + return -1; + } + + if ((tstamp_ready & BIT_ULL(0)) == 0 && rte_get_timer_cycles() > end_time) { + PMD_DRV_LOG(ERR, "Timeout to get phy ready for timestamp"); + return -1; + } + } while ((tstamp_ready & BIT_ULL(0)) == 0); + ret = ice_read_phy_tstamp(hw, ad->ptp_tx_block, ad->ptp_tx_index, &tstamp); if (ret || tstamp == 0) { PMD_DRV_LOG(ERR, "Failed to read phy timestamp"); @@ -7135,6 +7259,9 @@ RTE_PMD_REGISTER_PARAM_STRING(net_ice, ICE_PROTO_XTR_ARG "=[queue:]" ICE_SAFE_MODE_SUPPORT_ARG "=<0|1>" ICE_DEFAULT_MAC_DISABLE "=<0|1>" + ICE_DDP_FILENAME_ARG "=" + ICE_DDP_LOAD_SCHED_ARG "=<0|1>" + ICE_TM_LEVELS_ARG "=" ICE_RX_LOW_LATENCY_ARG "=<0|1>"); RTE_LOG_REGISTER_SUFFIX(ice_logtype_init, init, NOTICE); diff --git a/drivers/net/ice/ice_ethdev.h b/drivers/net/ice/ice_ethdev.h index 57087c98ed..a5b27fabd2 100644 --- a/drivers/net/ice/ice_ethdev.h +++ b/drivers/net/ice/ice_ethdev.h @@ -458,6 +458,8 @@ struct ice_acl_info { TAILQ_HEAD(ice_shaper_profile_list, ice_tm_shaper_profile); TAILQ_HEAD(ice_tm_node_list, ice_tm_node); +#define ICE_TM_MAX_LAYERS ICE_SCHED_9_LAYERS + struct ice_tm_shaper_profile { TAILQ_ENTRY(ice_tm_shaper_profile) node; uint32_t shaper_profile_id; @@ -480,18 +482,11 @@ struct ice_tm_node { struct ice_sched_node *sched_node; }; -/* node type of Traffic Manager */ -enum ice_tm_node_type { - ICE_TM_NODE_TYPE_PORT, - ICE_TM_NODE_TYPE_QGROUP, - ICE_TM_NODE_TYPE_QUEUE, - ICE_TM_NODE_TYPE_MAX, -}; - /* Struct to store all the Traffic Manager configuration. */ struct ice_tm_conf { struct ice_shaper_profile_list shaper_profile_list; struct ice_tm_node *root; /* root node - port */ + uint8_t hidden_layers; /* the number of hierarchy layers hidden from app */ bool committed; bool clear_on_fail; }; @@ -564,11 +559,14 @@ struct ice_devargs { uint8_t proto_xtr[ICE_MAX_QUEUE_NUM]; uint8_t pin_idx; uint8_t pps_out_ena; + uint8_t ddp_load_sched; + uint8_t tm_exposed_levels; int xtr_field_offs; uint8_t xtr_flag_offs[PROTO_XTR_MAX]; /* Name of the field. 
*/ char xtr_field_name[RTE_MBUF_DYN_NAMESIZE]; uint64_t mbuf_check; + const char *ddp_filename; }; /** @@ -666,7 +664,7 @@ struct ice_vsi_vlan_pvid_info { /* ICE_PF_TO */ #define ICE_PF_TO_HW(pf) \ - (&(((struct ice_pf *)pf)->adapter->hw)) + (&((pf)->adapter->hw)) #define ICE_PF_TO_ADAPTER(pf) \ ((struct ice_adapter *)(pf)->adapter) #define ICE_PF_TO_ETH_DEV(pf) \ @@ -688,9 +686,6 @@ int ice_rem_rss_cfg_wrap(struct ice_pf *pf, uint16_t vsi_id, struct ice_rss_hash_cfg *cfg); void ice_tm_conf_init(struct rte_eth_dev *dev); void ice_tm_conf_uninit(struct rte_eth_dev *dev); -int ice_do_hierarchy_commit(struct rte_eth_dev *dev, - int clear_on_fail, - struct rte_tm_error *error); extern const struct rte_tm_ops ice_tm_ops; static inline int @@ -740,6 +735,18 @@ ice_align_floor(int n) ((phy_type) & ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC) || \ ((phy_type) & ICE_PHY_TYPE_HIGH_100G_AUI2)) +#define ICE_PHY_TYPE_SUPPORT_200G_HIGH(phy_type) \ + (((phy_type) & ICE_PHY_TYPE_HIGH_200G_CR4_PAM4) || \ + ((phy_type) & ICE_PHY_TYPE_HIGH_200G_SR4) || \ + ((phy_type) & ICE_PHY_TYPE_HIGH_200G_FR4) || \ + ((phy_type) & ICE_PHY_TYPE_HIGH_200G_LR4) || \ + ((phy_type) & ICE_PHY_TYPE_HIGH_200G_DR4) || \ + ((phy_type) & ICE_PHY_TYPE_HIGH_200G_KR4_PAM4) || \ + ((phy_type) & ICE_PHY_TYPE_HIGH_200G_AUI4_AOC_ACC) || \ + ((phy_type) & ICE_PHY_TYPE_HIGH_200G_AUI4) || \ + ((phy_type) & ICE_PHY_TYPE_HIGH_200G_AUI8_AOC_ACC) || \ + ((phy_type) & ICE_PHY_TYPE_HIGH_200G_AUI8)) + __rte_experimental int rte_pmd_ice_dump_package(uint16_t port, uint8_t **buff, uint32_t *size); @@ -748,4 +755,8 @@ int rte_pmd_ice_dump_switch(uint16_t port, uint8_t **buff, uint32_t *size); __rte_experimental int rte_pmd_ice_dump_txsched(uint16_t port, bool detail, FILE *stream); + +int +ice_tm_setup_txq_node(struct ice_pf *pf, struct ice_hw *hw, uint16_t qid, uint32_t node_teid); + #endif /* _ICE_ETHDEV_H_ */ diff --git a/drivers/net/ice/ice_rxtx.c b/drivers/net/ice/ice_rxtx.c index d2f9edc221..0c7106c7e0 100644 --- a/drivers/net/ice/ice_rxtx.c +++ b/drivers/net/ice/ice_rxtx.c @@ -747,6 +747,7 @@ ice_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id) int err; struct ice_vsi *vsi; struct ice_hw *hw; + struct ice_pf *pf; struct ice_aqc_add_tx_qgrp *txq_elem; struct ice_tlan_ctx tx_ctx; int buf_len; @@ -777,6 +778,7 @@ ice_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id) vsi = txq->vsi; hw = ICE_VSI_TO_HW(vsi); + pf = ICE_VSI_TO_PF(vsi); memset(&tx_ctx, 0, sizeof(tx_ctx)); txq_elem->num_txqs = 1; @@ -812,6 +814,14 @@ ice_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id) /* store the schedule node id */ txq->q_teid = txq_elem->txqs[0].q_teid; + /* move the queue to correct position in hierarchy, if explicit hierarchy configured */ + if (pf->tm_conf.committed) + if (ice_tm_setup_txq_node(pf, hw, tx_queue_id, txq->q_teid) != 0) { + PMD_DRV_LOG(ERR, "Failed to set up txq traffic management node"); + rte_free(txq_elem); + return -EIO; + } + dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED; rte_free(txq_elem); @@ -1139,6 +1149,10 @@ ice_fdir_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id) tx_queue_id); return -EINVAL; } + if (txq->qtx_tail == NULL) { + PMD_DRV_LOG(INFO, "TX queue %u not started", tx_queue_id); + return 0; + } vsi = txq->vsi; q_ids[0] = txq->reg_idx; @@ -1153,6 +1167,7 @@ ice_fdir_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id) } txq->tx_rel_mbufs(txq); + txq->qtx_tail = NULL; return 0; } diff --git a/drivers/net/ice/ice_rxtx.h b/drivers/net/ice/ice_rxtx.h index 
f7276cfc9f..45f25b3609 100644 --- a/drivers/net/ice/ice_rxtx.h +++ b/drivers/net/ice/ice_rxtx.h @@ -9,7 +9,7 @@ #define ICE_ALIGN_RING_DESC 32 #define ICE_MIN_RING_DESC 64 -#define ICE_MAX_RING_DESC 4096 +#define ICE_MAX_RING_DESC (8192 - 32) #define ICE_DMA_MEM_ALIGN 4096 #define ICE_RING_BASE_ALIGN 128 diff --git a/drivers/net/ice/ice_tm.c b/drivers/net/ice/ice_tm.c index 636ab77f26..18ac324a61 100644 --- a/drivers/net/ice/ice_tm.c +++ b/drivers/net/ice/ice_tm.c @@ -1,17 +1,15 @@ /* SPDX-License-Identifier: BSD-3-Clause * Copyright(c) 2022 Intel Corporation */ +#include #include #include "ice_ethdev.h" #include "ice_rxtx.h" -#define MAX_CHILDREN_PER_SCHED_NODE 8 -#define MAX_CHILDREN_PER_TM_NODE 256 - static int ice_hierarchy_commit(struct rte_eth_dev *dev, int clear_on_fail, - __rte_unused struct rte_tm_error *error); + struct rte_tm_error *error); static int ice_tm_node_add(struct rte_eth_dev *dev, uint32_t node_id, uint32_t parent_node_id, uint32_t priority, uint32_t weight, uint32_t level_id, @@ -86,9 +84,10 @@ ice_tm_conf_uninit(struct rte_eth_dev *dev) } static int -ice_node_param_check(struct ice_pf *pf, uint32_t node_id, +ice_node_param_check(uint32_t node_id, uint32_t priority, uint32_t weight, const struct rte_tm_node_params *params, + bool is_leaf, struct rte_tm_error *error) { /* checked all the unsupported parameter */ @@ -123,7 +122,7 @@ ice_node_param_check(struct ice_pf *pf, uint32_t node_id, } /* for non-leaf node */ - if (node_id >= pf->dev_data->nb_tx_queues) { + if (!is_leaf) { if (params->nonleaf.wfq_weight_mode) { error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS_WFQ_WEIGHT_MODE; @@ -147,6 +146,11 @@ ice_node_param_check(struct ice_pf *pf, uint32_t node_id, } /* for leaf node */ + if (node_id >= RTE_MAX_QUEUES_PER_PORT) { + error->type = RTE_TM_ERROR_TYPE_NODE_ID; + error->message = "Node ID out of range for a leaf node."; + return -EINVAL; + } if (params->leaf.cman) { error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS_CMAN; error->message = "Congestion management not supported"; @@ -193,6 +197,13 @@ find_node(struct ice_tm_node *root, uint32_t id) return NULL; } +static inline uint8_t +ice_get_leaf_level(const struct ice_pf *pf) +{ + const struct ice_hw *hw = ICE_PF_TO_HW(pf); + return hw->num_tx_sched_layers - pf->tm_conf.hidden_layers - 1; +} + static int ice_node_type_get(struct rte_eth_dev *dev, uint32_t node_id, int *is_leaf, struct rte_tm_error *error) @@ -217,7 +228,7 @@ ice_node_type_get(struct rte_eth_dev *dev, uint32_t node_id, return -EINVAL; } - if (tm_node->level == ICE_TM_NODE_TYPE_QUEUE) + if (tm_node->level == ice_get_leaf_level(pf)) *is_leaf = true; else *is_leaf = false; @@ -393,34 +404,22 @@ ice_tm_node_add(struct rte_eth_dev *dev, uint32_t node_id, struct rte_tm_error *error) { struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private); + struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private); struct ice_tm_shaper_profile *shaper_profile = NULL; struct ice_tm_node *tm_node; - struct ice_tm_node *parent_node; + struct ice_tm_node *parent_node = NULL; + uint8_t layer_offset = pf->tm_conf.hidden_layers; int ret; if (!params || !error) return -EINVAL; - ret = ice_node_param_check(pf, node_id, priority, weight, - params, error); - if (ret) - return ret; - - /* check if the node is already existed */ - if (find_node(pf->tm_conf.root, node_id)) { - error->type = RTE_TM_ERROR_TYPE_NODE_ID; - error->message = "node id already used"; - return -EINVAL; - } - /* check the shaper profile id */ if (params->shaper_profile_id != 
RTE_TM_SHAPER_PROFILE_ID_NONE) { - shaper_profile = ice_shaper_profile_search(dev, - params->shaper_profile_id); + shaper_profile = ice_shaper_profile_search(dev, params->shaper_profile_id); if (!shaper_profile) { - error->type = - RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID; - error->message = "shaper profile not exist"; + error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID; + error->message = "shaper profile does not exist"; return -EINVAL; } } @@ -428,9 +427,9 @@ ice_tm_node_add(struct rte_eth_dev *dev, uint32_t node_id, /* root node if not have a parent */ if (parent_node_id == RTE_TM_NODE_ID_NULL) { /* check level */ - if (level_id != ICE_TM_NODE_TYPE_PORT) { + if (level_id != 0) { error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS; - error->message = "Wrong level"; + error->message = "Wrong level, root node (NULL parent) must be at level 0"; return -EINVAL; } @@ -441,74 +440,75 @@ ice_tm_node_add(struct rte_eth_dev *dev, uint32_t node_id, return -EINVAL; } + ret = ice_node_param_check(node_id, priority, weight, params, false, error); + if (ret) + return ret; + /* add the root node */ tm_node = rte_zmalloc(NULL, - sizeof(struct ice_tm_node) + - sizeof(struct ice_tm_node *) * MAX_CHILDREN_PER_TM_NODE, - 0); + sizeof(struct ice_tm_node) + + sizeof(struct ice_tm_node *) * hw->max_children[layer_offset], + 0); if (!tm_node) return -ENOMEM; tm_node->id = node_id; - tm_node->level = ICE_TM_NODE_TYPE_PORT; + tm_node->level = 0; tm_node->parent = NULL; tm_node->reference_count = 0; tm_node->shaper_profile = shaper_profile; - tm_node->children = - (void *)((uint8_t *)tm_node + sizeof(struct ice_tm_node)); - rte_memcpy(&tm_node->params, params, - sizeof(struct rte_tm_node_params)); + tm_node->children = RTE_PTR_ADD(tm_node, sizeof(struct ice_tm_node)); + tm_node->params = *params; pf->tm_conf.root = tm_node; return 0; } - /* check the parent node */ parent_node = find_node(pf->tm_conf.root, parent_node_id); if (!parent_node) { error->type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID; error->message = "parent not exist"; return -EINVAL; } - if (parent_node->level != ICE_TM_NODE_TYPE_PORT && - parent_node->level != ICE_TM_NODE_TYPE_QGROUP) { - error->type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID; - error->message = "parent is not valid"; - return -EINVAL; - } + /* check level */ - if (level_id != RTE_TM_NODE_LEVEL_ID_ANY && - level_id != parent_node->level + 1) { + if (level_id == RTE_TM_NODE_LEVEL_ID_ANY) + level_id = parent_node->level + 1; + else if (level_id != parent_node->level + 1) { error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS; error->message = "Wrong level"; return -EINVAL; } - /* check the node number */ - if (parent_node->level == ICE_TM_NODE_TYPE_PORT) { - /* check the queue group number */ - if (parent_node->reference_count >= pf->dev_data->nb_tx_queues) { - error->type = RTE_TM_ERROR_TYPE_NODE_ID; - error->message = "too many queue groups"; - return -EINVAL; - } - } else { - /* check the queue number */ - if (parent_node->reference_count >= - MAX_CHILDREN_PER_SCHED_NODE) { - error->type = RTE_TM_ERROR_TYPE_NODE_ID; - error->message = "too many queues"; - return -EINVAL; - } - if (node_id >= pf->dev_data->nb_tx_queues) { - error->type = RTE_TM_ERROR_TYPE_NODE_ID; - error->message = "too large queue id"; - return -EINVAL; - } + ret = ice_node_param_check(node_id, priority, weight, + params, level_id == ice_get_leaf_level(pf), error); + if (ret) + return ret; + + /* check if the node is already existed */ + if (find_node(pf->tm_conf.root, node_id)) { + error->type = 
RTE_TM_ERROR_TYPE_NODE_ID; + error->message = "node id already used"; + return -EINVAL; + } + + /* check the parent node */ + /* for n-level hierarchy, level n-1 is leaf, so last level with children is n-2 */ + if ((int)parent_node->level > hw->num_tx_sched_layers - 2) { + error->type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID; + error->message = "parent is not valid"; + return -EINVAL; + } + + /* check the max children allowed at this level */ + if (parent_node->reference_count >= hw->max_children[parent_node->level]) { + error->type = RTE_TM_ERROR_TYPE_CAPABILITIES; + error->message = "insufficient number of child nodes supported"; + return -EINVAL; } tm_node = rte_zmalloc(NULL, - sizeof(struct ice_tm_node) + - sizeof(struct ice_tm_node *) * MAX_CHILDREN_PER_TM_NODE, - 0); + sizeof(struct ice_tm_node) + + sizeof(struct ice_tm_node *) * hw->max_children[level_id + layer_offset], + 0); if (!tm_node) return -ENOMEM; tm_node->id = node_id; @@ -516,25 +516,18 @@ ice_tm_node_add(struct rte_eth_dev *dev, uint32_t node_id, tm_node->weight = weight; tm_node->reference_count = 0; tm_node->parent = parent_node; - tm_node->level = parent_node->level + 1; + tm_node->level = level_id; tm_node->shaper_profile = shaper_profile; - tm_node->children = - (void *)((uint8_t *)tm_node + sizeof(struct ice_tm_node)); - tm_node->parent->children[tm_node->parent->reference_count] = tm_node; + tm_node->children = RTE_PTR_ADD(tm_node, sizeof(struct ice_tm_node)); + tm_node->parent->children[tm_node->parent->reference_count++] = tm_node; + tm_node->params = *params; - if (tm_node->priority != 0 && level_id != ICE_TM_NODE_TYPE_QUEUE && - level_id != ICE_TM_NODE_TYPE_QGROUP) - PMD_DRV_LOG(WARNING, "priority != 0 not supported in level %d", - level_id); + if (tm_node->priority != 0) + PMD_DRV_LOG(WARNING, "priority != 0 not supported in level %d", level_id); - if (tm_node->weight != 1 && - level_id != ICE_TM_NODE_TYPE_QUEUE && level_id != ICE_TM_NODE_TYPE_QGROUP) - PMD_DRV_LOG(WARNING, "weight != 1 not supported in level %d", - level_id); + if (tm_node->weight != 1 && level_id == 0) + PMD_DRV_LOG(WARNING, "weight != 1 not supported in level %d", level_id); - rte_memcpy(&tm_node->params, params, - sizeof(struct rte_tm_node_params)); - tm_node->parent->reference_count++; return 0; } @@ -573,7 +566,7 @@ ice_tm_node_delete(struct rte_eth_dev *dev, uint32_t node_id, } /* root node */ - if (tm_node->level == ICE_TM_NODE_TYPE_PORT) { + if (tm_node->level == 0) { rte_free(tm_node); pf->tm_conf.root = NULL; return 0; @@ -593,53 +586,6 @@ ice_tm_node_delete(struct rte_eth_dev *dev, uint32_t node_id, return 0; } -static int ice_move_recfg_lan_txq(struct rte_eth_dev *dev, - struct ice_sched_node *queue_sched_node, - struct ice_sched_node *dst_node, - uint16_t queue_id) -{ - struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private); - struct ice_aqc_move_txqs_data *buf; - struct ice_sched_node *queue_parent_node; - uint8_t txqs_moved; - int ret = ICE_SUCCESS; - uint16_t buf_size = ice_struct_size(buf, txqs, 1); - - buf = (struct ice_aqc_move_txqs_data *)ice_malloc(hw, sizeof(*buf)); - if (buf == NULL) - return -ENOMEM; - - queue_parent_node = queue_sched_node->parent; - buf->src_teid = queue_parent_node->info.node_teid; - buf->dest_teid = dst_node->info.node_teid; - buf->txqs[0].q_teid = queue_sched_node->info.node_teid; - buf->txqs[0].txq_id = queue_id; - - ret = ice_aq_move_recfg_lan_txq(hw, 1, true, false, false, false, 50, - NULL, buf, buf_size, &txqs_moved, NULL); - if (ret || txqs_moved == 0) { - PMD_DRV_LOG(ERR, "move 
lan queue %u failed", queue_id); - rte_free(buf); - return ICE_ERR_PARAM; - } - - if (queue_parent_node->num_children > 0) { - queue_parent_node->num_children--; - queue_parent_node->children[queue_parent_node->num_children] = NULL; - } else { - PMD_DRV_LOG(ERR, "invalid children number %d for queue %u", - queue_parent_node->num_children, queue_id); - rte_free(buf); - return ICE_ERR_PARAM; - } - dst_node->children[dst_node->num_children++] = queue_sched_node; - queue_sched_node->parent = dst_node; - ice_sched_query_elem(hw, queue_sched_node->info.node_teid, &queue_sched_node->info); - - rte_free(buf); - return ret; -} - static int ice_set_node_rate(struct ice_hw *hw, struct ice_tm_node *tm_node, struct ice_sched_node *sched_node) @@ -727,240 +673,198 @@ static int ice_cfg_hw_node(struct ice_hw *hw, return 0; } -static struct ice_sched_node *ice_get_vsi_node(struct ice_hw *hw) +int +ice_tm_setup_txq_node(struct ice_pf *pf, struct ice_hw *hw, uint16_t qid, uint32_t teid) { - struct ice_sched_node *node = hw->port_info->root; - uint32_t vsi_layer = hw->num_tx_sched_layers - ICE_VSI_LAYER_OFFSET; - uint32_t i; - - for (i = 0; i < vsi_layer; i++) - node = node->children[0]; - - return node; -} + struct ice_sched_node *hw_node = ice_sched_find_node_by_teid(hw->port_info->root, teid); + struct ice_tm_node *sw_node = find_node(pf->tm_conf.root, qid); -static int ice_reset_noleaf_nodes(struct rte_eth_dev *dev) -{ - struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private); - struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private); - struct ice_sched_node *vsi_node = ice_get_vsi_node(hw); - struct ice_tm_node *root = pf->tm_conf.root; - uint32_t i; - int ret; - - /* reset vsi_node */ - ret = ice_set_node_rate(hw, NULL, vsi_node); - if (ret) { - PMD_DRV_LOG(ERR, "reset vsi node failed"); - return ret; - } - - if (root == NULL) + /* not configured in hierarchy */ + if (sw_node == NULL) return 0; - for (i = 0; i < root->reference_count; i++) { - struct ice_tm_node *tm_node = root->children[i]; + sw_node->sched_node = hw_node; - if (tm_node->sched_node == NULL) - continue; + /* if the queue node has been put in the wrong place in hierarchy */ + if (hw_node->parent != sw_node->parent->sched_node) { + struct ice_aqc_move_txqs_data *buf; + uint8_t txqs_moved = 0; + uint16_t buf_size = ice_struct_size(buf, txqs, 1); - ret = ice_cfg_hw_node(hw, NULL, tm_node->sched_node); - if (ret) { - PMD_DRV_LOG(ERR, "reset queue group node %u failed", tm_node->id); - return ret; + buf = ice_malloc(hw, buf_size); + if (buf == NULL) + return -ENOMEM; + + struct ice_sched_node *parent = hw_node->parent; + struct ice_sched_node *new_parent = sw_node->parent->sched_node; + buf->src_teid = parent->info.node_teid; + buf->dest_teid = new_parent->info.node_teid; + buf->txqs[0].q_teid = hw_node->info.node_teid; + buf->txqs[0].txq_id = qid; + + int ret = ice_aq_move_recfg_lan_txq(hw, 1, true, false, false, false, 50, + NULL, buf, buf_size, &txqs_moved, NULL); + if (ret || txqs_moved == 0) { + PMD_DRV_LOG(ERR, "move lan queue %u failed", qid); + ice_free(hw, buf); + return ICE_ERR_PARAM; } - tm_node->sched_node = NULL; + + /* now update the ice_sched_nodes to match physical layout */ + new_parent->children[new_parent->num_children++] = hw_node; + hw_node->parent = new_parent; + ice_sched_query_elem(hw, hw_node->info.node_teid, &hw_node->info); + for (uint16_t i = 0; i < parent->num_children; i++) + if (parent->children[i] == hw_node) { + /* to remove, just overwrite the old node slot with the last ptr */ + 
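/* Editor's note: the removal below replaces the freed slot with the last child
 * pointer and decrements num_children, an O(1) swap-with-last removal. This
 * assumes the ordering of siblings in children[] is not significant.
 */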
parent->children[i] = parent->children[--parent->num_children]; + break; + } } - return 0; + return ice_cfg_hw_node(hw, sw_node, hw_node); } -static int ice_remove_leaf_nodes(struct rte_eth_dev *dev) +/* from a given node, recursively deletes all the nodes that belong to that vsi. + * Any nodes which can't be deleted because they have children belonging to a different + * VSI, are now also adjusted to belong to that VSI also + */ +static int +free_sched_node_recursive(struct ice_port_info *pi, const struct ice_sched_node *root, + struct ice_sched_node *node, uint8_t vsi_id) { - int ret = 0; - int i; + uint16_t i = 0; - for (i = 0; i < dev->data->nb_tx_queues; i++) { - ret = ice_tx_queue_stop(dev, i); - if (ret) { - PMD_DRV_LOG(ERR, "stop queue %u failed", i); - break; + while (i < node->num_children) { + if (node->children[i]->vsi_handle != vsi_id) { + i++; + continue; } + free_sched_node_recursive(pi, root, node->children[i], vsi_id); } - return ret; -} - -static int ice_add_leaf_nodes(struct rte_eth_dev *dev) -{ - int ret = 0; - int i; - - for (i = 0; i < dev->data->nb_tx_queues; i++) { - ret = ice_tx_queue_start(dev, i); - if (ret) { - PMD_DRV_LOG(ERR, "start queue %u failed", i); - break; - } + if (node != root) { + if (node->num_children == 0) + ice_free_sched_node(pi, node); + else + node->vsi_handle = node->children[0]->vsi_handle; } - return ret; + return 0; } -int ice_do_hierarchy_commit(struct rte_eth_dev *dev, - int clear_on_fail, - struct rte_tm_error *error) +static int +create_sched_node_recursive(struct ice_pf *pf, struct ice_port_info *pi, + struct ice_tm_node *sw_node, struct ice_sched_node *hw_root, uint16_t *created) { - struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private); - struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private); - struct ice_tm_node *root; - struct ice_sched_node *vsi_node = NULL; - struct ice_sched_node *queue_node; - struct ice_tx_queue *txq; - int ret_val = 0; - uint32_t i; - uint32_t idx_vsi_child; - uint32_t idx_qg; - uint32_t nb_vsi_child; - uint32_t nb_qg; - uint32_t qid; - uint32_t q_teid; - - /* remove leaf nodes */ - ret_val = ice_remove_leaf_nodes(dev); - if (ret_val) { - error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED; - PMD_DRV_LOG(ERR, "reset no-leaf nodes failed"); - goto fail_clear; - } - - /* reset no-leaf nodes. 
*/ - ret_val = ice_reset_noleaf_nodes(dev); - if (ret_val) { - error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED; - PMD_DRV_LOG(ERR, "reset leaf nodes failed"); - goto add_leaf; - } - - /* config vsi node */ - vsi_node = ice_get_vsi_node(hw); - root = pf->tm_conf.root; - - ret_val = ice_set_node_rate(hw, root, vsi_node); - if (ret_val) { - error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED; - PMD_DRV_LOG(ERR, - "configure vsi node %u bandwidth failed", - root->id); - goto add_leaf; - } - - /* config queue group nodes */ - nb_vsi_child = vsi_node->num_children; - nb_qg = vsi_node->children[0]->num_children; - - idx_vsi_child = 0; - idx_qg = 0; - - if (root == NULL) - goto commit; - - for (i = 0; i < root->reference_count; i++) { - struct ice_tm_node *tm_node = root->children[i]; - struct ice_tm_node *tm_child_node; - struct ice_sched_node *qgroup_sched_node = - vsi_node->children[idx_vsi_child]->children[idx_qg]; - uint32_t j; - - ret_val = ice_cfg_hw_node(hw, tm_node, qgroup_sched_node); - if (ret_val) { - error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED; - PMD_DRV_LOG(ERR, - "configure queue group node %u failed", - tm_node->id); - goto reset_leaf; - } - - for (j = 0; j < tm_node->reference_count; j++) { - tm_child_node = tm_node->children[j]; - qid = tm_child_node->id; - ret_val = ice_tx_queue_start(dev, qid); - if (ret_val) { - error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED; - PMD_DRV_LOG(ERR, "start queue %u failed", qid); - goto reset_leaf; - } - txq = dev->data->tx_queues[qid]; - q_teid = txq->q_teid; - queue_node = ice_sched_get_node(hw->port_info, q_teid); - if (queue_node == NULL) { - error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED; - PMD_DRV_LOG(ERR, "get queue %u node failed", qid); - goto reset_leaf; - } - if (queue_node->info.parent_teid != qgroup_sched_node->info.node_teid) { - ret_val = ice_move_recfg_lan_txq(dev, queue_node, - qgroup_sched_node, qid); - if (ret_val) { - error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED; - PMD_DRV_LOG(ERR, "move queue %u failed", qid); - goto reset_leaf; - } - } - ret_val = ice_cfg_hw_node(hw, tm_child_node, queue_node); - if (ret_val) { - error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED; - PMD_DRV_LOG(ERR, - "configure queue group node %u failed", - tm_node->id); - goto reset_leaf; - } + struct ice_sched_node *parent = sw_node->sched_node; + uint32_t teid; + uint16_t added; + + /* first create all child nodes */ + for (uint16_t i = 0; i < sw_node->reference_count; i++) { + struct ice_tm_node *tm_node = sw_node->children[i]; + int res = ice_sched_add_elems(pi, hw_root, + parent, parent->tx_sched_layer + 1, + 1 /* num nodes */, &added, &teid, + NULL /* no pre-alloc */); + if (res != 0) { + PMD_DRV_LOG(ERR, "Error with ice_sched_add_elems, adding child node to teid %u", + parent->info.node_teid); + return -1; } - - idx_qg++; - if (idx_qg >= nb_qg) { - idx_qg = 0; - idx_vsi_child++; - } - if (idx_vsi_child >= nb_vsi_child) { - error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED; - PMD_DRV_LOG(ERR, "too many queues"); - goto reset_leaf; + struct ice_sched_node *hw_node = ice_sched_find_node_by_teid(parent, teid); + if (ice_cfg_hw_node(pi->hw, tm_node, hw_node) != 0) { + PMD_DRV_LOG(ERR, "Error configuring node %u at layer %u", + teid, parent->tx_sched_layer + 1); + return -1; } + tm_node->sched_node = hw_node; + created[hw_node->tx_sched_layer]++; } -commit: - pf->tm_conf.committed = true; - pf->tm_conf.clear_on_fail = clear_on_fail; + /* if we have just created the child nodes in the q-group, i.e. last non-leaf layer, + * then just return, rather than trying to create leaf nodes. 
+ * That is done later at queue start. + */ + if (sw_node->level + 2 == ice_get_leaf_level(pf)) + return 0; - return ret_val; + for (uint16_t i = 0; i < sw_node->reference_count; i++) { + if (sw_node->children[i]->reference_count == 0) + continue; -reset_leaf: - ice_remove_leaf_nodes(dev); -add_leaf: - ice_add_leaf_nodes(dev); - ice_reset_noleaf_nodes(dev); -fail_clear: - /* clear all the traffic manager configuration */ - if (clear_on_fail) { - ice_tm_conf_uninit(dev); - ice_tm_conf_init(dev); + if (create_sched_node_recursive(pf, pi, sw_node->children[i], hw_root, created) < 0) + return -1; } - return ret_val; + return 0; } -static int ice_hierarchy_commit(struct rte_eth_dev *dev, +static int +commit_new_hierarchy(struct rte_eth_dev *dev) +{ + struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private); + struct ice_port_info *pi = hw->port_info; + struct ice_tm_node *sw_root = pf->tm_conf.root; + const uint16_t new_root_level = pf->tm_conf.hidden_layers; + /* count nodes per hw level, not per logical */ + uint16_t nodes_created_per_level[ICE_TM_MAX_LAYERS] = {0}; + uint8_t q_lvl = ice_get_leaf_level(pf); + uint8_t qg_lvl = q_lvl - 1; + + struct ice_sched_node *new_vsi_root = hw->vsi_ctx[pf->main_vsi->idx]->sched.vsi_node[0]; + while (new_vsi_root->tx_sched_layer > new_root_level) + new_vsi_root = new_vsi_root->parent; + + free_sched_node_recursive(pi, new_vsi_root, new_vsi_root, new_vsi_root->vsi_handle); + + sw_root->sched_node = new_vsi_root; + if (create_sched_node_recursive(pf, pi, sw_root, new_vsi_root, nodes_created_per_level) < 0) + return -1; + for (uint16_t i = 0; i < RTE_DIM(nodes_created_per_level); i++) + PMD_DRV_LOG(DEBUG, "Created %u nodes at level %u", + nodes_created_per_level[i], i); + hw->vsi_ctx[pf->main_vsi->idx]->sched.vsi_node[0] = new_vsi_root; + + pf->main_vsi->nb_qps = + RTE_MIN(nodes_created_per_level[qg_lvl] * hw->max_children[qg_lvl], + hw->layer_info[q_lvl].max_device_nodes); + + pf->tm_conf.committed = true; /* set flag to be checks on queue start */ + + return ice_alloc_lan_q_ctx(hw, 0, 0, pf->main_vsi->nb_qps); +} + +static int +ice_hierarchy_commit(struct rte_eth_dev *dev, int clear_on_fail, struct rte_tm_error *error) { - struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private); + bool restart = false; + + /* commit should only be done to topology before start + * If port is already started, stop it and then restart when done. + */ + if (dev->data->dev_started) { + if (rte_eth_dev_stop(dev->data->port_id) != 0) { + error->message = "Device failed to Stop"; + return -1; + } + restart = true; + } - /* if device not started, simply set committed flag and return. 
*/ - if (!dev->data->dev_started) { - pf->tm_conf.committed = true; - pf->tm_conf.clear_on_fail = clear_on_fail; - return 0; + int ret = commit_new_hierarchy(dev); + if (ret < 0 && clear_on_fail) { + ice_tm_conf_uninit(dev); + ice_tm_conf_init(dev); } - return ice_do_hierarchy_commit(dev, clear_on_fail, error); + if (restart) { + if (rte_eth_dev_start(dev->data->port_id) != 0) { + error->message = "Device failed to Start"; + return -1; + } + } + return ret; } diff --git a/drivers/net/igc/igc_txrx.c b/drivers/net/igc/igc_txrx.c index d0cee1b016..fabab5b1a3 100644 --- a/drivers/net/igc/igc_txrx.c +++ b/drivers/net/igc/igc_txrx.c @@ -347,6 +347,13 @@ igc_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) rxm->data_off = RTE_PKTMBUF_HEADROOM; data_len = rte_le_to_cpu_16(rxd.wb.upper.length) - rxq->crc_len; + /* + * When the RTE_ETH_RX_OFFLOAD_TIMESTAMP offload is enabled the + * length in the descriptor still accounts for the timestamp so + * it must be subtracted. + */ + if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) + data_len -= IGC_TS_HDR_LEN; rxm->data_len = data_len; rxm->pkt_len = data_len; rxm->nb_segs = 1; @@ -509,6 +516,24 @@ igc_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, */ rxm->data_off = RTE_PKTMBUF_HEADROOM; data_len = rte_le_to_cpu_16(rxd.wb.upper.length); + if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) { + /* + * When the RTE_ETH_RX_OFFLOAD_TIMESTAMP offload is enabled + * the pkt_addr of all software ring entries is moved forward + * by IGC_TS_HDR_LEN (see igc_alloc_rx_queue_mbufs()) so that + * when the hardware writes the packet with a prepended + * timestamp the actual packet data still starts at the + * normal data offset. The length in the descriptor still + * accounts for the timestamp so it needs to be subtracted. + * Follow-up mbufs do not have the timestamp so the data + * offset must be adjusted to point to the start of the packet + * data. + */ + if (first_seg == NULL) + data_len -= IGC_TS_HDR_LEN; + else + rxm->data_off -= IGC_TS_HDR_LEN; + } rxm->data_len = data_len; /* @@ -557,6 +582,7 @@ igc_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, last_seg->data_len = last_seg->data_len - (RTE_ETHER_CRC_LEN - data_len); last_seg->next = NULL; + rxm = last_seg; } else { rxm->data_len = (uint16_t) (data_len - RTE_ETHER_CRC_LEN); diff --git a/drivers/net/ixgbe/ixgbe_ethdev.c b/drivers/net/ixgbe/ixgbe_ethdev.c index ab37c37469..eb431889c3 100644 --- a/drivers/net/ixgbe/ixgbe_ethdev.c +++ b/drivers/net/ixgbe/ixgbe_ethdev.c @@ -73,7 +73,7 @@ #define IXGBE_MMW_SIZE_DEFAULT 0x4 #define IXGBE_MMW_SIZE_JUMBO_FRAME 0x14 -#define IXGBE_MAX_RING_DESC 4096 /* replicate define from rxtx */ +#define IXGBE_MAX_RING_DESC 8192 /* replicate define from rxtx */ /* * Default values for RX/TX configuration @@ -3385,7 +3385,8 @@ ixgbe_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) stats->opackets = hw_stats->gptc; stats->obytes = hw_stats->gotc; - for (i = 0; i < IXGBE_QUEUE_STAT_COUNTERS; i++) { + for (i = 0; i < RTE_MIN_T(IXGBE_QUEUE_STAT_COUNTERS, + RTE_ETHDEV_QUEUE_STAT_CNTRS, typeof(i)); i++) { stats->q_ipackets[i] = hw_stats->qprc[i]; stats->q_opackets[i] = hw_stats->qptc[i]; stats->q_ibytes[i] = hw_stats->qbrc[i]; @@ -4314,11 +4315,6 @@ ixgbe_dev_link_update_share(struct rte_eth_dev *dev, if (wait_to_complete == 0 || dev->data->dev_conf.intr_conf.lsc != 0) wait = 0; -/* BSD has no interrupt mechanism, so force NIC status synchronization. 
*/ -#ifdef RTE_EXEC_ENV_FREEBSD - wait = 1; -#endif - if (vf) diag = ixgbevf_check_link(hw, &link_speed, &link_up, wait); else @@ -5978,7 +5974,8 @@ ixgbe_set_ivar_map(struct ixgbe_hw *hw, int8_t direction, (hw->mac.type == ixgbe_mac_X540) || (hw->mac.type == ixgbe_mac_X550) || (hw->mac.type == ixgbe_mac_X550EM_a) || - (hw->mac.type == ixgbe_mac_X550EM_x)) { + (hw->mac.type == ixgbe_mac_X550EM_x) || + (hw->mac.type == ixgbe_mac_E610)) { if (direction == -1) { /* other causes */ idx = ((queue & 1) * 8); @@ -6113,6 +6110,7 @@ ixgbe_configure_msix(struct rte_eth_dev *dev) case ixgbe_mac_X540: case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: + case ixgbe_mac_E610: ixgbe_set_ivar_map(hw, -1, 1, IXGBE_MISC_VEC_ID); break; default: diff --git a/drivers/net/ixgbe/ixgbe_rxtx.h b/drivers/net/ixgbe/ixgbe_rxtx.h index ee89c89929..0550c1da60 100644 --- a/drivers/net/ixgbe/ixgbe_rxtx.h +++ b/drivers/net/ixgbe/ixgbe_rxtx.h @@ -25,7 +25,7 @@ * (num_ring_desc * sizeof(rx/tx descriptor)) % 128 == 0 */ #define IXGBE_MIN_RING_DESC 32 -#define IXGBE_MAX_RING_DESC 4096 +#define IXGBE_MAX_RING_DESC 8192 #define RTE_PMD_IXGBE_TX_MAX_BURST 32 #define RTE_PMD_IXGBE_RX_MAX_BURST 32 diff --git a/drivers/net/mana/tx.c b/drivers/net/mana/tx.c index 272a28bcba..40931ac027 100644 --- a/drivers/net/mana/tx.c +++ b/drivers/net/mana/tx.c @@ -154,6 +154,7 @@ mana_start_tx_queues(struct rte_eth_dev *dev) txq->gdma_cq.count, txq->gdma_cq.size, txq->gdma_cq.head); + __rte_assume(i < RTE_MAX_QUEUES_PER_PORT); dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; } diff --git a/drivers/net/memif/rte_eth_memif.c b/drivers/net/memif/rte_eth_memif.c index cd722f254f..c0bf1e3bbf 100644 --- a/drivers/net/memif/rte_eth_memif.c +++ b/drivers/net/memif/rte_eth_memif.c @@ -520,7 +520,10 @@ eth_memif_rx_zc(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts) return 0; if (unlikely(ring == NULL)) { /* Secondary process will attempt to request regions. */ - rte_eth_link_get(mq->in_port, &link); + ret = rte_eth_link_get(mq->in_port, &link); + if (ret < 0) + MIF_LOG(ERR, "Failed to get port %u link info: %s", + mq->in_port, rte_strerror(-ret)); return 0; } @@ -868,8 +871,13 @@ eth_memif_tx_zc(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts) if (unlikely((pmd->flags & ETH_MEMIF_FLAG_CONNECTED) == 0)) return 0; if (unlikely(ring == NULL)) { + int ret; + /* Secondary process will attempt to request regions. 
*/ - rte_eth_link_get(mq->in_port, &link); + ret = rte_eth_link_get(mq->in_port, &link); + if (ret < 0) + MIF_LOG(ERR, "Failed to get port %u link info: %s", + mq->in_port, rte_strerror(-ret)); return 0; } diff --git a/drivers/net/meson.build b/drivers/net/meson.build index fb6d34b782..0a12914534 100644 --- a/drivers/net/meson.build +++ b/drivers/net/meson.build @@ -62,6 +62,7 @@ drivers = [ 'vhost', 'virtio', 'vmxnet3', + 'zxdh', ] std_deps = ['ethdev', 'kvargs'] # 'ethdev' also pulls in mbuf, net, eal etc std_deps += ['bus_pci'] # very many PMDs depend on PCI, so make std diff --git a/drivers/net/mlx5/hws/mlx5dr.h b/drivers/net/mlx5/hws/mlx5dr.h index 0fe39e9c76..1b58eeb2c7 100644 --- a/drivers/net/mlx5/hws/mlx5dr.h +++ b/drivers/net/mlx5/hws/mlx5dr.h @@ -52,6 +52,7 @@ enum mlx5dr_action_type { MLX5DR_ACTION_TYP_POP_IPV6_ROUTE_EXT, MLX5DR_ACTION_TYP_PUSH_IPV6_ROUTE_EXT, MLX5DR_ACTION_TYP_NAT64, + MLX5DR_ACTION_TYP_JUMP_TO_MATCHER, MLX5DR_ACTION_TYP_MAX, }; @@ -130,6 +131,14 @@ enum mlx5dr_matcher_distribute_mode { MLX5DR_MATCHER_DISTRIBUTE_BY_LINEAR = 0x1, }; +/* Match mode describes the behavior of the matcher STE's when a packet arrives */ +enum mlx5dr_matcher_match_mode { + /* Packet arriving at this matcher STE's will match according it's tag and match definer */ + MLX5DR_MATCHER_MATCH_MODE_DEFAULT = 0x0, + /* Packet arriving at this matcher STE's will always hit and perform the actions */ + MLX5DR_MATCHER_MATCH_MODE_ALWAYS_HIT = 0x1, +}; + enum mlx5dr_rule_hash_calc_mode { MLX5DR_RULE_HASH_CALC_MODE_RAW, MLX5DR_RULE_HASH_CALC_MODE_IDX, @@ -144,11 +153,14 @@ struct mlx5dr_matcher_attr { enum mlx5dr_matcher_resource_mode mode; /* Optimize insertion in case packet origin is the same for all rules */ enum mlx5dr_matcher_flow_src optimize_flow_src; - /* Define the insertion and distribution modes for this matcher */ + /* Define the insertion, distribution and match modes for this matcher */ enum mlx5dr_matcher_insert_mode insert_mode; enum mlx5dr_matcher_distribute_mode distribute_mode; + enum mlx5dr_matcher_match_mode match_mode; /* Define whether the created matcher supports resizing into a bigger matcher */ bool resizable; + /* This will imply that this matcher is not part of the matchers chain of parent table */ + bool isolated; union { struct { uint8_t sz_row_log; @@ -276,6 +288,10 @@ struct mlx5dr_rule_action { uint32_t offset; enum mlx5dr_action_aso_ct_flags direction; } aso_ct; + + struct { + uint32_t offset; + } jump_to_matcher; }; }; @@ -293,6 +309,15 @@ struct mlx5dr_action_dest_attr { } reformat; }; +enum mlx5dr_action_jump_to_matcher_type { + MLX5DR_ACTION_JUMP_TO_MATCHER_BY_INDEX, +}; + +struct mlx5dr_action_jump_to_matcher_attr { + enum mlx5dr_action_jump_to_matcher_type type; + struct mlx5dr_matcher *matcher; +}; + union mlx5dr_crc_encap_entropy_hash_ip_field { uint8_t ipv6_addr[16]; struct { @@ -927,6 +952,21 @@ mlx5dr_action_create_nat64(struct mlx5dr_context *ctx, struct mlx5dr_action_nat64_attr *attr, uint32_t flags); +/* Create direct rule jump to matcher action. + * + * @param[in] ctx + * The context in which the new action will be created. + * @param[in] attr + * The relevant attribute of the action. + * @param[in] flags + * Action creation flags. (enum mlx5dr_action_flags) + * @return pointer to mlx5dr_action on success NULL otherwise. + */ +struct mlx5dr_action * +mlx5dr_action_create_jump_to_matcher(struct mlx5dr_context *ctx, + struct mlx5dr_action_jump_to_matcher_attr *attr, + uint32_t flags); + /* Destroy direct rule action. 
* * @param[in] action diff --git a/drivers/net/mlx5/hws/mlx5dr_action.c b/drivers/net/mlx5/hws/mlx5dr_action.c index 3fceb96de2..696b160011 100644 --- a/drivers/net/mlx5/hws/mlx5dr_action.c +++ b/drivers/net/mlx5/hws/mlx5dr_action.c @@ -42,7 +42,8 @@ static const uint32_t action_order_arr[MLX5DR_TABLE_TYPE_MAX][MLX5DR_ACTION_TYP_ BIT(MLX5DR_ACTION_TYP_TIR) | BIT(MLX5DR_ACTION_TYP_DROP) | BIT(MLX5DR_ACTION_TYP_DEST_ROOT) | - BIT(MLX5DR_ACTION_TYP_DEST_ARRAY), + BIT(MLX5DR_ACTION_TYP_DEST_ARRAY) | + BIT(MLX5DR_ACTION_TYP_JUMP_TO_MATCHER), BIT(MLX5DR_ACTION_TYP_LAST), }, [MLX5DR_TABLE_TYPE_NIC_TX] = { @@ -62,7 +63,8 @@ static const uint32_t action_order_arr[MLX5DR_TABLE_TYPE_MAX][MLX5DR_ACTION_TYP_ BIT(MLX5DR_ACTION_TYP_TBL) | BIT(MLX5DR_ACTION_TYP_MISS) | BIT(MLX5DR_ACTION_TYP_DROP) | - BIT(MLX5DR_ACTION_TYP_DEST_ROOT), + BIT(MLX5DR_ACTION_TYP_DEST_ROOT) | + BIT(MLX5DR_ACTION_TYP_JUMP_TO_MATCHER), BIT(MLX5DR_ACTION_TYP_LAST), }, [MLX5DR_TABLE_TYPE_FDB] = { @@ -88,7 +90,8 @@ static const uint32_t action_order_arr[MLX5DR_TABLE_TYPE_MAX][MLX5DR_ACTION_TYP_ BIT(MLX5DR_ACTION_TYP_VPORT) | BIT(MLX5DR_ACTION_TYP_DROP) | BIT(MLX5DR_ACTION_TYP_DEST_ROOT) | - BIT(MLX5DR_ACTION_TYP_DEST_ARRAY), + BIT(MLX5DR_ACTION_TYP_DEST_ARRAY) | + BIT(MLX5DR_ACTION_TYP_JUMP_TO_MATCHER), BIT(MLX5DR_ACTION_TYP_LAST), }, }; @@ -617,7 +620,8 @@ mlx5dr_action_create_nat64_copy_back_state(struct mlx5dr_context *ctx, MLX5_SET(copy_action_in, action_ptr, src_field, attr->registers[MLX5DR_ACTION_NAT64_REG_CONTROL]); MLX5_SET(copy_action_in, action_ptr, dst_field, tos_field); - MLX5_SET(copy_action_in, action_ptr, src_offset, 24); + MLX5_SET(copy_action_in, action_ptr, src_offset, 24 + (ecn ? + MLX5DR_ACTION_NAT64_ECN_SIZE : 0)); MLX5_SET(copy_action_in, action_ptr, length, tos_size); action_ptr += MLX5DR_ACTION_DOUBLE_SIZE; @@ -629,7 +633,7 @@ mlx5dr_action_create_nat64_copy_back_state(struct mlx5dr_context *ctx, MLX5_SET(copy_action_in, action_ptr, src_field, attr->registers[MLX5DR_ACTION_NAT64_REG_CONTROL]); MLX5_SET(copy_action_in, action_ptr, dst_field, ecn); - MLX5_SET(copy_action_in, action_ptr, src_offset, 24 + tos_size); + MLX5_SET(copy_action_in, action_ptr, src_offset, 24); MLX5_SET(copy_action_in, action_ptr, length, MLX5DR_ACTION_NAT64_ECN_SIZE); action_ptr += MLX5DR_ACTION_DOUBLE_SIZE; } @@ -1091,6 +1095,13 @@ static void mlx5dr_action_fill_stc_attr(struct mlx5dr_action *action, attr->action_offset = MLX5DR_ACTION_OFFSET_DW5; attr->reparse_mode = MLX5_IFC_STC_REPARSE_ALWAYS; break; + case MLX5DR_ACTION_TYP_JUMP_TO_MATCHER: + attr->action_type = MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_STE_TABLE; + attr->action_offset = MLX5DR_ACTION_OFFSET_HIT; + attr->ste_table.ste = action->jump_to_matcher.matcher->match_ste.ste; + attr->ste_table.ste_pool = action->jump_to_matcher.matcher->match_ste.pool; + attr->ste_table.match_definer_id = action->ctx->caps->trivial_match_definer; + break; default: DR_LOG(ERR, "Invalid action type %d", action->type); assert(false); @@ -3078,6 +3089,57 @@ mlx5dr_action_create_nat64(struct mlx5dr_context *ctx, return NULL; } +struct mlx5dr_action * +mlx5dr_action_create_jump_to_matcher(struct mlx5dr_context *ctx, + struct mlx5dr_action_jump_to_matcher_attr *attr, + uint32_t flags) +{ + struct mlx5dr_matcher *matcher = attr->matcher; + struct mlx5dr_matcher_attr *m_attr; + struct mlx5dr_action *action; + + if (attr->type != MLX5DR_ACTION_JUMP_TO_MATCHER_BY_INDEX) { + DR_LOG(ERR, "Only jump to matcher by index is supported"); + goto enotsup; + } + + if (mlx5dr_action_is_root_flags(flags)) { + 
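/* Editor's note (illustrative sketch, not part of this patch): the attribute and
 * field names below come from the mlx5dr.h hunk earlier in this patch, while the
 * action flag and the matcher variable are assumptions. The target matcher must
 * be an isolated insert-by-index ("STE array") matcher, or a non-resizable
 * matcher of size 1:
 *
 *   struct mlx5dr_action_jump_to_matcher_attr jattr = {
 *       .type = MLX5DR_ACTION_JUMP_TO_MATCHER_BY_INDEX,
 *       .matcher = ste_array_matcher,
 *   };
 *   struct mlx5dr_action *jump =
 *       mlx5dr_action_create_jump_to_matcher(ctx, &jattr, MLX5DR_ACTION_FLAG_HWS_FDB);
 *
 *   // at rule creation time, the index of the target rule is given per rule:
 *   rule_actions[i].action = jump;
 *   rule_actions[i].jump_to_matcher.offset = target_rule_idx;
 */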
DR_LOG(ERR, "Action flags must be only non root (HWS)"); + goto enotsup; + } + + if (mlx5dr_table_is_root(matcher->tbl)) { + DR_LOG(ERR, "Root matcher cannot be set as destination"); + goto enotsup; + } + + m_attr = &matcher->attr; + + if (!(matcher->flags & MLX5DR_MATCHER_FLAGS_STE_ARRAY) && + (m_attr->resizable || m_attr->table.sz_col_log || m_attr->table.sz_row_log)) { + DR_LOG(ERR, "Only STE array or matcher of size 1 can be set as destination"); + goto enotsup; + } + + action = mlx5dr_action_create_generic(ctx, flags, MLX5DR_ACTION_TYP_JUMP_TO_MATCHER); + if (!action) + return NULL; + + action->jump_to_matcher.matcher = matcher; + + if (mlx5dr_action_create_stcs(action, NULL)) { + DR_LOG(ERR, "Failed to create action jump to matcher STC"); + simple_free(action); + return NULL; + } + + return action; + +enotsup: + rte_errno = ENOTSUP; + return NULL; +} + static void mlx5dr_action_destroy_hws(struct mlx5dr_action *action) { struct mlx5dr_devx_obj *obj = NULL; @@ -3100,6 +3162,7 @@ static void mlx5dr_action_destroy_hws(struct mlx5dr_action *action) case MLX5DR_ACTION_TYP_PUSH_VLAN: case MLX5DR_ACTION_TYP_REMOVE_HEADER: case MLX5DR_ACTION_TYP_VPORT: + case MLX5DR_ACTION_TYP_JUMP_TO_MATCHER: mlx5dr_action_destroy_stcs(action); break; case MLX5DR_ACTION_TYP_DEST_ROOT: @@ -3618,6 +3681,19 @@ mlx5dr_action_setter_default_hit(struct mlx5dr_actions_apply_data *apply, htobe32(apply->common_res->default_stc->default_hit.offset); } +static void +mlx5dr_action_setter_hit_matcher(struct mlx5dr_actions_apply_data *apply, + struct mlx5dr_actions_wqe_setter *setter) +{ + struct mlx5dr_rule_action *rule_action; + + rule_action = &apply->rule_action[setter->idx_hit]; + + apply->wqe_data[MLX5DR_ACTION_OFFSET_HIT_LSB] = + htobe32(rule_action->jump_to_matcher.offset << 6); + mlx5dr_action_apply_stc(apply, MLX5DR_ACTION_STC_IDX_HIT, setter->idx_hit); +} + static void mlx5dr_action_setter_hit_next_action(struct mlx5dr_actions_apply_data *apply, __rte_unused struct mlx5dr_actions_wqe_setter *setter) @@ -3965,6 +4041,12 @@ int mlx5dr_action_template_process(struct mlx5dr_action_template *at) } break; + case MLX5DR_ACTION_TYP_JUMP_TO_MATCHER: + last_setter->flags |= ASF_HIT; + last_setter->set_hit = &mlx5dr_action_setter_hit_matcher; + last_setter->idx_hit = i; + break; + default: DR_LOG(ERR, "Unsupported action type: %d", action_type[i]); rte_errno = ENOTSUP; diff --git a/drivers/net/mlx5/hws/mlx5dr_action.h b/drivers/net/mlx5/hws/mlx5dr_action.h index ba4ce55228..8ce4ecd5ba 100644 --- a/drivers/net/mlx5/hws/mlx5dr_action.h +++ b/drivers/net/mlx5/hws/mlx5dr_action.h @@ -223,6 +223,9 @@ struct mlx5dr_action { struct { struct mlx5dr_action *stages[MLX5DR_ACTION_NAT64_STAGES]; } nat64; + struct { + struct mlx5dr_matcher *matcher; + } jump_to_matcher; }; }; diff --git a/drivers/net/mlx5/hws/mlx5dr_debug.c b/drivers/net/mlx5/hws/mlx5dr_debug.c index 741a725842..8684a8197a 100644 --- a/drivers/net/mlx5/hws/mlx5dr_debug.c +++ b/drivers/net/mlx5/hws/mlx5dr_debug.c @@ -29,6 +29,7 @@ const char *mlx5dr_debug_action_type_str[] = { [MLX5DR_ACTION_TYP_POP_IPV6_ROUTE_EXT] = "POP_IPV6_ROUTE_EXT", [MLX5DR_ACTION_TYP_PUSH_IPV6_ROUTE_EXT] = "PUSH_IPV6_ROUTE_EXT", [MLX5DR_ACTION_TYP_NAT64] = "NAT64", + [MLX5DR_ACTION_TYP_JUMP_TO_MATCHER] = "JUMP_TO_MATCHER", }; static_assert(ARRAY_SIZE(mlx5dr_debug_action_type_str) == MLX5DR_ACTION_TYP_MAX, @@ -182,7 +183,7 @@ mlx5dr_debug_dump_matcher_attr(FILE *f, struct mlx5dr_matcher *matcher) struct mlx5dr_matcher_attr *attr = &matcher->attr; int ret; - ret = fprintf(f, "%d,0x%" PRIx64 
",%d,%d,%d,%d,%d,%d,%d,%d\n", + ret = fprintf(f, "%d,0x%" PRIx64 ",%d,%d,%d,%d,%d,%d,%d,%d,%d,%d\n", MLX5DR_DEBUG_RES_TYPE_MATCHER_ATTR, (uint64_t)(uintptr_t)matcher, attr->priority, @@ -192,7 +193,9 @@ mlx5dr_debug_dump_matcher_attr(FILE *f, struct mlx5dr_matcher *matcher) attr->optimize_using_rule_idx, attr->optimize_flow_src, attr->insert_mode, - attr->distribute_mode); + attr->distribute_mode, + attr->match_mode, + attr->isolated); if (ret < 0) { rte_errno = EINVAL; return rte_errno; @@ -377,6 +380,12 @@ static int mlx5dr_debug_dump_table(FILE *f, struct mlx5dr_table *tbl) return ret; } + LIST_FOREACH(matcher, &tbl->isolated_matchers, next) { + ret = mlx5dr_debug_dump_matcher(f, matcher); + if (ret) + return ret; + } + return 0; out_err: diff --git a/drivers/net/mlx5/hws/mlx5dr_definer.c b/drivers/net/mlx5/hws/mlx5dr_definer.c index a9fa5d06ed..e6d3dbfa46 100644 --- a/drivers/net/mlx5/hws/mlx5dr_definer.c +++ b/drivers/net/mlx5/hws/mlx5dr_definer.c @@ -2,6 +2,8 @@ * Copyright (c) 2022 NVIDIA Corporation & Affiliates */ +#include + #include "mlx5dr_internal.h" #define GTP_PDU_SC 0x85 @@ -385,6 +387,27 @@ mlx5dr_definer_ptype_l4_set(struct mlx5dr_definer_fc *fc, (inner ? RTE_PTYPE_INNER_L4_MASK : RTE_PTYPE_L4_MASK); uint8_t l4_type = STE_NO_L4; + if (packet_type == (inner ? RTE_PTYPE_INNER_L4_TCP : RTE_PTYPE_L4_TCP)) + l4_type = STE_TCP; + else if (packet_type == (inner ? RTE_PTYPE_INNER_L4_UDP : RTE_PTYPE_L4_UDP)) + l4_type = STE_UDP; + else if (packet_type == (inner ? RTE_PTYPE_INNER_L4_ESP : RTE_PTYPE_L4_ESP)) + l4_type = STE_ESP; + + DR_SET(tag, l4_type, fc->byte_off, fc->bit_off, fc->bit_mask); +} + +static void +mlx5dr_definer_ptype_l4_ext_set(struct mlx5dr_definer_fc *fc, + const void *item_spec, + uint8_t *tag) +{ + bool inner = (fc->fname == MLX5DR_DEFINER_FNAME_PTYPE_L4_EXT_I); + const struct rte_flow_item_ptype *v = item_spec; + uint32_t packet_type = v->packet_type & + (inner ? RTE_PTYPE_INNER_L4_MASK : RTE_PTYPE_L4_MASK); + uint8_t l4_type = STE_NO_L4; + if (packet_type == (inner ? RTE_PTYPE_INNER_L4_TCP : RTE_PTYPE_L4_TCP)) l4_type = STE_TCP; else if (packet_type == (inner ? 
RTE_PTYPE_INNER_L4_UDP : RTE_PTYPE_L4_UDP)) @@ -1548,7 +1571,7 @@ mlx5dr_definer_conv_item_port(struct mlx5dr_definer_conv_data *cd, fc->tag_set = &mlx5dr_definer_vport_set; fc->tag_mask_set = &mlx5dr_definer_ones_set; DR_CALC_SET_HDR(fc, registers, register_c_0); - fc->bit_off = __builtin_ctz(caps->wire_regc_mask); + fc->bit_off = rte_ctz32(caps->wire_regc_mask); fc->bit_mask = caps->wire_regc_mask >> fc->bit_off; fc->dr_ctx = cd->ctx; } else { @@ -2193,6 +2216,12 @@ mlx5dr_definer_conv_item_ptype(struct mlx5dr_definer_conv_data *cd, fc->item_idx = item_idx; fc->tag_set = &mlx5dr_definer_ptype_l4_set; fc->tag_mask_set = &mlx5dr_definer_ones_set; + DR_CALC_SET(fc, eth_l2, l4_type_bwc, false); + + fc = &cd->fc[DR_CALC_FNAME(PTYPE_L4_EXT, false)]; + fc->item_idx = item_idx; + fc->tag_set = &mlx5dr_definer_ptype_l4_ext_set; + fc->tag_mask_set = &mlx5dr_definer_ones_set; DR_CALC_SET(fc, eth_l2, l4_type, false); } } @@ -2209,6 +2238,12 @@ mlx5dr_definer_conv_item_ptype(struct mlx5dr_definer_conv_data *cd, fc->item_idx = item_idx; fc->tag_set = &mlx5dr_definer_ptype_l4_set; fc->tag_mask_set = &mlx5dr_definer_ones_set; + DR_CALC_SET(fc, eth_l2, l4_type_bwc, true); + + fc = &cd->fc[DR_CALC_FNAME(PTYPE_L4_EXT, true)]; + fc->item_idx = item_idx; + fc->tag_set = &mlx5dr_definer_ptype_l4_ext_set; + fc->tag_mask_set = &mlx5dr_definer_ones_set; DR_CALC_SET(fc, eth_l2, l4_type, true); } } @@ -2666,8 +2701,8 @@ mlx5dr_definer_conv_item_geneve_opt(struct mlx5dr_definer_conv_data *cd, fc->item_idx = item_idx; fc->tag_set = &mlx5dr_definer_ones_set; fc->byte_off = hl_ok_bit->dw_offset * DW_SIZE + - __builtin_clz(hl_ok_bit->dw_mask) / 8; - fc->bit_off = __builtin_ctz(hl_ok_bit->dw_mask); + rte_clz32(hl_ok_bit->dw_mask) / 8; + fc->bit_off = rte_ctz32(hl_ok_bit->dw_mask); fc->bit_mask = 0x1; } @@ -4056,6 +4091,7 @@ mlx5dr_definer_matcher_range_init(struct mlx5dr_context *ctx, if (i && ((is_range && !has_range) || (!is_range && has_range))) { DR_LOG(ERR, "Using range and non range templates is not allowed"); + rte_errno = EINVAL; goto free_definers; } diff --git a/drivers/net/mlx5/hws/mlx5dr_definer.h b/drivers/net/mlx5/hws/mlx5dr_definer.h index b583f78943..092b1b3b10 100644 --- a/drivers/net/mlx5/hws/mlx5dr_definer.h +++ b/drivers/net/mlx5/hws/mlx5dr_definer.h @@ -207,6 +207,8 @@ enum mlx5dr_definer_fname { MLX5DR_DEFINER_FNAME_PTYPE_L3_I, MLX5DR_DEFINER_FNAME_PTYPE_L4_O, MLX5DR_DEFINER_FNAME_PTYPE_L4_I, + MLX5DR_DEFINER_FNAME_PTYPE_L4_EXT_O, + MLX5DR_DEFINER_FNAME_PTYPE_L4_EXT_I, MLX5DR_DEFINER_FNAME_PTYPE_TUNNEL, MLX5DR_DEFINER_FNAME_PTYPE_FRAG_O, MLX5DR_DEFINER_FNAME_PTYPE_FRAG_I, diff --git a/drivers/net/mlx5/hws/mlx5dr_matcher.c b/drivers/net/mlx5/hws/mlx5dr_matcher.c index dfa2cd435c..54460cc82b 100644 --- a/drivers/net/mlx5/hws/mlx5dr_matcher.c +++ b/drivers/net/mlx5/hws/mlx5dr_matcher.c @@ -198,6 +198,18 @@ static int mlx5dr_matcher_connect(struct mlx5dr_matcher *matcher) struct mlx5dr_matcher *tmp_matcher; int ret; + if (matcher->attr.isolated) { + LIST_INSERT_HEAD(&tbl->isolated_matchers, matcher, next); + ret = mlx5dr_table_connect_src_ft_to_miss_table(tbl, matcher->end_ft, + tbl->default_miss.miss_tbl); + if (ret) { + DR_LOG(ERR, "Failed to connect the new matcher to the miss_tbl"); + goto remove_from_list; + } + + return 0; + } + /* Find location in matcher list */ if (LIST_EMPTY(&tbl->head)) { LIST_INSERT_HEAD(&tbl->head, matcher, next); @@ -230,7 +242,7 @@ static int mlx5dr_matcher_connect(struct mlx5dr_matcher *matcher) } } else { /* Connect last matcher to next miss_tbl if exists */ - ret = 
mlx5dr_table_connect_to_miss_table(tbl, tbl->default_miss.miss_tbl); + ret = mlx5dr_table_connect_to_miss_table(tbl, tbl->default_miss.miss_tbl, true); if (ret) { DR_LOG(ERR, "Failed connect new matcher to miss_tbl"); goto remove_from_list; @@ -284,6 +296,11 @@ static int mlx5dr_matcher_disconnect(struct mlx5dr_matcher *matcher) struct mlx5dr_matcher *next; int ret; + if (matcher->attr.isolated) { + LIST_REMOVE(matcher, next); + return 0; + } + prev_ft = tbl->ft; prev_matcher = LIST_FIRST(&tbl->head); LIST_FOREACH(tmp_matcher, &tbl->head, next) { @@ -309,7 +326,7 @@ static int mlx5dr_matcher_disconnect(struct mlx5dr_matcher *matcher) goto matcher_reconnect; } } else { - ret = mlx5dr_table_connect_to_miss_table(tbl, tbl->default_miss.miss_tbl); + ret = mlx5dr_table_connect_to_miss_table(tbl, tbl->default_miss.miss_tbl, true); if (ret) { DR_LOG(ERR, "Failed to disconnect last matcher"); goto matcher_reconnect; @@ -518,14 +535,17 @@ static int mlx5dr_matcher_create_rtc(struct mlx5dr_matcher *matcher, } } else if (attr->insert_mode == MLX5DR_MATCHER_INSERT_BY_INDEX) { rtc_attr.update_index_mode = MLX5_IFC_RTC_STE_UPDATE_MODE_BY_OFFSET; - rtc_attr.num_hash_definer = 1; if (attr->distribute_mode == MLX5DR_MATCHER_DISTRIBUTE_BY_HASH) { /* Hash Split Table */ + if (mlx5dr_matcher_is_always_hit(matcher)) + rtc_attr.num_hash_definer = 1; + rtc_attr.access_index_mode = MLX5_IFC_RTC_STE_ACCESS_MODE_BY_HASH; rtc_attr.match_definer_0 = mlx5dr_definer_get_id(mt->definer); } else if (attr->distribute_mode == MLX5DR_MATCHER_DISTRIBUTE_BY_LINEAR) { /* Linear Lookup Table */ + rtc_attr.num_hash_definer = 1; rtc_attr.access_index_mode = MLX5_IFC_RTC_STE_ACCESS_MODE_LINEAR; rtc_attr.match_definer_0 = ctx->caps->linear_match_definer; } @@ -973,10 +993,17 @@ mlx5dr_matcher_validate_insert_mode(struct mlx5dr_cmd_query_caps *caps, if (attr->distribute_mode == MLX5DR_MATCHER_DISTRIBUTE_BY_HASH) { /* Hash Split Table */ - if (!caps->rtc_hash_split_table) { + if (attr->match_mode == MLX5DR_MATCHER_MATCH_MODE_ALWAYS_HIT && + !caps->rtc_hash_split_table) { DR_LOG(ERR, "FW doesn't support insert by index and hash distribute"); goto not_supported; } + + if (attr->match_mode == MLX5DR_MATCHER_MATCH_MODE_DEFAULT && + !attr->isolated) { + DR_LOG(ERR, "STE array matcher supported only as an isolated matcher"); + goto not_supported; + } } else if (attr->distribute_mode == MLX5DR_MATCHER_DISTRIBUTE_BY_LINEAR) { /* Linear Lookup Table */ if (!caps->rtc_linear_lookup_table || @@ -991,6 +1018,12 @@ mlx5dr_matcher_validate_insert_mode(struct mlx5dr_cmd_query_caps *caps, MLX5_IFC_RTC_LINEAR_LOOKUP_TBL_LOG_MAX); goto not_supported; } + + if (attr->match_mode != MLX5DR_MATCHER_MATCH_MODE_ALWAYS_HIT) { + DR_LOG(ERR, "Linear lookup tables will always hit, given match mode is not supported %d\n", + attr->match_mode); + goto not_supported; + } } else { DR_LOG(ERR, "Matcher has unsupported distribute mode"); goto not_supported; @@ -1032,6 +1065,11 @@ mlx5dr_matcher_process_attr(struct mlx5dr_cmd_query_caps *caps, DR_LOG(ERR, "Root matcher does not support resizing"); goto not_supported; } + if (attr->isolated) { + DR_LOG(ERR, "Root matcher can not be isolated"); + goto not_supported; + } + return 0; } @@ -1045,6 +1083,18 @@ mlx5dr_matcher_process_attr(struct mlx5dr_cmd_query_caps *caps, attr->insert_mode == MLX5DR_MATCHER_INSERT_BY_HASH) attr->table.sz_col_log = mlx5dr_matcher_rules_to_tbl_depth(attr->rule.num_log); + if (attr->isolated) { + if (attr->insert_mode != MLX5DR_MATCHER_INSERT_BY_INDEX || + attr->distribute_mode != 
MLX5DR_MATCHER_DISTRIBUTE_BY_HASH || + attr->match_mode != MLX5DR_MATCHER_MATCH_MODE_DEFAULT) { + DR_LOG(ERR, "Isolated matcher only supported for STE array matcher"); + goto not_supported; + } + + /* We reach here only in case of STE array */ + matcher->flags |= MLX5DR_MATCHER_FLAGS_STE_ARRAY; + } + matcher->flags |= attr->resizable ? MLX5DR_MATCHER_FLAGS_RESIZABLE : 0; return mlx5dr_matcher_check_attr_sz(caps, attr); diff --git a/drivers/net/mlx5/hws/mlx5dr_matcher.h b/drivers/net/mlx5/hws/mlx5dr_matcher.h index ca6a5298d9..ef42b7de6b 100644 --- a/drivers/net/mlx5/hws/mlx5dr_matcher.h +++ b/drivers/net/mlx5/hws/mlx5dr_matcher.h @@ -28,6 +28,7 @@ enum mlx5dr_matcher_flags { MLX5DR_MATCHER_FLAGS_COLLISION = 1 << 2, MLX5DR_MATCHER_FLAGS_RESIZABLE = 1 << 3, MLX5DR_MATCHER_FLAGS_COMPARE = 1 << 4, + MLX5DR_MATCHER_FLAGS_STE_ARRAY = 1 << 5, }; struct mlx5dr_match_template { @@ -146,6 +147,11 @@ static inline bool mlx5dr_matcher_is_insert_by_idx(struct mlx5dr_matcher *matche return matcher->attr.insert_mode == MLX5DR_MATCHER_INSERT_BY_INDEX; } +static inline bool mlx5dr_matcher_is_always_hit(struct mlx5dr_matcher *matcher) +{ + return matcher->attr.match_mode == MLX5DR_MATCHER_MATCH_MODE_ALWAYS_HIT; +} + int mlx5dr_matcher_free_rtc_pointing(struct mlx5dr_context *ctx, uint32_t fw_ft_type, enum mlx5dr_table_type type, diff --git a/drivers/net/mlx5/hws/mlx5dr_rule.c b/drivers/net/mlx5/hws/mlx5dr_rule.c index 5d66d81ea5..519328ccf3 100644 --- a/drivers/net/mlx5/hws/mlx5dr_rule.c +++ b/drivers/net/mlx5/hws/mlx5dr_rule.c @@ -539,7 +539,7 @@ static int mlx5dr_rule_create_hws(struct mlx5dr_rule *rule, * will always match and perform the specified actions, which * makes the tag irrelevant. */ - if (likely(!mlx5dr_matcher_is_insert_by_idx(matcher) && !is_update)) + if (likely(!mlx5dr_matcher_is_always_hit(matcher) && !is_update)) mlx5dr_definer_create_tag(items, mt->fc, mt->fc_sz, (uint8_t *)dep_wqe->wqe_data.action); else if (unlikely(is_update)) diff --git a/drivers/net/mlx5/hws/mlx5dr_table.c b/drivers/net/mlx5/hws/mlx5dr_table.c index ab73017ade..634b484a94 100644 --- a/drivers/net/mlx5/hws/mlx5dr_table.c +++ b/drivers/net/mlx5/hws/mlx5dr_table.c @@ -429,7 +429,7 @@ int mlx5dr_table_destroy(struct mlx5dr_table *tbl) { struct mlx5dr_context *ctx = tbl->ctx; pthread_spin_lock(&ctx->ctrl_lock); - if (!LIST_EMPTY(&tbl->head)) { + if (!LIST_EMPTY(&tbl->head) || !LIST_EMPTY(&tbl->isolated_matchers)) { DR_LOG(ERR, "Cannot destroy table containing matchers"); rte_errno = EBUSY; goto unlock_err; @@ -531,7 +531,7 @@ int mlx5dr_table_update_connected_miss_tables(struct mlx5dr_table *dst_tbl) return 0; LIST_FOREACH(src_tbl, &dst_tbl->default_miss.head, default_miss.next) { - ret = mlx5dr_table_connect_to_miss_table(src_tbl, dst_tbl); + ret = mlx5dr_table_connect_to_miss_table(src_tbl, dst_tbl, false); if (ret) { DR_LOG(ERR, "Failed to update source miss table, unexpected behavior"); return ret; @@ -541,34 +541,32 @@ int mlx5dr_table_update_connected_miss_tables(struct mlx5dr_table *dst_tbl) return 0; } -int mlx5dr_table_connect_to_miss_table(struct mlx5dr_table *src_tbl, - struct mlx5dr_table *dst_tbl) +int mlx5dr_table_connect_src_ft_to_miss_table(struct mlx5dr_table *src_tbl, + struct mlx5dr_devx_obj *ft, + struct mlx5dr_table *dst_tbl) { - struct mlx5dr_devx_obj *last_ft; struct mlx5dr_matcher *matcher; int ret; - last_ft = mlx5dr_table_get_last_ft(src_tbl); - if (dst_tbl) { if (LIST_EMPTY(&dst_tbl->head)) { - /* Connect src_tbl last_ft to dst_tbl start anchor */ - ret = mlx5dr_table_ft_set_next_ft(last_ft, + 
/* Connect src_tbl ft to dst_tbl start anchor */ + ret = mlx5dr_table_ft_set_next_ft(ft, src_tbl->fw_ft_type, dst_tbl->ft->id); if (ret) return ret; - /* Reset last_ft RTC to default RTC */ - ret = mlx5dr_table_ft_set_next_rtc(last_ft, + /* Reset ft RTC to default RTC */ + ret = mlx5dr_table_ft_set_next_rtc(ft, src_tbl->fw_ft_type, NULL, NULL); if (ret) return ret; } else { - /* Connect src_tbl last_ft to first matcher RTC */ + /* Connect src_tbl ft to first matcher RTC */ matcher = LIST_FIRST(&dst_tbl->head); - ret = mlx5dr_table_ft_set_next_rtc(last_ft, + ret = mlx5dr_table_ft_set_next_rtc(ft, src_tbl->fw_ft_type, matcher->match_ste.rtc_0, matcher->match_ste.rtc_1); @@ -576,24 +574,51 @@ int mlx5dr_table_connect_to_miss_table(struct mlx5dr_table *src_tbl, return ret; /* Reset next miss FT to default */ - ret = mlx5dr_table_ft_set_default_next_ft(src_tbl, last_ft); + ret = mlx5dr_table_ft_set_default_next_ft(src_tbl, ft); if (ret) return ret; } } else { /* Reset next miss FT to default */ - ret = mlx5dr_table_ft_set_default_next_ft(src_tbl, last_ft); + ret = mlx5dr_table_ft_set_default_next_ft(src_tbl, ft); if (ret) return ret; - /* Reset last_ft RTC to default RTC */ - ret = mlx5dr_table_ft_set_next_rtc(last_ft, + /* Reset ft RTC to default RTC */ + ret = mlx5dr_table_ft_set_next_rtc(ft, src_tbl->fw_ft_type, NULL, NULL); if (ret) return ret; } + return 0; +} + +int mlx5dr_table_connect_to_miss_table(struct mlx5dr_table *src_tbl, + struct mlx5dr_table *dst_tbl, + bool only_update_last_ft) +{ + struct mlx5dr_matcher *matcher; + struct mlx5dr_devx_obj *ft; + int ret; + + /* Connect last FT in the src_tbl matchers chain */ + ft = mlx5dr_table_get_last_ft(src_tbl); + ret = mlx5dr_table_connect_src_ft_to_miss_table(src_tbl, ft, dst_tbl); + if (ret) + return ret; + + if (!only_update_last_ft) { + /* Connect isolated matchers FT */ + LIST_FOREACH(matcher, &src_tbl->isolated_matchers, next) { + ft = matcher->end_ft; + ret = mlx5dr_table_connect_src_ft_to_miss_table(src_tbl, ft, dst_tbl); + if (ret) + return ret; + } + } + src_tbl->default_miss.miss_tbl = dst_tbl; return 0; @@ -633,7 +658,7 @@ int mlx5dr_table_set_default_miss(struct mlx5dr_table *tbl, pthread_spin_lock(&ctx->ctrl_lock); old_miss_tbl = tbl->default_miss.miss_tbl; - ret = mlx5dr_table_connect_to_miss_table(tbl, miss_tbl); + ret = mlx5dr_table_connect_to_miss_table(tbl, miss_tbl, false); if (ret) goto out; diff --git a/drivers/net/mlx5/hws/mlx5dr_table.h b/drivers/net/mlx5/hws/mlx5dr_table.h index b2fbb47416..32f2574a97 100644 --- a/drivers/net/mlx5/hws/mlx5dr_table.h +++ b/drivers/net/mlx5/hws/mlx5dr_table.h @@ -23,6 +23,7 @@ struct mlx5dr_table { uint32_t fw_ft_type; uint32_t level; LIST_HEAD(matcher_head, mlx5dr_matcher) head; + LIST_HEAD(isolated_matchers_head, mlx5dr_matcher) isolated_matchers; LIST_ENTRY(mlx5dr_table) next; struct mlx5dr_default_miss default_miss; }; @@ -54,7 +55,8 @@ void mlx5dr_table_destroy_default_ft(struct mlx5dr_table *tbl, struct mlx5dr_devx_obj *ft_obj); int mlx5dr_table_connect_to_miss_table(struct mlx5dr_table *src_tbl, - struct mlx5dr_table *dst_tbl); + struct mlx5dr_table *dst_tbl, + bool only_update_last_ft); int mlx5dr_table_update_connected_miss_tables(struct mlx5dr_table *dst_tbl); @@ -66,4 +68,8 @@ int mlx5dr_table_ft_set_next_rtc(struct mlx5dr_devx_obj *ft, struct mlx5dr_devx_obj *rtc_0, struct mlx5dr_devx_obj *rtc_1); +int mlx5dr_table_connect_src_ft_to_miss_table(struct mlx5dr_table *src_tbl, + struct mlx5dr_devx_obj *ft, + struct mlx5dr_table *dst_tbl); + #endif /* MLX5DR_TABLE_H_ */ diff 
--git a/drivers/net/mlx5/linux/mlx5_os.c b/drivers/net/mlx5/linux/mlx5_os.c index 0a8de88759..69a80b9ddc 100644 --- a/drivers/net/mlx5/linux/mlx5_os.c +++ b/drivers/net/mlx5/linux/mlx5_os.c @@ -1612,9 +1612,11 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev, priv->ctrl_flows = 0; rte_spinlock_init(&priv->flow_list_lock); TAILQ_INIT(&priv->flow_meters); - priv->mtr_profile_tbl = mlx5_l3t_create(MLX5_L3T_TYPE_PTR); - if (!priv->mtr_profile_tbl) - goto error; + if (priv->mtr_en) { + priv->mtr_profile_tbl = mlx5_l3t_create(MLX5_L3T_TYPE_PTR); + if (!priv->mtr_profile_tbl) + goto error; + } /* Bring Ethernet device up. */ DRV_LOG(DEBUG, "port %u forcing Ethernet interface up", eth_dev->data->port_id); @@ -1701,6 +1703,9 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev, (sh->config.dv_flow_en == 1 && mlx5_flow_discover_ipv6_tc_support(eth_dev))) sh->phdev->config.ipv6_tc_fallback = MLX5_IPV6_TC_FALLBACK; } + rte_spinlock_init(&priv->hw_ctrl_lock); + LIST_INIT(&priv->hw_ctrl_flows); + LIST_INIT(&priv->hw_ext_ctrl_flows); if (priv->sh->config.dv_flow_en == 2) { #ifdef HAVE_MLX5_HWS_SUPPORT if (priv->sh->config.dv_esw_en) { @@ -2549,13 +2554,6 @@ mlx5_os_pci_probe_pf(struct mlx5_common_device *cdev, list[ns].info.master)) ns++; } - if (!ns) { - DRV_LOG(ERR, - "Unable to recognize master/representors on the IB device with multiple ports."); - rte_errno = ENOENT; - ret = -rte_errno; - goto exit; - } } else { /* * The existence of several matching entries (nd > 1) means diff --git a/drivers/net/mlx5/meson.build b/drivers/net/mlx5/meson.build index eb5eb2cce7..78a8648b43 100644 --- a/drivers/net/mlx5/meson.build +++ b/drivers/net/mlx5/meson.build @@ -56,6 +56,13 @@ if is_linux ) endif +if is_windows or (mlx5_config.get('HAVE_INFINIBAND_VERBS_H', false) and + not mlx5_config.get('HAVE_IBV_FLOW_DV_SUPPORT', false)) + sources += files( + 'mlx5_flow_hw_stubs.c', + ) +endif + if is_linux and (dpdk_conf.has('RTE_ARCH_X86_64') or dpdk_conf.has('RTE_ARCH_ARM64') or dpdk_conf.has('RTE_ARCH_PPC_64')) diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c index e36fa651a1..52b90e6ff3 100644 --- a/drivers/net/mlx5/mlx5.c +++ b/drivers/net/mlx5/mlx5.c @@ -2242,6 +2242,7 @@ int mlx5_proc_priv_init(struct rte_eth_dev *dev) { struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_dev_ctx_shared *sh = priv->sh; struct mlx5_proc_priv *ppriv; size_t ppriv_size; @@ -2262,6 +2263,9 @@ mlx5_proc_priv_init(struct rte_eth_dev *dev) dev->process_private = ppriv; if (rte_eal_process_type() == RTE_PROC_PRIMARY) priv->sh->pppriv = ppriv; + /* Check and try to map HCA PCI BAR to allow reading real time. 
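 * Editor's note: doing the mapping here, in the per-process private init, means
 * both primary and secondary processes get their own mapping of the BAR, which
 * is needed because a BAR mapping is only valid in the address space of the
 * process that created it.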
*/ + if (sh->dev_cap.rt_timestamp && mlx5_dev_is_pci(dev->device)) + mlx5_txpp_map_hca_bar(dev); return 0; } diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h index 18b4c15a26..503366580b 100644 --- a/drivers/net/mlx5/mlx5.h +++ b/drivers/net/mlx5/mlx5.h @@ -1787,21 +1787,23 @@ struct mlx5_obj_ops { #define MLX5_RSS_HASH_FIELDS_LEN RTE_DIM(mlx5_rss_hash_fields) -enum mlx5_hw_ctrl_flow_type { - MLX5_HW_CTRL_FLOW_TYPE_GENERAL, - MLX5_HW_CTRL_FLOW_TYPE_SQ_MISS_ROOT, - MLX5_HW_CTRL_FLOW_TYPE_SQ_MISS, - MLX5_HW_CTRL_FLOW_TYPE_DEFAULT_JUMP, - MLX5_HW_CTRL_FLOW_TYPE_TX_META_COPY, - MLX5_HW_CTRL_FLOW_TYPE_TX_REPR_MATCH, - MLX5_HW_CTRL_FLOW_TYPE_LACP_RX, - MLX5_HW_CTRL_FLOW_TYPE_DEFAULT_RX_RSS, +enum mlx5_ctrl_flow_type { + MLX5_CTRL_FLOW_TYPE_GENERAL, + MLX5_CTRL_FLOW_TYPE_SQ_MISS_ROOT, + MLX5_CTRL_FLOW_TYPE_SQ_MISS, + MLX5_CTRL_FLOW_TYPE_DEFAULT_JUMP, + MLX5_CTRL_FLOW_TYPE_TX_META_COPY, + MLX5_CTRL_FLOW_TYPE_TX_REPR_MATCH, + MLX5_CTRL_FLOW_TYPE_LACP_RX, + MLX5_CTRL_FLOW_TYPE_DEFAULT_RX_RSS, + MLX5_CTRL_FLOW_TYPE_DEFAULT_RX_RSS_UNICAST_DMAC, + MLX5_CTRL_FLOW_TYPE_DEFAULT_RX_RSS_UNICAST_DMAC_VLAN, }; /** Additional info about control flow rule. */ -struct mlx5_hw_ctrl_flow_info { +struct mlx5_ctrl_flow_info { /** Determines the kind of control flow rule. */ - enum mlx5_hw_ctrl_flow_type type; + enum mlx5_ctrl_flow_type type; union { /** * If control flow is a SQ miss flow (root or not), @@ -1813,12 +1815,36 @@ struct mlx5_hw_ctrl_flow_info { * then fields contains matching SQ number. */ uint32_t tx_repr_sq; + /** Contains data relevant for unicast control flow rules. */ + struct { + /** + * If control flow is a unicast DMAC (or with VLAN) flow rule, + * then this field contains DMAC. + */ + struct rte_ether_addr dmac; + /** + * If control flow is a unicast DMAC with VLAN flow rule, + * then this field contains VLAN ID. + */ + uint16_t vlan; + } uc; }; }; +/** Returns true if a control flow rule with unicast DMAC match on given address was created. */ +bool mlx5_ctrl_flow_uc_dmac_exists(struct rte_eth_dev *dev, const struct rte_ether_addr *addr); + +/** + * Returns true if a control flow rule with unicast DMAC and VLAN match + * on given values was created. + */ +bool mlx5_ctrl_flow_uc_dmac_vlan_exists(struct rte_eth_dev *dev, + const struct rte_ether_addr *addr, + const uint16_t vid); + /** Entry for tracking control flow rules in HWS. */ -struct mlx5_hw_ctrl_flow { - LIST_ENTRY(mlx5_hw_ctrl_flow) next; +struct mlx5_ctrl_flow_entry { + LIST_ENTRY(mlx5_ctrl_flow_entry) next; /** * Owner device is a port on behalf of which flow rule was created. * @@ -1830,7 +1856,7 @@ struct mlx5_hw_ctrl_flow { /** Pointer to flow rule handle. */ struct rte_flow *flow; /** Additional information about the control flow rule. */ - struct mlx5_hw_ctrl_flow_info info; + struct mlx5_ctrl_flow_info info; }; /* HW Steering port configuration passed to rte_flow_configure(). */ @@ -1939,8 +1965,8 @@ struct mlx5_priv { struct mlx5_drop drop_queue; /* Flow drop queues. */ void *root_drop_action; /* Pointer to root drop action. 
*/ rte_spinlock_t hw_ctrl_lock; - LIST_HEAD(hw_ctrl_flow, mlx5_hw_ctrl_flow) hw_ctrl_flows; - LIST_HEAD(hw_ext_ctrl_flow, mlx5_hw_ctrl_flow) hw_ext_ctrl_flows; + LIST_HEAD(hw_ctrl_flow, mlx5_ctrl_flow_entry) hw_ctrl_flows; + LIST_HEAD(hw_ext_ctrl_flow, mlx5_ctrl_flow_entry) hw_ext_ctrl_flows; struct mlx5_flow_hw_ctrl_fdb *hw_ctrl_fdb; struct rte_flow_pattern_template *hw_tx_repr_tagging_pt; struct rte_flow_actions_template *hw_tx_repr_tagging_at; @@ -2346,6 +2372,10 @@ int mlx5_hairpin_bind(struct rte_eth_dev *dev, uint16_t rx_port); int mlx5_hairpin_unbind(struct rte_eth_dev *dev, uint16_t rx_port); int mlx5_hairpin_get_peer_ports(struct rte_eth_dev *dev, uint16_t *peer_ports, size_t len, uint32_t direction); +int mlx5_traffic_mac_add(struct rte_eth_dev *dev, const struct rte_ether_addr *addr); +int mlx5_traffic_mac_remove(struct rte_eth_dev *dev, const struct rte_ether_addr *addr); +int mlx5_traffic_vlan_add(struct rte_eth_dev *dev, const uint16_t vid); +int mlx5_traffic_vlan_remove(struct rte_eth_dev *dev, const uint16_t vid); /* mlx5_flow.c */ @@ -2652,4 +2682,22 @@ int mlx5_quota_query(struct rte_eth_dev *dev, uint32_t queue, int mlx5_alloc_srh_flex_parser(struct rte_eth_dev *dev); void mlx5_free_srh_flex_parser(struct rte_eth_dev *dev); + +/* mlx5_flow_hw.c */ +struct rte_pmd_mlx5_host_action; + +struct mlx5dr_action * +mlx5_flow_hw_get_dr_action(struct rte_eth_dev *dev, + struct rte_pmd_mlx5_host_action *action, + void **release_data); + +void +mlx5_flow_hw_put_dr_action(struct rte_eth_dev *dev, + enum rte_flow_action_type type, + void *release_data); + +bool +mlx5_hw_ctx_validate(const struct rte_eth_dev *dev, + struct rte_flow_error *error); + #endif /* RTE_PMD_MLX5_H_ */ diff --git a/drivers/net/mlx5/mlx5_ethdev.c b/drivers/net/mlx5/mlx5_ethdev.c index 6f24d649e0..7708a0b808 100644 --- a/drivers/net/mlx5/mlx5_ethdev.c +++ b/drivers/net/mlx5/mlx5_ethdev.c @@ -359,6 +359,10 @@ mlx5_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info) info->flow_type_rss_offloads = ~MLX5_RSS_HF_MASK; mlx5_set_default_params(dev, info); mlx5_set_txlimit_params(dev, info); + info->rx_desc_lim.nb_max = + 1 << priv->sh->cdev->config.hca_attr.log_max_wq_sz; + info->tx_desc_lim.nb_max = + 1 << priv->sh->cdev->config.hca_attr.log_max_wq_sz; if (priv->sh->cdev->config.hca_attr.mem_rq_rmp && priv->obj_ops.rxq_obj_new == devx_obj_ops.rxq_obj_new) info->dev_capa |= RTE_ETH_DEV_CAPA_RXQ_SHARE; diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c index 7f8640b488..f8cfa661ec 100644 --- a/drivers/net/mlx5/mlx5_flow.c +++ b/drivers/net/mlx5/mlx5_flow.c @@ -8493,8 +8493,9 @@ mlx5_ctrl_flow_vlan(struct rte_eth_dev *dev, .type = RTE_FLOW_ACTION_TYPE_END, }, }; - uint32_t flow_idx; + uintptr_t flow_idx; struct rte_flow_error error; + struct mlx5_ctrl_flow_entry *entry; unsigned int i; if (!priv->reta_idx_n || !priv->rxqs_n) { @@ -8504,11 +8505,36 @@ mlx5_ctrl_flow_vlan(struct rte_eth_dev *dev, action_rss.types = 0; for (i = 0; i != priv->reta_idx_n; ++i) queue[i] = (*priv->reta_idx)[i]; + + entry = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*entry), alignof(typeof(*entry)), SOCKET_ID_ANY); + if (entry == NULL) { + rte_errno = ENOMEM; + goto err; + } + + entry->owner_dev = dev; + if (vlan_spec == NULL) { + entry->info.type = MLX5_CTRL_FLOW_TYPE_DEFAULT_RX_RSS_UNICAST_DMAC; + } else { + entry->info.type = MLX5_CTRL_FLOW_TYPE_DEFAULT_RX_RSS_UNICAST_DMAC_VLAN; + entry->info.uc.vlan = rte_be_to_cpu_16(vlan_spec->hdr.vlan_tci); + } + entry->info.uc.dmac = eth_spec->hdr.dst_addr; + flow_idx = 
mlx5_flow_list_create(dev, MLX5_FLOW_TYPE_CTL, &attr, items, actions, false, &error); - if (!flow_idx) - return -rte_errno; + if (!flow_idx) { + mlx5_free(entry); + goto err; + } + + entry->flow = (struct rte_flow *)flow_idx; + LIST_INSERT_HEAD(&priv->hw_ctrl_flows, entry, next); + return 0; + +err: + return -rte_errno; } /** @@ -8532,6 +8558,86 @@ mlx5_ctrl_flow(struct rte_eth_dev *dev, return mlx5_ctrl_flow_vlan(dev, eth_spec, eth_mask, NULL, NULL); } +int +mlx5_legacy_dmac_flow_create(struct rte_eth_dev *dev, const struct rte_ether_addr *addr) +{ + struct rte_flow_item_eth unicast = { + .hdr.dst_addr = *addr, + }; + struct rte_flow_item_eth unicast_mask = { + .hdr.dst_addr.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }, + }; + + return mlx5_ctrl_flow(dev, &unicast, &unicast_mask); +} + +int +mlx5_legacy_dmac_vlan_flow_create(struct rte_eth_dev *dev, + const struct rte_ether_addr *addr, + const uint16_t vid) +{ + struct rte_flow_item_eth unicast_spec = { + .hdr.dst_addr = *addr, + }; + struct rte_flow_item_eth unicast_mask = { + .hdr.dst_addr.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }, + }; + struct rte_flow_item_vlan vlan_spec = { + .hdr.vlan_tci = rte_cpu_to_be_16(vid), + }; + struct rte_flow_item_vlan vlan_mask = rte_flow_item_vlan_mask; + + return mlx5_ctrl_flow_vlan(dev, &unicast_spec, &unicast_mask, &vlan_spec, &vlan_mask); +} + +void +mlx5_legacy_ctrl_flow_destroy(struct rte_eth_dev *dev, struct mlx5_ctrl_flow_entry *entry) +{ + uintptr_t flow_idx; + + flow_idx = (uintptr_t)entry->flow; + mlx5_flow_list_destroy(dev, MLX5_FLOW_TYPE_CTL, flow_idx); + LIST_REMOVE(entry, next); + mlx5_free(entry); +} + +int +mlx5_legacy_dmac_flow_destroy(struct rte_eth_dev *dev, const struct rte_ether_addr *addr) +{ + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_ctrl_flow_entry *entry; + + LIST_FOREACH(entry, &priv->hw_ctrl_flows, next) { + if (entry->info.type != MLX5_CTRL_FLOW_TYPE_DEFAULT_RX_RSS_UNICAST_DMAC || + !rte_is_same_ether_addr(addr, &entry->info.uc.dmac)) + continue; + + mlx5_legacy_ctrl_flow_destroy(dev, entry); + return 0; + } + return 0; +} + +int +mlx5_legacy_dmac_vlan_flow_destroy(struct rte_eth_dev *dev, + const struct rte_ether_addr *addr, + const uint16_t vid) +{ + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_ctrl_flow_entry *entry; + + LIST_FOREACH(entry, &priv->hw_ctrl_flows, next) { + if (entry->info.type != MLX5_CTRL_FLOW_TYPE_DEFAULT_RX_RSS_UNICAST_DMAC_VLAN || + !rte_is_same_ether_addr(addr, &entry->info.uc.dmac) || + vid != entry->info.uc.vlan) + continue; + + mlx5_legacy_ctrl_flow_destroy(dev, entry); + return 0; + } + return 0; +} + /** * Create default miss flow rule matching lacp traffic * @@ -12178,3 +12284,40 @@ rte_pmd_mlx5_destroy_geneve_tlv_parser(void *handle) return -rte_errno; #endif } + +bool +mlx5_ctrl_flow_uc_dmac_exists(struct rte_eth_dev *dev, const struct rte_ether_addr *addr) +{ + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_ctrl_flow_entry *entry; + bool exists = false; + + LIST_FOREACH(entry, &priv->hw_ctrl_flows, next) { + if (entry->info.type == MLX5_CTRL_FLOW_TYPE_DEFAULT_RX_RSS_UNICAST_DMAC && + rte_is_same_ether_addr(addr, &entry->info.uc.dmac)) { + exists = true; + break; + } + } + return exists; +} + +bool +mlx5_ctrl_flow_uc_dmac_vlan_exists(struct rte_eth_dev *dev, + const struct rte_ether_addr *addr, + const uint16_t vid) +{ + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_ctrl_flow_entry *entry; + bool exists = false; + + LIST_FOREACH(entry, 
&priv->hw_ctrl_flows, next) { + if (entry->info.type == MLX5_CTRL_FLOW_TYPE_DEFAULT_RX_RSS_UNICAST_DMAC_VLAN && + rte_is_same_ether_addr(addr, &entry->info.uc.dmac) && + vid == entry->info.uc.vlan) { + exists = true; + break; + } + } + return exists; +} diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h index 86a1476879..693e07218d 100644 --- a/drivers/net/mlx5/mlx5_flow.h +++ b/drivers/net/mlx5/mlx5_flow.h @@ -168,6 +168,9 @@ struct mlx5_flow_action_copy_mreg { /* Matches on source queue. */ struct mlx5_rte_flow_item_sq { uint32_t queue; /* DevX SQ number */ +#ifdef RTE_ARCH_64 + uint32_t reserved; +#endif }; /* Map from registers to modify fields. */ @@ -398,6 +401,7 @@ enum mlx5_feature_name { #define MLX5_FLOW_ACTION_IPV6_ROUTING_REMOVE (1ull << 48) #define MLX5_FLOW_ACTION_IPV6_ROUTING_PUSH (1ull << 49) #define MLX5_FLOW_ACTION_NAT64 (1ull << 50) +#define MLX5_FLOW_ACTION_JUMP_TO_TABLE_INDEX (1ull << 51) #define MLX5_FLOW_DROP_INCLUSIVE_ACTIONS \ (MLX5_FLOW_ACTION_COUNT | MLX5_FLOW_ACTION_SAMPLE | MLX5_FLOW_ACTION_AGE) @@ -408,12 +412,14 @@ enum mlx5_feature_name { MLX5_FLOW_ACTION_DEFAULT_MISS | \ MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY | \ MLX5_FLOW_ACTION_SEND_TO_KERNEL | \ - MLX5_FLOW_ACTION_PORT_REPRESENTOR) + MLX5_FLOW_ACTION_PORT_REPRESENTOR | \ + MLX5_FLOW_ACTION_JUMP_TO_TABLE_INDEX) #define MLX5_FLOW_FATE_ESWITCH_ACTIONS \ (MLX5_FLOW_ACTION_DROP | MLX5_FLOW_ACTION_PORT_ID | \ MLX5_FLOW_ACTION_SEND_TO_KERNEL | \ - MLX5_FLOW_ACTION_JUMP | MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY) + MLX5_FLOW_ACTION_JUMP | MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY | \ + MLX5_FLOW_ACTION_JUMP_TO_TABLE_INDEX) #define MLX5_FLOW_MODIFY_HDR_ACTIONS (MLX5_FLOW_ACTION_SET_IPV4_SRC | \ MLX5_FLOW_ACTION_SET_IPV4_DST | \ @@ -974,7 +980,7 @@ struct mlx5_flow_verbs_workspace { #define MLX5_SCALE_JUMP_FLOW_GROUP_BIT 1 /** Maximal number of device sub-flows supported. */ -#define MLX5_NUM_MAX_DEV_FLOWS 32 +#define MLX5_NUM_MAX_DEV_FLOWS 64 /** * tunnel offload rules type */ @@ -1704,6 +1710,7 @@ struct mlx5_flow_template_table_cfg { struct mlx5_matcher_info { struct mlx5dr_matcher *matcher; /* Template matcher. */ + struct mlx5dr_action *jump; /* Jump to matcher action. */ RTE_ATOMIC(uint32_t) refcnt; }; @@ -2990,6 +2997,42 @@ struct mlx5_flow_hw_ctrl_fdb { #define MLX5_CTRL_VLAN_FILTER (RTE_BIT32(6)) int mlx5_flow_hw_ctrl_flows(struct rte_eth_dev *dev, uint32_t flags); + +/** Create a control flow rule for matching unicast DMAC (Verbs and DV). */ +int mlx5_legacy_dmac_flow_create(struct rte_eth_dev *dev, const struct rte_ether_addr *addr); + +/** Destroy a control flow rule for matching unicast DMAC (Verbs and DV). */ +int mlx5_legacy_dmac_flow_destroy(struct rte_eth_dev *dev, const struct rte_ether_addr *addr); + +/** Create a control flow rule for matching unicast DMAC with VLAN (Verbs and DV). */ +int mlx5_legacy_dmac_vlan_flow_create(struct rte_eth_dev *dev, + const struct rte_ether_addr *addr, + const uint16_t vid); + +/** Destroy a control flow rule for matching unicast DMAC with VLAN (Verbs and DV). */ +int mlx5_legacy_dmac_vlan_flow_destroy(struct rte_eth_dev *dev, + const struct rte_ether_addr *addr, + const uint16_t vid); + +/** Destroy a control flow rule registered on port level control flow rule type. */ +void mlx5_legacy_ctrl_flow_destroy(struct rte_eth_dev *dev, struct mlx5_ctrl_flow_entry *entry); + +/** Create a control flow rule for matching unicast DMAC (HWS).
*/ +int mlx5_flow_hw_ctrl_flow_dmac(struct rte_eth_dev *dev, const struct rte_ether_addr *addr); + +/** Destroy a control flow rule for matching unicast DMAC (HWS). */ +int mlx5_flow_hw_ctrl_flow_dmac_destroy(struct rte_eth_dev *dev, const struct rte_ether_addr *addr); + +/** Create a control flow rule for matching unicast DMAC with VLAN (HWS). */ +int mlx5_flow_hw_ctrl_flow_dmac_vlan(struct rte_eth_dev *dev, + const struct rte_ether_addr *addr, + const uint16_t vlan); + +/** Destroy a control flow rule for matching unicast DMAC with VLAN (HWS). */ +int mlx5_flow_hw_ctrl_flow_dmac_vlan_destroy(struct rte_eth_dev *dev, + const struct rte_ether_addr *addr, + const uint16_t vlan); + void mlx5_flow_hw_cleanup_ctrl_rx_templates(struct rte_eth_dev *dev); int mlx5_flow_group_to_table(struct rte_eth_dev *dev, diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c index 201e215e4b..41ebe0b61a 100644 --- a/drivers/net/mlx5/mlx5_flow_dv.c +++ b/drivers/net/mlx5/mlx5_flow_dv.c @@ -8,6 +8,7 @@ #include #include +#include #include #include #include @@ -4046,10 +4047,19 @@ mlx5_flow_dv_validate_action_raw_encap_decap const struct mlx5_priv *priv = dev->data->dev_private; int ret; - if (encap && (!encap->size || !encap->data)) - return rte_flow_error_set(error, EINVAL, - RTE_FLOW_ERROR_TYPE_ACTION, NULL, - "raw encap data cannot be empty"); + if (encap) { + if (!mlx5_hws_active(dev)) { + if (!encap->size || !encap->data) + return rte_flow_error_set + (error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, NULL, "raw encap data cannot be empty"); + } else { + if (!encap->size) + return rte_flow_error_set + (error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, NULL, "raw encap size cannot be 0"); + } + } if (decap && encap) { if (decap->size <= MLX5_ENCAPSULATION_DECISION_SIZE && encap->size > MLX5_ENCAPSULATION_DECISION_SIZE) @@ -9068,7 +9078,7 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, !(non_shared_age && count) && (attr->group || (attr->transfer && priv->fdb_def_rule)) && priv->sh->flow_hit_aso_en); - if (__builtin_popcountl(aso_mask) > 1) + if (rte_popcount64(aso_mask) > 1) return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, NULL, "unsupported combining AGE, METER, CT ASO actions in a single rule"); /* @@ -9829,22 +9839,23 @@ flow_dv_translate_item_gre(void *key, const struct rte_flow_item *item, } gre_crks_rsvd0_ver_m, gre_crks_rsvd0_ver_v; uint16_t protocol_m, protocol_v; - if (key_type & MLX5_SET_MATCHER_M) + if (key_type & MLX5_SET_MATCHER_M) { MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, 0xff); - else + if (!gre_m) + gre_m = &rte_flow_item_gre_mask; + gre_v = gre_m; + } else { MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_GRE); - if (!gre_v) { - gre_v = &empty_gre; - gre_m = &empty_gre; - } else { - if (!gre_m) + if (!gre_v) { + gre_v = &empty_gre; + gre_m = &empty_gre; + } else if (!gre_m) { gre_m = &rte_flow_item_gre_mask; + } + if (key_type == MLX5_SET_MATCHER_HS_V) + gre_m = gre_v; } - if (key_type & MLX5_SET_MATCHER_M) - gre_v = gre_m; - else if (key_type == MLX5_SET_MATCHER_HS_V) - gre_m = gre_v; gre_crks_rsvd0_ver_m.value = rte_be_to_cpu_16(gre_m->c_rsvd0_ver); gre_crks_rsvd0_ver_v.value = rte_be_to_cpu_16(gre_v->c_rsvd0_ver); MLX5_SET(fte_match_set_misc, misc_v, gre_c_present, diff --git a/drivers/net/mlx5/mlx5_flow_hw.c b/drivers/net/mlx5/mlx5_flow_hw.c index 0084f81980..2a9ef71cd8 100644 --- a/drivers/net/mlx5/mlx5_flow_hw.c +++ b/drivers/net/mlx5/mlx5_flow_hw.c @@ -208,7 +208,12 @@ 
mlx5_destroy_multi_pattern_segment(struct mlx5_multi_pattern_segment *segment); static __rte_always_inline enum mlx5_indirect_list_type flow_hw_inlist_type_get(const struct rte_flow_action *actions); -static bool +static int +flow_hw_allocate_actions(struct rte_eth_dev *dev, + uint64_t action_flags, + struct rte_flow_error *error); + +bool mlx5_hw_ctx_validate(const struct rte_eth_dev *dev, struct rte_flow_error *error) { const struct mlx5_priv *priv = dev->data->dev_private; @@ -335,18 +340,13 @@ static __rte_always_inline uint32_t flow_hw_tx_tag_regc_value(struct rte_eth_dev static int flow_hw_async_create_validate(struct rte_eth_dev *dev, const uint32_t queue, const struct rte_flow_template_table *table, + enum rte_flow_table_insertion_type insertion_type, + const uint32_t rule_index, const struct rte_flow_item items[], const uint8_t pattern_template_index, const struct rte_flow_action actions[], const uint8_t action_template_index, struct rte_flow_error *error); -static int flow_hw_async_create_by_index_validate(struct rte_eth_dev *dev, - const uint32_t queue, - const struct rte_flow_template_table *table, - const uint32_t rule_index, - const struct rte_flow_action actions[], - const uint8_t action_template_index, - struct rte_flow_error *error); static int flow_hw_async_update_validate(struct rte_eth_dev *dev, const uint32_t queue, const struct rte_flow_hw *flow, @@ -734,6 +734,9 @@ flow_hw_action_flags_get(const struct rte_flow_action actions[], case MLX5_RTE_FLOW_ACTION_TYPE_DEFAULT_MISS: action_flags |= MLX5_FLOW_ACTION_DEFAULT_MISS; break; + case RTE_FLOW_ACTION_TYPE_JUMP_TO_TABLE_INDEX: + action_flags |= MLX5_FLOW_ACTION_JUMP_TO_TABLE_INDEX; + break; case RTE_FLOW_ACTION_TYPE_VOID: case RTE_FLOW_ACTION_TYPE_END: break; @@ -2930,6 +2933,34 @@ __flow_hw_translate_actions_template(struct rte_eth_dev *dev, src_pos, dr_pos)) goto err; break; + case RTE_FLOW_ACTION_TYPE_JUMP_TO_TABLE_INDEX: + if (masks->conf && + ((const struct rte_flow_action_jump_to_table_index *) + masks->conf)->table) { + struct rte_flow_template_table *jump_table = + ((const struct rte_flow_action_jump_to_table_index *) + actions->conf)->table; + acts->rule_acts[dr_pos].jump_to_matcher.offset = + ((const struct rte_flow_action_jump_to_table_index *) + actions->conf)->index; + if (likely(!rte_flow_template_table_resizable(dev->data->port_id, + &jump_table->cfg.attr))) { + acts->rule_acts[dr_pos].action = + jump_table->matcher_info[0].jump; + } else { + uint32_t selector; + rte_rwlock_read_lock(&jump_table->matcher_replace_rwlk); + selector = jump_table->matcher_selector; + acts->rule_acts[dr_pos].action = + jump_table->matcher_info[selector].jump; + rte_rwlock_read_unlock(&jump_table->matcher_replace_rwlk); + } + } else if (__flow_hw_act_data_general_append + (priv, acts, actions->type, + src_pos, dr_pos)){ + goto err; + } + break; case RTE_FLOW_ACTION_TYPE_END: actions_end = true; break; @@ -3532,6 +3563,7 @@ flow_hw_actions_construct(struct rte_eth_dev *dev, cnt_id_t cnt_id; uint32_t *cnt_queue; uint32_t mtr_id; + struct rte_flow_template_table *jump_table; action = &actions[act_data->action_src]; /* @@ -3547,7 +3579,7 @@ flow_hw_actions_construct(struct rte_eth_dev *dev, (int)action->type == act_data->type); switch ((int)act_data->type) { case RTE_FLOW_ACTION_TYPE_INDIRECT_LIST: - act_data->indirect_list_cb(dev, act_data, actions, + act_data->indirect_list_cb(dev, act_data, action, &rule_acts[act_data->action_dst]); break; case RTE_FLOW_ACTION_TYPE_INDIRECT: @@ -3764,6 +3796,25 @@ 
flow_hw_actions_construct(struct rte_eth_dev *dev, rule_acts[act_data->action_dst].action = priv->action_nat64[table->type][nat64_c->type]; break; + case RTE_FLOW_ACTION_TYPE_JUMP_TO_TABLE_INDEX: + jump_table = ((const struct rte_flow_action_jump_to_table_index *) + action->conf)->table; + if (likely(!rte_flow_template_table_resizable(dev->data->port_id, + &table->cfg.attr))) { + rule_acts[act_data->action_dst].action = + jump_table->matcher_info[0].jump; + } else { + uint32_t selector; + rte_rwlock_read_lock(&table->matcher_replace_rwlk); + selector = table->matcher_selector; + rule_acts[act_data->action_dst].action = + jump_table->matcher_info[selector].jump; + rte_rwlock_read_unlock(&table->matcher_replace_rwlk); + } + rule_acts[act_data->action_dst].jump_to_matcher.offset = + ((const struct rte_flow_action_jump_to_table_index *) + action->conf)->index; + break; default: break; } @@ -3884,6 +3935,12 @@ flow_hw_get_rule_items(struct rte_eth_dev *dev, * The queue to create the flow. * @param[in] attr * Pointer to the flow operation attributes. + * @param[in] table + * Pointer to the template table. + * @param[in] insertion_type + * Insertion type for flow rules. + * @param[in] rule_index + * The item pattern flow follows from the table. * @param[in] items * Items with flow spec value. * @param[in] pattern_template_index @@ -3900,17 +3957,19 @@ flow_hw_get_rule_items(struct rte_eth_dev *dev, * @return * Flow pointer on success, NULL otherwise and rte_errno is set. */ -static struct rte_flow * -flow_hw_async_flow_create(struct rte_eth_dev *dev, - uint32_t queue, - const struct rte_flow_op_attr *attr, - struct rte_flow_template_table *table, - const struct rte_flow_item items[], - uint8_t pattern_template_index, - const struct rte_flow_action actions[], - uint8_t action_template_index, - void *user_data, - struct rte_flow_error *error) +static __rte_always_inline struct rte_flow * +flow_hw_async_flow_create_generic(struct rte_eth_dev *dev, + uint32_t queue, + const struct rte_flow_op_attr *attr, + struct rte_flow_template_table *table, + enum rte_flow_table_insertion_type insertion_type, + uint32_t rule_index, + const struct rte_flow_item items[], + uint8_t pattern_template_index, + const struct rte_flow_action actions[], + uint8_t action_template_index, + void *user_data, + struct rte_flow_error *error) { struct mlx5_priv *priv = dev->data->dev_private; struct mlx5dr_rule_attr rule_attr = { @@ -3928,8 +3987,8 @@ flow_hw_async_flow_create(struct rte_eth_dev *dev, int ret; if (mlx5_fp_debug_enabled()) { - if (flow_hw_async_create_validate(dev, queue, table, items, pattern_template_index, - actions, action_template_index, error)) + if (flow_hw_async_create_validate(dev, queue, table, insertion_type, rule_index, + items, pattern_template_index, actions, action_template_index, error)) return NULL; } flow = mlx5_ipool_malloc(table->flow, &flow_idx); @@ -3967,7 +4026,7 @@ flow_hw_async_flow_create(struct rte_eth_dev *dev, * Indexed pool returns 1-based indices, but mlx5dr expects 0-based indices * for rule insertion hints. */ - flow->rule_idx = flow->res_idx - 1; + flow->rule_idx = (rule_index == UINT32_MAX) ? flow->res_idx - 1 : rule_index; rule_attr.rule_idx = flow->rule_idx; /* * Construct the flow actions based on the input actions. @@ -4023,33 +4082,26 @@ flow_hw_async_flow_create(struct rte_eth_dev *dev, return NULL; } -/** - * Enqueue HW steering flow creation by index. - * - * The flow will be applied to the HW only if the postpone bit is not set or - * the extra push function is called. 
- * The flow creation status should be checked from dequeue result. - * - * @param[in] dev - * Pointer to the rte_eth_dev structure. - * @param[in] queue - * The queue to create the flow. - * @param[in] attr - * Pointer to the flow operation attributes. - * @param[in] rule_index - * The item pattern flow follows from the table. - * @param[in] actions - * Action with flow spec value. - * @param[in] action_template_index - * The action pattern flow follows from the table. - * @param[in] user_data - * Pointer to the user_data. - * @param[out] error - * Pointer to error structure. - * - * @return - * Flow pointer on success, NULL otherwise and rte_errno is set. - */ +static struct rte_flow * +flow_hw_async_flow_create(struct rte_eth_dev *dev, + uint32_t queue, + const struct rte_flow_op_attr *attr, + struct rte_flow_template_table *table, + const struct rte_flow_item items[], + uint8_t pattern_template_index, + const struct rte_flow_action actions[], + uint8_t action_template_index, + void *user_data, + struct rte_flow_error *error) +{ + uint32_t rule_index = UINT32_MAX; + + return flow_hw_async_flow_create_generic(dev, queue, attr, table, + RTE_FLOW_TABLE_INSERTION_TYPE_PATTERN, rule_index, + items, pattern_template_index, actions, action_template_index, + user_data, error); +} + static struct rte_flow * flow_hw_async_flow_create_by_index(struct rte_eth_dev *dev, uint32_t queue, @@ -4062,105 +4114,31 @@ flow_hw_async_flow_create_by_index(struct rte_eth_dev *dev, struct rte_flow_error *error) { struct rte_flow_item items[] = {{.type = RTE_FLOW_ITEM_TYPE_END,}}; - struct mlx5_priv *priv = dev->data->dev_private; - struct mlx5dr_rule_attr rule_attr = { - .queue_id = queue, - .user_data = user_data, - .burst = attr->postpone, - }; - struct mlx5dr_rule_action *rule_acts; - struct mlx5_flow_hw_action_params ap; - struct rte_flow_hw *flow = NULL; - uint32_t flow_idx = 0; - uint32_t res_idx = 0; - int ret; + uint8_t pattern_template_index = 0; - if (mlx5_fp_debug_enabled()) { - if (flow_hw_async_create_by_index_validate(dev, queue, table, rule_index, - actions, action_template_index, error)) - return NULL; - } - flow = mlx5_ipool_malloc(table->flow, &flow_idx); - if (!flow) { - rte_errno = ENOMEM; - goto error; - } - rule_acts = flow_hw_get_dr_action_buffer(priv, table, action_template_index, queue); - /* - * Set the table here in order to know the destination table - * when free the flow afterwards. - */ - flow->table = table; - flow->mt_idx = 0; - flow->idx = flow_idx; - if (table->resource) { - mlx5_ipool_malloc(table->resource, &res_idx); - if (!res_idx) { - rte_errno = ENOMEM; - goto error; - } - flow->res_idx = res_idx; - } else { - flow->res_idx = flow_idx; - } - flow->flags = 0; - /* - * Set the flow operation type here in order to know if the flow memory - * should be freed or not when get the result from dequeue. - */ - flow->operation_type = MLX5_FLOW_HW_FLOW_OP_TYPE_CREATE; - flow->user_data = user_data; - rule_attr.user_data = flow; - /* Set the rule index. */ - flow->rule_idx = rule_index; - rule_attr.rule_idx = flow->rule_idx; - /* - * Construct the flow actions based on the input actions. - * The implicitly appended action is always fixed, like metadata - * copy action from FDB to NIC Rx. - * No need to copy and contrust a new "actions" list based on the - * user's input, in order to save the cost. 
- */ - if (flow_hw_actions_construct(dev, flow, &ap, - &table->ats[action_template_index], - table->its[0]->item_flags, table, - actions, rule_acts, queue, error)) { - rte_errno = EINVAL; - goto error; - } - if (likely(!rte_flow_template_table_resizable(dev->data->port_id, &table->cfg.attr))) { - ret = mlx5dr_rule_create(table->matcher_info[0].matcher, - 0, items, action_template_index, - rule_acts, &rule_attr, - (struct mlx5dr_rule *)flow->rule); - } else { - struct rte_flow_hw_aux *aux = mlx5_flow_hw_aux(dev->data->port_id, flow); - uint32_t selector; + return flow_hw_async_flow_create_generic(dev, queue, attr, table, + RTE_FLOW_TABLE_INSERTION_TYPE_INDEX, rule_index, + items, pattern_template_index, actions, action_template_index, + user_data, error); +} - flow->operation_type = MLX5_FLOW_HW_FLOW_OP_TYPE_RSZ_TBL_CREATE; - rte_rwlock_read_lock(&table->matcher_replace_rwlk); - selector = table->matcher_selector; - ret = mlx5dr_rule_create(table->matcher_info[selector].matcher, - 0, items, action_template_index, - rule_acts, &rule_attr, - (struct mlx5dr_rule *)flow->rule); - rte_rwlock_read_unlock(&table->matcher_replace_rwlk); - aux->matcher_selector = selector; - flow->flags |= MLX5_FLOW_HW_FLOW_FLAG_MATCHER_SELECTOR; - } - if (likely(!ret)) { - flow_hw_q_inc_flow_ops(priv, queue); - return (struct rte_flow *)flow; - } -error: - if (table->resource && res_idx) - mlx5_ipool_free(table->resource, res_idx); - if (flow_idx) - mlx5_ipool_free(table->flow, flow_idx); - rte_flow_error_set(error, rte_errno, - RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, - "fail to create rte flow"); - return NULL; +static struct rte_flow * +flow_hw_async_flow_create_by_index_with_pattern(struct rte_eth_dev *dev, + uint32_t queue, + const struct rte_flow_op_attr *attr, + struct rte_flow_template_table *table, + uint32_t rule_index, + const struct rte_flow_item items[], + uint8_t pattern_template_index, + const struct rte_flow_action actions[], + uint8_t action_template_index, + void *user_data, + struct rte_flow_error *error) +{ + return flow_hw_async_flow_create_generic(dev, queue, attr, table, + RTE_FLOW_TABLE_INSERTION_TYPE_INDEX_WITH_PATTERN, rule_index, + items, pattern_template_index, actions, action_template_index, + user_data, error); } /** @@ -5041,6 +5019,10 @@ flow_hw_table_create(struct rte_eth_dev *dev, }; struct mlx5_priv *priv = dev->data->dev_private; struct mlx5dr_matcher_attr matcher_attr = {0}; + struct mlx5dr_action_jump_to_matcher_attr jump_attr = { + .type = MLX5DR_ACTION_JUMP_TO_MATCHER_BY_INDEX, + .matcher = NULL, + }; struct rte_flow_template_table *tbl = NULL; struct mlx5_flow_group *grp; struct mlx5dr_match_template *mt[MLX5_HW_TBL_MAX_ITEM_TEMPLATE]; @@ -5157,6 +5139,15 @@ flow_hw_table_create(struct rte_eth_dev *dev, matcher_attr.optimize_using_rule_idx = true; matcher_attr.mode = MLX5DR_MATCHER_RESOURCE_MODE_RULE; matcher_attr.insert_mode = flow_hw_matcher_insert_mode_get(attr->insertion_type); + if (matcher_attr.insert_mode == MLX5DR_MATCHER_INSERT_BY_INDEX) { + if (attr->insertion_type == RTE_FLOW_TABLE_INSERTION_TYPE_INDEX_WITH_PATTERN) { + matcher_attr.isolated = true; + matcher_attr.match_mode = MLX5DR_MATCHER_MATCH_MODE_DEFAULT; + } else { + matcher_attr.isolated = false; + matcher_attr.match_mode = MLX5DR_MATCHER_MATCH_MODE_ALWAYS_HIT; + } + } if (attr->hash_func == RTE_FLOW_TABLE_HASH_FUNC_CRC16) { DRV_LOG(ERR, "16-bit checksum hash type is not supported"); rte_errno = ENOTSUP; @@ -5222,6 +5213,13 @@ flow_hw_table_create(struct rte_eth_dev *dev, tbl->type = attr->flow_attr.transfer 
? MLX5DR_TABLE_TYPE_FDB : (attr->flow_attr.egress ? MLX5DR_TABLE_TYPE_NIC_TX : MLX5DR_TABLE_TYPE_NIC_RX); + if (matcher_attr.isolated) { + jump_attr.matcher = tbl->matcher_info[0].matcher; + tbl->matcher_info[0].jump = mlx5dr_action_create_jump_to_matcher(priv->dr_ctx, + &jump_attr, mlx5_hw_act_flag[!!attr->flow_attr.group][tbl->type]); + if (!tbl->matcher_info[0].jump) + goto jtm_error; + } /* * Only the matcher supports update and needs more than 1 WQE, an additional * index is needed. Or else the flow index can be reused. @@ -5244,6 +5242,9 @@ flow_hw_table_create(struct rte_eth_dev *dev, rte_rwlock_init(&tbl->matcher_replace_rwlk); return tbl; res_error: + if (tbl->matcher_info[0].jump) + mlx5dr_action_destroy(tbl->matcher_info[0].jump); +jtm_error: if (tbl->matcher_info[0].matcher) (void)mlx5dr_matcher_destroy(tbl->matcher_info[0].matcher); at_error: @@ -5508,8 +5509,12 @@ flow_hw_table_destroy(struct rte_eth_dev *dev, 1, rte_memory_order_relaxed); } flow_hw_destroy_table_multi_pattern_ctx(table); + if (table->matcher_info[0].jump) + mlx5dr_action_destroy(table->matcher_info[0].jump); if (table->matcher_info[0].matcher) mlx5dr_matcher_destroy(table->matcher_info[0].matcher); + if (table->matcher_info[1].jump) + mlx5dr_action_destroy(table->matcher_info[1].jump); if (table->matcher_info[1].matcher) mlx5dr_matcher_destroy(table->matcher_info[1].matcher); mlx5_hlist_unregister(priv->sh->groups, &table->grp->entry); @@ -6614,6 +6619,7 @@ flow_hw_template_expand_modify_field(struct rte_flow_action actions[], case RTE_FLOW_ACTION_TYPE_DROP: case RTE_FLOW_ACTION_TYPE_SEND_TO_KERNEL: case RTE_FLOW_ACTION_TYPE_JUMP: + case RTE_FLOW_ACTION_TYPE_JUMP_TO_TABLE_INDEX: case RTE_FLOW_ACTION_TYPE_QUEUE: case RTE_FLOW_ACTION_TYPE_RSS: case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT: @@ -6830,6 +6836,43 @@ flow_hw_validate_action_jump(struct rte_eth_dev *dev, return 0; } +static int +mlx5_flow_validate_action_jump_to_table_index(const struct rte_flow_action *action, + const struct rte_flow_action *mask, + struct rte_flow_error *error) +{ + const struct rte_flow_action_jump_to_table_index *m = mask->conf; + const struct rte_flow_action_jump_to_table_index *v = action->conf; + struct mlx5dr_action *jump_action; + uint32_t t_group = 0; + + if (!m || !m->table) + return 0; + if (!v) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, action, + "Invalid jump to matcher action configuration"); + t_group = v->table->grp->group_id; + if (t_group == 0) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, action, + "Unsupported action - jump to root table"); + if (likely(!rte_flow_template_table_resizable(0, &v->table->cfg.attr))) { + jump_action = v->table->matcher_info[0].jump; + } else { + uint32_t selector; + rte_rwlock_read_lock(&v->table->matcher_replace_rwlk); + selector = v->table->matcher_selector; + jump_action = v->table->matcher_info[selector].jump; + rte_rwlock_read_unlock(&v->table->matcher_replace_rwlk); + } + if (jump_action == NULL) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, action, + "Unsupported action - table is not an rule array"); + return 0; +} + static int mlx5_hw_validate_action_mark(struct rte_eth_dev *dev, const struct rte_flow_action *template_action, @@ -7311,6 +7354,12 @@ mlx5_flow_hw_actions_validate(struct rte_eth_dev *dev, return ret; action_flags |= MLX5_FLOW_ACTION_DEFAULT_MISS; break; + case RTE_FLOW_ACTION_TYPE_JUMP_TO_TABLE_INDEX: + ret = mlx5_flow_validate_action_jump_to_table_index(action, mask, error); + if 
(ret < 0) + return ret; + action_flags |= MLX5_FLOW_ACTION_JUMP_TO_TABLE_INDEX; + break; default: return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, @@ -7355,6 +7404,7 @@ static enum mlx5dr_action_type mlx5_hw_dr_action_types[] = { [RTE_FLOW_ACTION_TYPE_IPV6_EXT_PUSH] = MLX5DR_ACTION_TYP_PUSH_IPV6_ROUTE_EXT, [RTE_FLOW_ACTION_TYPE_IPV6_EXT_REMOVE] = MLX5DR_ACTION_TYP_POP_IPV6_ROUTE_EXT, [RTE_FLOW_ACTION_TYPE_NAT64] = MLX5DR_ACTION_TYP_NAT64, + [RTE_FLOW_ACTION_TYPE_JUMP_TO_TABLE_INDEX] = MLX5DR_ACTION_TYP_JUMP_TO_MATCHER, }; static inline void @@ -7582,6 +7632,11 @@ flow_hw_parse_flow_actions_to_dr_actions(struct rte_eth_dev *dev, at->dr_off[i] = curr_off; action_types[curr_off++] = MLX5DR_ACTION_TYP_MISS; break; + case RTE_FLOW_ACTION_TYPE_JUMP_TO_TABLE_INDEX: + *tmpl_flags |= MLX5DR_ACTION_TEMPLATE_FLAG_RELAXED_ORDER; + at->dr_off[i] = curr_off; + action_types[curr_off++] = MLX5DR_ACTION_TYP_JUMP_TO_MATCHER; + break; default: type = mlx5_hw_dr_action_types[at->actions[i].type]; at->dr_off[i] = curr_off; @@ -7873,8 +7928,8 @@ __flow_hw_actions_template_create(struct rte_eth_dev *dev, uint32_t tmpl_flags = 0; int ret; - if (mlx5_flow_hw_actions_validate(dev, attr, actions, masks, - &action_flags, error)) + if (!nt_mode && mlx5_flow_hw_actions_validate(dev, attr, actions, masks, + &action_flags, error)) return NULL; for (i = 0; ra[i].type != RTE_FLOW_ACTION_TYPE_END; ++i) { switch (ra[i].type) { @@ -11669,6 +11724,7 @@ __flow_hw_configure(struct rte_eth_dev *dev, uint32_t action_flags; bool strict_queue = false; + error->type = RTE_FLOW_ERROR_TYPE_NONE; if (mlx5dr_rule_get_handle_size() != MLX5_DR_RULE_SIZE) { rte_errno = EINVAL; goto err; @@ -11830,9 +11886,6 @@ __flow_hw_configure(struct rte_eth_dev *dev, if (!priv->dr_ctx) goto err; priv->nb_queue = nb_q_updated; - rte_spinlock_init(&priv->hw_ctrl_lock); - LIST_INIT(&priv->hw_ctrl_flows); - LIST_INIT(&priv->hw_ext_ctrl_flows); ret = flow_hw_action_template_drop_init(dev, error); if (ret) goto err; @@ -11909,9 +11962,11 @@ __flow_hw_configure(struct rte_eth_dev *dev, goto err; } if (port_attr->nb_counters || (host_priv && host_priv->hws_cpool)) { - if (mlx5_hws_cnt_pool_create(dev, port_attr->nb_counters, - nb_queue, - (host_priv ? host_priv->hws_cpool : NULL))) + struct mlx5_hws_cnt_pool *hws_cpool = host_priv ? host_priv->hws_cpool : NULL; + + ret = mlx5_hws_cnt_pool_create(dev, port_attr->nb_counters, + nb_queue, hws_cpool, error); + if (ret) goto err; } if (port_attr->nb_aging_objects) { @@ -11960,7 +12015,7 @@ __flow_hw_configure(struct rte_eth_dev *dev, if (_queue_attr) mlx5_free(_queue_attr); /* Do not overwrite the internal errno information. 
*/ - if (ret) + if (ret && error->type != RTE_FLOW_ERROR_TYPE_NONE) return ret; return rte_flow_error_set(error, rte_errno, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, @@ -11991,6 +12046,10 @@ flow_hw_configure(struct rte_eth_dev *dev, const struct rte_flow_queue_attr *queue_attr[], struct rte_flow_error *error) { + struct rte_flow_error shadow_error = {0, }; + + if (!error) + error = &shadow_error; return __flow_hw_configure(dev, port_attr, nb_queue, queue_attr, false, error); } @@ -12855,6 +12914,10 @@ flow_hw_action_validate(struct rte_eth_dev *dev, const struct rte_flow_action *action, struct rte_flow_error *err) { + struct rte_flow_error shadow_error = {0, }; + + if (!err) + err = &shadow_error; return flow_hw_action_handle_validate(dev, MLX5_HW_INV_QUEUE, NULL, conf, action, NULL, err); } @@ -13609,6 +13672,7 @@ flow_hw_allocate_actions(struct rte_eth_dev *dev, int ret; uint obj_num; + error->type = RTE_FLOW_ERROR_TYPE_NONE; if (action_flags & MLX5_FLOW_ACTION_AGE) { /* If no age objects were previously allocated. */ if (!priv->hws_age_req) { @@ -13616,7 +13680,8 @@ flow_hw_allocate_actions(struct rte_eth_dev *dev, if (!priv->hws_cpool) { obj_num = MLX5_CNT_NT_MAX(priv); ret = mlx5_hws_cnt_pool_create(dev, obj_num, - priv->nb_queue, NULL); + priv->nb_queue, + NULL, error); if (ret) goto err; } @@ -13632,7 +13697,8 @@ flow_hw_allocate_actions(struct rte_eth_dev *dev, if (!priv->hws_cpool) { obj_num = MLX5_CNT_NT_MAX(priv); ret = mlx5_hws_cnt_pool_create(dev, obj_num, - priv->nb_queue, NULL); + priv->nb_queue, NULL, + error); if (ret) goto err; } @@ -13658,6 +13724,8 @@ flow_hw_allocate_actions(struct rte_eth_dev *dev, } return 0; err: + if (ret && error->type != RTE_FLOW_ERROR_TYPE_NONE) + return ret; return rte_flow_error_set(error, ret, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, "fail to allocate actions"); @@ -13930,6 +13998,7 @@ static uintptr_t flow_hw_list_create(struct rte_eth_dev *dev, .actions = actions, }, }; + struct rte_flow_error shadow_error = {0, }; /* * TODO: add a call to flow_hw_validate function once it exist. 
@@ -13937,6 +14006,8 @@ static uintptr_t flow_hw_list_create(struct rte_eth_dev *dev, */ RTE_SET_USED(encap_idx); + if (!error) + error = &shadow_error; split = mlx5_flow_nta_split_metadata(dev, attr, actions, qrss, action_flags, actions_n, external, &resource, error); if (split < 0) @@ -14016,6 +14087,7 @@ mlx5_mirror_destroy_clone(struct rte_eth_dev *dev, case RTE_FLOW_ACTION_TYPE_JUMP: flow_hw_jump_release(dev, clone->action_ctx); break; + case RTE_FLOW_ACTION_TYPE_JUMP_TO_TABLE_INDEX: case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT: case RTE_FLOW_ACTION_TYPE_PORT_REPRESENTOR: case RTE_FLOW_ACTION_TYPE_RAW_ENCAP: @@ -14049,6 +14121,7 @@ mlx5_mirror_terminal_action(const struct rte_flow_action *action) case RTE_FLOW_ACTION_TYPE_QUEUE: case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT: case RTE_FLOW_ACTION_TYPE_PORT_REPRESENTOR: + case RTE_FLOW_ACTION_TYPE_JUMP_TO_TABLE_INDEX: return true; default: break; @@ -14091,6 +14164,8 @@ mlx5_mirror_validate_sample_action(struct rte_eth_dev *dev, action[1].type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP) return false; break; + case RTE_FLOW_ACTION_TYPE_JUMP_TO_TABLE_INDEX: + break; default: return false; } @@ -14825,8 +14900,14 @@ flow_hw_table_resize(struct rte_eth_dev *dev, struct mlx5dr_action_template *at[MLX5_HW_TBL_MAX_ACTION_TEMPLATE]; struct mlx5dr_match_template *mt[MLX5_HW_TBL_MAX_ITEM_TEMPLATE]; struct mlx5dr_matcher_attr matcher_attr = table->matcher_attr; + struct mlx5dr_action_jump_to_matcher_attr jump_attr = { + .type = MLX5DR_ACTION_JUMP_TO_MATCHER_BY_INDEX, + .matcher = NULL, + }; struct mlx5_multi_pattern_segment *segment = NULL; struct mlx5dr_matcher *matcher = NULL; + struct mlx5dr_action *jump = NULL; + struct mlx5_priv *priv = dev->data->dev_private; uint32_t i, selector = table->matcher_selector; uint32_t other_selector = (selector + 1) & 1; int ret; @@ -14874,6 +14955,17 @@ flow_hw_table_resize(struct rte_eth_dev *dev, table, "failed to create new matcher"); goto error; } + if (matcher_attr.isolated) { + jump_attr.matcher = matcher; + jump = mlx5dr_action_create_jump_to_matcher(priv->dr_ctx, &jump_attr, + mlx5_hw_act_flag[!!table->cfg.attr.flow_attr.group][table->type]); + if (!jump) { + ret = rte_flow_error_set(error, rte_errno, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + table, "failed to create jump to matcher action"); + goto error; + } + } rte_rwlock_write_lock(&table->matcher_replace_rwlk); ret = mlx5dr_matcher_resize_set_target (table->matcher_info[selector].matcher, matcher); @@ -14886,6 +14978,7 @@ flow_hw_table_resize(struct rte_eth_dev *dev, } table->cfg.attr.nb_flows = nb_flows; table->matcher_info[other_selector].matcher = matcher; + table->matcher_info[other_selector].jump = jump; table->matcher_selector = other_selector; rte_atomic_store_explicit(&table->matcher_info[other_selector].refcnt, 0, rte_memory_order_relaxed); @@ -14894,6 +14987,8 @@ flow_hw_table_resize(struct rte_eth_dev *dev, error: if (segment) mlx5_destroy_multi_pattern_segment(segment); + if (jump) + mlx5dr_action_destroy(jump); if (matcher) { ret = mlx5dr_matcher_destroy(matcher); return rte_flow_error_set(error, rte_errno, @@ -14924,6 +15019,8 @@ flow_hw_table_resize_complete(__rte_unused struct rte_eth_dev *dev, return rte_flow_error_set(error, EBUSY, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, table, "cannot complete table resize"); + if (matcher_info->jump) + mlx5dr_action_destroy(matcher_info->jump); ret = mlx5dr_matcher_destroy(matcher_info->matcher); if (ret) return rte_flow_error_set(error, rte_errno, @@ -15084,7 +15181,7 @@ flow_hw_create_ctrl_flow(struct rte_eth_dev 
*owner_dev, uint8_t item_template_idx, struct rte_flow_action actions[], uint8_t action_template_idx, - struct mlx5_hw_ctrl_flow_info *info, + struct mlx5_ctrl_flow_info *info, bool external) { struct mlx5_priv *priv = proxy_dev->data->dev_private; @@ -15093,7 +15190,7 @@ flow_hw_create_ctrl_flow(struct rte_eth_dev *owner_dev, .postpone = 0, }; struct rte_flow *flow = NULL; - struct mlx5_hw_ctrl_flow *entry = NULL; + struct mlx5_ctrl_flow_entry *entry = NULL; int ret; rte_spinlock_lock(&priv->hw_ctrl_lock); @@ -15129,7 +15226,7 @@ flow_hw_create_ctrl_flow(struct rte_eth_dev *owner_dev, if (info) entry->info = *info; else - entry->info.type = MLX5_HW_CTRL_FLOW_TYPE_GENERAL; + entry->info.type = MLX5_CTRL_FLOW_TYPE_GENERAL; if (external) LIST_INSERT_HEAD(&priv->hw_ext_ctrl_flows, entry, next); else @@ -15206,8 +15303,8 @@ static int flow_hw_flush_ctrl_flows_owned_by(struct rte_eth_dev *dev, struct rte_eth_dev *owner) { struct mlx5_priv *priv = dev->data->dev_private; - struct mlx5_hw_ctrl_flow *cf; - struct mlx5_hw_ctrl_flow *cf_next; + struct mlx5_ctrl_flow_entry *cf; + struct mlx5_ctrl_flow_entry *cf_next; int ret; cf = LIST_FIRST(&priv->hw_ctrl_flows); @@ -15285,8 +15382,8 @@ static int flow_hw_flush_all_ctrl_flows(struct rte_eth_dev *dev) { struct mlx5_priv *priv = dev->data->dev_private; - struct mlx5_hw_ctrl_flow *cf; - struct mlx5_hw_ctrl_flow *cf_next; + struct mlx5_ctrl_flow_entry *cf; + struct mlx5_ctrl_flow_entry *cf_next; int ret; cf = LIST_FIRST(&priv->hw_ctrl_flows); @@ -15342,8 +15439,8 @@ mlx5_flow_hw_esw_create_sq_miss_flow(struct rte_eth_dev *dev, uint32_t sqn, bool }; struct rte_flow_item items[3] = { { 0 } }; struct rte_flow_action actions[3] = { { 0 } }; - struct mlx5_hw_ctrl_flow_info flow_info = { - .type = MLX5_HW_CTRL_FLOW_TYPE_SQ_MISS_ROOT, + struct mlx5_ctrl_flow_info flow_info = { + .type = MLX5_CTRL_FLOW_TYPE_SQ_MISS_ROOT, .esw_mgr_sq = sqn, }; struct rte_eth_dev *proxy_dev; @@ -15432,7 +15529,7 @@ mlx5_flow_hw_esw_create_sq_miss_flow(struct rte_eth_dev *dev, uint32_t sqn, bool actions[1] = (struct rte_flow_action){ .type = RTE_FLOW_ACTION_TYPE_END, }; - flow_info.type = MLX5_HW_CTRL_FLOW_TYPE_SQ_MISS; + flow_info.type = MLX5_CTRL_FLOW_TYPE_SQ_MISS; ret = flow_hw_create_ctrl_flow(dev, proxy_dev, proxy_priv->hw_ctrl_fdb->hw_esw_sq_miss_tbl, items, 0, actions, 0, &flow_info, external); @@ -15445,15 +15542,15 @@ mlx5_flow_hw_esw_create_sq_miss_flow(struct rte_eth_dev *dev, uint32_t sqn, bool } static bool -flow_hw_is_matching_sq_miss_flow(struct mlx5_hw_ctrl_flow *cf, +flow_hw_is_matching_sq_miss_flow(struct mlx5_ctrl_flow_entry *cf, struct rte_eth_dev *dev, uint32_t sqn) { if (cf->owner_dev != dev) return false; - if (cf->info.type == MLX5_HW_CTRL_FLOW_TYPE_SQ_MISS_ROOT && cf->info.esw_mgr_sq == sqn) + if (cf->info.type == MLX5_CTRL_FLOW_TYPE_SQ_MISS_ROOT && cf->info.esw_mgr_sq == sqn) return true; - if (cf->info.type == MLX5_HW_CTRL_FLOW_TYPE_SQ_MISS && cf->info.esw_mgr_sq == sqn) + if (cf->info.type == MLX5_CTRL_FLOW_TYPE_SQ_MISS && cf->info.esw_mgr_sq == sqn) return true; return false; } @@ -15465,8 +15562,8 @@ mlx5_flow_hw_esw_destroy_sq_miss_flow(struct rte_eth_dev *dev, uint32_t sqn) uint16_t proxy_port_id = dev->data->port_id; struct rte_eth_dev *proxy_dev; struct mlx5_priv *proxy_priv; - struct mlx5_hw_ctrl_flow *cf; - struct mlx5_hw_ctrl_flow *cf_next; + struct mlx5_ctrl_flow_entry *cf; + struct mlx5_ctrl_flow_entry *cf_next; int ret; ret = rte_flow_pick_transfer_proxy(port_id, &proxy_port_id, NULL); @@ -15527,8 +15624,8 @@ 
mlx5_flow_hw_esw_create_default_jump_flow(struct rte_eth_dev *dev) .type = RTE_FLOW_ACTION_TYPE_END, } }; - struct mlx5_hw_ctrl_flow_info flow_info = { - .type = MLX5_HW_CTRL_FLOW_TYPE_DEFAULT_JUMP, + struct mlx5_ctrl_flow_info flow_info = { + .type = MLX5_CTRL_FLOW_TYPE_DEFAULT_JUMP, }; struct rte_eth_dev *proxy_dev; struct mlx5_priv *proxy_priv; @@ -15608,8 +15705,8 @@ mlx5_flow_hw_create_tx_default_mreg_copy_flow(struct rte_eth_dev *dev) .type = RTE_FLOW_ACTION_TYPE_END, }, }; - struct mlx5_hw_ctrl_flow_info flow_info = { - .type = MLX5_HW_CTRL_FLOW_TYPE_TX_META_COPY, + struct mlx5_ctrl_flow_info flow_info = { + .type = MLX5_CTRL_FLOW_TYPE_TX_META_COPY, }; MLX5_ASSERT(priv->master); @@ -15648,8 +15745,8 @@ mlx5_flow_hw_tx_repr_matching_flow(struct rte_eth_dev *dev, uint32_t sqn, bool e { .type = RTE_FLOW_ACTION_TYPE_END }, { .type = RTE_FLOW_ACTION_TYPE_END }, }; - struct mlx5_hw_ctrl_flow_info flow_info = { - .type = MLX5_HW_CTRL_FLOW_TYPE_TX_REPR_MATCH, + struct mlx5_ctrl_flow_info flow_info = { + .type = MLX5_CTRL_FLOW_TYPE_TX_REPR_MATCH, .tx_repr_sq = sqn, }; @@ -15706,8 +15803,8 @@ mlx5_flow_hw_lacp_rx_flow(struct rte_eth_dev *dev) .type = RTE_FLOW_ACTION_TYPE_END, }, }; - struct mlx5_hw_ctrl_flow_info flow_info = { - .type = MLX5_HW_CTRL_FLOW_TYPE_LACP_RX, + struct mlx5_ctrl_flow_info flow_info = { + .type = MLX5_CTRL_FLOW_TYPE_LACP_RX, }; if (!priv->dr_ctx || !priv->hw_ctrl_fdb || !priv->hw_ctrl_fdb->hw_lacp_rx_tbl) @@ -15829,8 +15926,8 @@ __flow_hw_ctrl_flows_single(struct rte_eth_dev *dev, { .type = RTE_FLOW_ACTION_TYPE_RSS }, { .type = RTE_FLOW_ACTION_TYPE_END }, }; - struct mlx5_hw_ctrl_flow_info flow_info = { - .type = MLX5_HW_CTRL_FLOW_TYPE_DEFAULT_RX_RSS, + struct mlx5_ctrl_flow_info flow_info = { + .type = MLX5_CTRL_FLOW_TYPE_DEFAULT_RX_RSS, }; if (!eth_spec) @@ -15861,8 +15958,8 @@ __flow_hw_ctrl_flows_single_vlan(struct rte_eth_dev *dev, { .type = RTE_FLOW_ACTION_TYPE_RSS }, { .type = RTE_FLOW_ACTION_TYPE_END }, }; - struct mlx5_hw_ctrl_flow_info flow_info = { - .type = MLX5_HW_CTRL_FLOW_TYPE_DEFAULT_RX_RSS, + struct mlx5_ctrl_flow_info flow_info = { + .type = MLX5_CTRL_FLOW_TYPE_DEFAULT_RX_RSS, }; unsigned int i; @@ -15894,28 +15991,26 @@ __flow_hw_ctrl_flows_single_vlan(struct rte_eth_dev *dev, } static int -__flow_hw_ctrl_flows_unicast(struct rte_eth_dev *dev, - struct rte_flow_template_table *tbl, - const enum mlx5_flow_ctrl_rx_eth_pattern_type pattern_type, - const enum mlx5_flow_ctrl_rx_expanded_rss_type rss_type) +__flow_hw_ctrl_flows_unicast_create(struct rte_eth_dev *dev, + struct rte_flow_template_table *tbl, + const enum mlx5_flow_ctrl_rx_expanded_rss_type rss_type, + const struct rte_ether_addr *addr) { - struct rte_flow_item_eth eth_spec; + struct rte_flow_item_eth eth_spec = { + .hdr.dst_addr = *addr, + }; struct rte_flow_item items[5]; struct rte_flow_action actions[] = { { .type = RTE_FLOW_ACTION_TYPE_RSS }, { .type = RTE_FLOW_ACTION_TYPE_END }, }; - struct mlx5_hw_ctrl_flow_info flow_info = { - .type = MLX5_HW_CTRL_FLOW_TYPE_DEFAULT_RX_RSS, - }; - const struct rte_ether_addr cmp = { - .addr_bytes = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, + struct mlx5_ctrl_flow_info flow_info = { + .type = MLX5_CTRL_FLOW_TYPE_DEFAULT_RX_RSS_UNICAST_DMAC, + .uc = { + .dmac = *addr, + }, }; - unsigned int i; - RTE_SET_USED(pattern_type); - - memset(ð_spec, 0, sizeof(eth_spec)); memset(items, 0, sizeof(items)); items[0] = (struct rte_flow_item){ .type = RTE_FLOW_ITEM_TYPE_ETH, @@ -15925,69 +16020,102 @@ __flow_hw_ctrl_flows_unicast(struct rte_eth_dev *dev, items[2] = 
flow_hw_get_ctrl_rx_l3_item(rss_type); items[3] = flow_hw_get_ctrl_rx_l4_item(rss_type); items[4] = (struct rte_flow_item){ .type = RTE_FLOW_ITEM_TYPE_END }; + + if (flow_hw_create_ctrl_flow(dev, dev, tbl, items, 0, actions, 0, &flow_info, false)) + return -rte_errno; + + return 0; +} + +static int +__flow_hw_ctrl_flows_unicast(struct rte_eth_dev *dev, + struct rte_flow_template_table *tbl, + const enum mlx5_flow_ctrl_rx_expanded_rss_type rss_type) +{ + unsigned int i; + int ret; + for (i = 0; i < MLX5_MAX_MAC_ADDRESSES; ++i) { struct rte_ether_addr *mac = &dev->data->mac_addrs[i]; - if (!memcmp(mac, &cmp, sizeof(*mac))) + if (rte_is_zero_ether_addr(mac)) continue; - memcpy(ð_spec.hdr.dst_addr.addr_bytes, mac->addr_bytes, RTE_ETHER_ADDR_LEN); - if (flow_hw_create_ctrl_flow(dev, dev, - tbl, items, 0, actions, 0, &flow_info, false)) - return -rte_errno; + + ret = __flow_hw_ctrl_flows_unicast_create(dev, tbl, rss_type, mac); + if (ret < 0) + return ret; } return 0; } static int -__flow_hw_ctrl_flows_unicast_vlan(struct rte_eth_dev *dev, - struct rte_flow_template_table *tbl, - const enum mlx5_flow_ctrl_rx_eth_pattern_type pattern_type, - const enum mlx5_flow_ctrl_rx_expanded_rss_type rss_type) -{ - struct mlx5_priv *priv = dev->data->dev_private; - struct rte_flow_item_eth eth_spec; +__flow_hw_ctrl_flows_unicast_vlan_create(struct rte_eth_dev *dev, + struct rte_flow_template_table *tbl, + const enum mlx5_flow_ctrl_rx_expanded_rss_type rss_type, + const struct rte_ether_addr *addr, + const uint16_t vid) +{ + struct rte_flow_item_eth eth_spec = { + .hdr.dst_addr = *addr, + }; + struct rte_flow_item_vlan vlan_spec = { + .tci = rte_cpu_to_be_16(vid), + }; struct rte_flow_item items[5]; struct rte_flow_action actions[] = { { .type = RTE_FLOW_ACTION_TYPE_RSS }, { .type = RTE_FLOW_ACTION_TYPE_END }, }; - struct mlx5_hw_ctrl_flow_info flow_info = { - .type = MLX5_HW_CTRL_FLOW_TYPE_DEFAULT_RX_RSS, - }; - const struct rte_ether_addr cmp = { - .addr_bytes = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, + struct mlx5_ctrl_flow_info flow_info = { + .type = MLX5_CTRL_FLOW_TYPE_DEFAULT_RX_RSS_UNICAST_DMAC_VLAN, + .uc = { + .dmac = *addr, + .vlan = vid, + }, }; - unsigned int i; - unsigned int j; - RTE_SET_USED(pattern_type); - - memset(ð_spec, 0, sizeof(eth_spec)); memset(items, 0, sizeof(items)); items[0] = (struct rte_flow_item){ .type = RTE_FLOW_ITEM_TYPE_ETH, .spec = ð_spec, }; - items[1] = (struct rte_flow_item){ .type = RTE_FLOW_ITEM_TYPE_VLAN }; + items[1] = (struct rte_flow_item){ + .type = RTE_FLOW_ITEM_TYPE_VLAN, + .spec = &vlan_spec, + }; items[2] = flow_hw_get_ctrl_rx_l3_item(rss_type); items[3] = flow_hw_get_ctrl_rx_l4_item(rss_type); items[4] = (struct rte_flow_item){ .type = RTE_FLOW_ITEM_TYPE_END }; + + if (flow_hw_create_ctrl_flow(dev, dev, tbl, items, 0, actions, 0, &flow_info, false)) + return -rte_errno; + + return 0; +} + +static int +__flow_hw_ctrl_flows_unicast_vlan(struct rte_eth_dev *dev, + struct rte_flow_template_table *tbl, + const enum mlx5_flow_ctrl_rx_expanded_rss_type rss_type) +{ + struct mlx5_priv *priv = dev->data->dev_private; + unsigned int i; + unsigned int j; + for (i = 0; i < MLX5_MAX_MAC_ADDRESSES; ++i) { struct rte_ether_addr *mac = &dev->data->mac_addrs[i]; - if (!memcmp(mac, &cmp, sizeof(*mac))) + if (rte_is_zero_ether_addr(mac)) continue; - memcpy(ð_spec.hdr.dst_addr.addr_bytes, mac->addr_bytes, RTE_ETHER_ADDR_LEN); + for (j = 0; j < priv->vlan_filter_n; ++j) { uint16_t vlan = priv->vlan_filter[j]; - struct rte_flow_item_vlan vlan_spec = { - .hdr.vlan_tci = 
rte_cpu_to_be_16(vlan), - }; + int ret; - items[1].spec = &vlan_spec; - if (flow_hw_create_ctrl_flow(dev, dev, tbl, items, 0, actions, 0, - &flow_info, false)) - return -rte_errno; + ret = __flow_hw_ctrl_flows_unicast_vlan_create(dev, tbl, rss_type, + mac, vlan); + if (ret < 0) + return ret; } } return 0; @@ -16011,9 +16139,9 @@ __flow_hw_ctrl_flows(struct rte_eth_dev *dev, case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_IPV6_MCAST_VLAN: return __flow_hw_ctrl_flows_single_vlan(dev, tbl, pattern_type, rss_type); case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_DMAC: - return __flow_hw_ctrl_flows_unicast(dev, tbl, pattern_type, rss_type); + return __flow_hw_ctrl_flows_unicast(dev, tbl, rss_type); case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_DMAC_VLAN: - return __flow_hw_ctrl_flows_unicast_vlan(dev, tbl, pattern_type, rss_type); + return __flow_hw_ctrl_flows_unicast_vlan(dev, tbl, rss_type); default: /* Should not reach here. */ MLX5_ASSERT(false); @@ -16094,6 +16222,171 @@ mlx5_flow_hw_ctrl_flows(struct rte_eth_dev *dev, uint32_t flags) return 0; } +static int +mlx5_flow_hw_ctrl_flow_single(struct rte_eth_dev *dev, + const enum mlx5_flow_ctrl_rx_eth_pattern_type eth_pattern_type, + const struct rte_ether_addr *addr, + const uint16_t vlan) +{ + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_flow_hw_ctrl_rx *hw_ctrl_rx; + unsigned int j; + int ret = 0; + + if (!priv->dr_ctx) { + DRV_LOG(DEBUG, "port %u Control flow rules will not be created. " + "HWS needs to be configured beforehand.", + dev->data->port_id); + return 0; + } + if (!priv->hw_ctrl_rx) { + DRV_LOG(ERR, "port %u Control flow rules templates were not created.", + dev->data->port_id); + rte_errno = EINVAL; + return -rte_errno; + } + hw_ctrl_rx = priv->hw_ctrl_rx; + + /* TODO: this part should be somehow refactored. It's common with common flow creation. */ + for (j = 0; j < MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_MAX; ++j) { + const enum mlx5_flow_ctrl_rx_expanded_rss_type rss_type = j; + const unsigned int pti = eth_pattern_type; + struct rte_flow_actions_template *at; + struct mlx5_flow_hw_ctrl_rx_table *tmpls = &hw_ctrl_rx->tables[pti][j]; + const struct mlx5_flow_template_table_cfg cfg = { + .attr = tmpls->attr, + .external = 0, + }; + + if (!hw_ctrl_rx->rss[rss_type]) { + at = flow_hw_create_ctrl_rx_rss_template(dev, rss_type); + if (!at) + return -rte_errno; + hw_ctrl_rx->rss[rss_type] = at; + } else { + at = hw_ctrl_rx->rss[rss_type]; + } + if (!rss_type_is_requested(priv, rss_type)) + continue; + if (!tmpls->tbl) { + tmpls->tbl = flow_hw_table_create(dev, &cfg, + &tmpls->pt, 1, &at, 1, NULL); + if (!tmpls->tbl) { + DRV_LOG(ERR, "port %u Failed to create template table " + "for control flow rules. 
Unable to create " + "control flow rules.", + dev->data->port_id); + return -rte_errno; + } + } + + MLX5_ASSERT(eth_pattern_type == MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_DMAC || + eth_pattern_type == MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_DMAC_VLAN); + + if (eth_pattern_type == MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_DMAC) + ret = __flow_hw_ctrl_flows_unicast_create(dev, tmpls->tbl, rss_type, addr); + else + ret = __flow_hw_ctrl_flows_unicast_vlan_create(dev, tmpls->tbl, rss_type, + addr, vlan); + if (ret) { + DRV_LOG(ERR, "port %u Failed to create unicast control flow rule.", + dev->data->port_id); + return ret; + } + } + + return 0; +} + +int +mlx5_flow_hw_ctrl_flow_dmac(struct rte_eth_dev *dev, + const struct rte_ether_addr *addr) +{ + return mlx5_flow_hw_ctrl_flow_single(dev, MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_DMAC, + addr, 0); +} + +int +mlx5_flow_hw_ctrl_flow_dmac_destroy(struct rte_eth_dev *dev, + const struct rte_ether_addr *addr) +{ + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_ctrl_flow_entry *entry; + struct mlx5_ctrl_flow_entry *tmp; + int ret; + + /* + * HWS does not have automatic RSS flow expansion, + * so each variant of the control flow rule is a separate entry in the list. + * In that case, the whole list must be traversed. + */ + entry = LIST_FIRST(&priv->hw_ctrl_flows); + while (entry != NULL) { + tmp = LIST_NEXT(entry, next); + + if (entry->info.type != MLX5_CTRL_FLOW_TYPE_DEFAULT_RX_RSS_UNICAST_DMAC || + !rte_is_same_ether_addr(addr, &entry->info.uc.dmac)) { + entry = tmp; + continue; + } + + ret = flow_hw_destroy_ctrl_flow(dev, entry->flow); + LIST_REMOVE(entry, next); + mlx5_free(entry); + if (ret) + return ret; + + entry = tmp; + } + return 0; +} + +int +mlx5_flow_hw_ctrl_flow_dmac_vlan(struct rte_eth_dev *dev, + const struct rte_ether_addr *addr, + const uint16_t vlan) +{ + return mlx5_flow_hw_ctrl_flow_single(dev, MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_DMAC_VLAN, + addr, vlan); +} + +int +mlx5_flow_hw_ctrl_flow_dmac_vlan_destroy(struct rte_eth_dev *dev, + const struct rte_ether_addr *addr, + const uint16_t vlan) +{ + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_ctrl_flow_entry *entry; + struct mlx5_ctrl_flow_entry *tmp; + int ret; + + /* + * HWS does not have automatic RSS flow expansion, + * so each variant of the control flow rule is a separate entry in the list. + * In that case, the whole list must be traversed. + */ + entry = LIST_FIRST(&priv->hw_ctrl_flows); + while (entry != NULL) { + tmp = LIST_NEXT(entry, next); + + if (entry->info.type != MLX5_CTRL_FLOW_TYPE_DEFAULT_RX_RSS_UNICAST_DMAC_VLAN || + !rte_is_same_ether_addr(addr, &entry->info.uc.dmac) || + vlan != entry->info.uc.vlan) { + entry = tmp; + continue; + } + + ret = flow_hw_destroy_ctrl_flow(dev, entry->flow); + LIST_REMOVE(entry, next); + mlx5_free(entry); + if (ret) + return ret; + + entry = tmp; + } + return 0; +} + static __rte_always_inline uint32_t mlx5_reformat_domain_to_tbl_type(const struct rte_flow_indir_action_conf *domain) { @@ -16568,6 +16861,8 @@ flow_hw_async_op_validate(struct rte_eth_dev *dev, * The queue to create the flow. * @param[in] table * Pointer to template table. + * @param[in] rule_index + * The item pattern flow follows from the table. * @param[in] items * Items with flow spec value. 
* @param[in] pattern_template_index @@ -16587,6 +16882,8 @@ static int flow_hw_async_create_validate(struct rte_eth_dev *dev, const uint32_t queue, const struct rte_flow_template_table *table, + enum rte_flow_table_insertion_type insertion_type, + uint32_t rule_index, const struct rte_flow_item items[], const uint8_t pattern_template_index, const struct rte_flow_action actions[], @@ -16596,63 +16893,18 @@ flow_hw_async_create_validate(struct rte_eth_dev *dev, if (flow_hw_async_op_validate(dev, queue, table, error)) return -rte_errno; - if (table->cfg.attr.insertion_type != RTE_FLOW_TABLE_INSERTION_TYPE_PATTERN) - return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, - "Only pattern insertion is allowed on this table"); - - if (flow_hw_validate_rule_pattern(dev, table, pattern_template_index, items, error)) - return -rte_errno; - - if (flow_hw_validate_rule_actions(dev, table, action_template_index, actions, error)) - return -rte_errno; - - return 0; -} + if (insertion_type != table->cfg.attr.insertion_type) + return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, "Flow rule insertion type mismatch with table configuration"); -/** - * Validate user input for rte_flow_async_create_by_index() implementation. - * - * If RTE_LIBRTE_MLX5_DEBUG macro is not defined, this function is a no-op. - * - * @param[in] dev - * Pointer to the rte_eth_dev structure. - * @param[in] queue - * The queue to create the flow. - * @param[in] table - * Pointer to template table. - * @param[in] rule_index - * Rule index in the table. - * Inserting a rule to already occupied index results in undefined behavior. - * @param[in] actions - * Action with flow spec value. - * @param[in] action_template_index - * The action pattern flow follows from the table. - * @param[out] error - * Pointer to error structure. - * - * @return - * 0 if user input is valid. - * Negative errno otherwise, rte_errno and error struct is set. - */ -static int -flow_hw_async_create_by_index_validate(struct rte_eth_dev *dev, - const uint32_t queue, - const struct rte_flow_template_table *table, - const uint32_t rule_index, - const struct rte_flow_action actions[], - const uint8_t action_template_index, - struct rte_flow_error *error) -{ - if (flow_hw_async_op_validate(dev, queue, table, error)) - return -rte_errno; + if (table->cfg.attr.insertion_type != RTE_FLOW_TABLE_INSERTION_TYPE_PATTERN) + if (rule_index >= table->cfg.attr.nb_flows) + return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, "Flow rule index exceeds table size"); if (table->cfg.attr.insertion_type != RTE_FLOW_TABLE_INSERTION_TYPE_INDEX) - return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, - "Only index insertion is allowed on this table"); - - if (rule_index >= table->cfg.attr.nb_flows) - return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, - "Flow rule index exceeds table size"); + if (flow_hw_validate_rule_pattern(dev, table, pattern_template_index, items, error)) + return -rte_errno; if (flow_hw_validate_rule_actions(dev, table, action_template_index, actions, error)) return -rte_errno; @@ -16660,7 +16912,6 @@ flow_hw_async_create_by_index_validate(struct rte_eth_dev *dev, return 0; } - /** * Validate user input for rte_flow_async_update() implementation. 
* @@ -16733,6 +16984,7 @@ flow_hw_async_destroy_validate(struct rte_eth_dev *dev, static struct rte_flow_fp_ops mlx5_flow_hw_fp_ops = { .async_create = flow_hw_async_flow_create, .async_create_by_index = flow_hw_async_flow_create_by_index, + .async_create_by_index_with_pattern = flow_hw_async_flow_create_by_index_with_pattern, .async_actions_update = flow_hw_async_flow_update, .async_destroy = flow_hw_async_flow_destroy, .push = flow_hw_push, diff --git a/drivers/net/mlx5/mlx5_flow_hw_stubs.c b/drivers/net/mlx5/mlx5_flow_hw_stubs.c new file mode 100644 index 0000000000..f17bc27899 --- /dev/null +++ b/drivers/net/mlx5/mlx5_flow_hw_stubs.c @@ -0,0 +1,80 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2024 NVIDIA Corporation & Affiliates + */ + +/** + * @file + * + * mlx5_flow_hw.c source file is included in the build only on Linux. + * Functions defined there are compiled if and only if available rdma-core supports DV. + * + * This file contains stubs for any functions exported from that file. + */ + +#include "mlx5_flow.h" + +/* + * This is a stub for the real implementation of this function in mlx5_flow_hw.c in case: + * - PMD is compiled on Windows or + * - available rdma-core does not support HWS. + */ +int +mlx5_flow_hw_ctrl_flow_dmac(struct rte_eth_dev *dev __rte_unused, + const struct rte_ether_addr *addr __rte_unused) +{ + rte_errno = ENOTSUP; + return -rte_errno; +} + +/* + * This is a stub for the real implementation of this function in mlx5_flow_hw.c in case: + * - PMD is compiled on Windows or + * - available rdma-core does not support HWS. + */ +int +mlx5_flow_hw_ctrl_flow_dmac_destroy(struct rte_eth_dev *dev __rte_unused, + const struct rte_ether_addr *addr __rte_unused) +{ + rte_errno = ENOTSUP; + return -rte_errno; +} + +/* + * This is a stub for the real implementation of this function in mlx5_flow_hw.c in case: + * - PMD is compiled on Windows or + * - available rdma-core does not support HWS. + */ +int +mlx5_flow_hw_ctrl_flow_dmac_vlan(struct rte_eth_dev *dev __rte_unused, + const struct rte_ether_addr *addr __rte_unused, + const uint16_t vlan __rte_unused) +{ + rte_errno = ENOTSUP; + return -rte_errno; +} + +/* + * This is a stub for the real implementation of this function in mlx5_flow_hw.c in case: + * - PMD is compiled on Windows or + * - available rdma-core does not support HWS. + */ +int +mlx5_flow_hw_ctrl_flow_dmac_vlan_destroy(struct rte_eth_dev *dev __rte_unused, + const struct rte_ether_addr *addr __rte_unused, + const uint16_t vlan __rte_unused) +{ + rte_errno = ENOTSUP; + return -rte_errno; +} + +/* + * This is a stub for the real implementation of this function in mlx5_flow_hw.c in case: + * - PMD is compiled on Windows or + * - available rdma-core does not support HWS. 
+ */ +bool +mlx5_hw_ctx_validate(__rte_unused const struct rte_eth_dev *dev, + __rte_unused struct rte_flow_error *error) +{ + return false; +} diff --git a/drivers/net/mlx5/mlx5_flow_meter.c b/drivers/net/mlx5/mlx5_flow_meter.c index 19d8607070..804f4371a4 100644 --- a/drivers/net/mlx5/mlx5_flow_meter.c +++ b/drivers/net/mlx5/mlx5_flow_meter.c @@ -378,8 +378,8 @@ mlx5_flow_meter_profile_find(struct mlx5_priv *priv, uint32_t meter_profile_id) if (priv->mtr_profile_arr) return &priv->mtr_profile_arr[meter_profile_id]; - if (mlx5_l3t_get_entry(priv->mtr_profile_tbl, - meter_profile_id, &data) || !data.ptr) + if (!priv->mtr_profile_tbl || + mlx5_l3t_get_entry(priv->mtr_profile_tbl, meter_profile_id, &data) || !data.ptr) return NULL; fmp = data.ptr; /* Remove reference taken by the mlx5_l3t_get_entry. */ @@ -745,6 +745,10 @@ mlx5_flow_mtr_cap_get(struct rte_eth_dev *dev, struct mlx5_priv *priv = dev->data->dev_private; struct mlx5_hca_qos_attr *qattr = &priv->sh->cdev->config.hca_attr.qos; + if (mlx5_hws_active(dev) && !mlx5_hw_ctx_validate(dev, NULL)) + return -rte_mtr_error_set(error, EINVAL, + RTE_MTR_ERROR_TYPE_UNSPECIFIED, NULL, + "non-template flow engine was not configured"); if (!priv->mtr_en) return -rte_mtr_error_set(error, ENOTSUP, RTE_MTR_ERROR_TYPE_UNSPECIFIED, NULL, @@ -903,6 +907,12 @@ mlx5_flow_meter_profile_get(struct rte_eth_dev *dev, { struct mlx5_priv *priv = dev->data->dev_private; + if (mlx5_hws_active(dev) && !mlx5_hw_ctx_validate(dev, NULL)) { + rte_mtr_error_set(error, EINVAL, + RTE_MTR_ERROR_TYPE_UNSPECIFIED, NULL, + "non-template flow engine was not configured"); + return NULL; + } if (!priv->mtr_en) { rte_mtr_error_set(error, ENOTSUP, RTE_MTR_ERROR_TYPE_UNSPECIFIED, NULL, @@ -939,6 +949,10 @@ mlx5_flow_meter_profile_hws_add(struct rte_eth_dev *dev, struct mlx5_flow_meter_profile *fmp; int ret; + if (mlx5_hws_active(dev) && !mlx5_hw_ctx_validate(dev, NULL)) + return -rte_mtr_error_set(error, EINVAL, + RTE_MTR_ERROR_TYPE_UNSPECIFIED, NULL, + "non-template flow engine was not configured"); if (priv->shared_host) return -rte_mtr_error_set(error, ENOTSUP, RTE_MTR_ERROR_TYPE_UNSPECIFIED, NULL, "Meter profiles cannot be created on guest port"); @@ -1167,6 +1181,10 @@ mlx5_flow_meter_policy_hws_validate(struct rte_eth_dev *dev, int ret; int i; + if (mlx5_hws_active(dev) && !mlx5_hw_ctx_validate(dev, NULL)) + return -rte_mtr_error_set(error, EINVAL, + RTE_MTR_ERROR_TYPE_UNSPECIFIED, NULL, + "non-template flow engine was not configured"); if (!priv->mtr_en || !priv->sh->meter_aso_en) return -rte_mtr_error_set(error, ENOTSUP, RTE_MTR_ERROR_TYPE_METER_POLICY, @@ -1496,6 +1514,12 @@ mlx5_flow_meter_policy_get(struct rte_eth_dev *dev, struct mlx5_priv *priv = dev->data->dev_private; uint32_t policy_idx; + if (mlx5_hws_active(dev) && !mlx5_hw_ctx_validate(dev, NULL)) { + rte_mtr_error_set(error, EINVAL, + RTE_MTR_ERROR_TYPE_UNSPECIFIED, NULL, + "non-template flow engine was not configured"); + return NULL; + } if (!priv->mtr_en) { rte_mtr_error_set(error, ENOTSUP, RTE_MTR_ERROR_TYPE_UNSPECIFIED, NULL, @@ -1645,6 +1669,10 @@ mlx5_flow_meter_policy_hws_add(struct rte_eth_dev *dev, [1] = { .type = RTE_FLOW_ITEM_TYPE_END } }; + if (mlx5_hws_active(dev) && !mlx5_hw_ctx_validate(dev, NULL)) + return -rte_mtr_error_set(error, EINVAL, + RTE_MTR_ERROR_TYPE_UNSPECIFIED, NULL, + "non-template flow engine was not configured"); if (!priv->mtr_policy_arr) return mlx5_flow_meter_policy_add(dev, policy_id, policy, error); mtr_policy = mlx5_flow_meter_policy_find(dev, policy_id, NULL); @@ -1914,6 
+1942,7 @@ mlx5_flow_meter_action_modify(struct mlx5_priv *priv, if (sh->meter_aso_en) { fm->is_enable = !!is_enable; aso_mtr = container_of(fm, struct mlx5_aso_mtr, fm); + aso_mtr->state = ASO_METER_WAIT; ret = mlx5_aso_meter_update_by_wqe(priv, MLX5_HW_INV_QUEUE, aso_mtr, &priv->mtr_bulk, NULL, true); @@ -2165,6 +2194,7 @@ mlx5_flow_meter_create(struct rte_eth_dev *dev, uint32_t meter_id, /* If ASO meter supported, update ASO flow meter by wqe. */ if (priv->sh->meter_aso_en) { aso_mtr = container_of(fm, struct mlx5_aso_mtr, fm); + aso_mtr->state = ASO_METER_WAIT; ret = mlx5_aso_meter_update_by_wqe(priv, MLX5_HW_INV_QUEUE, aso_mtr, &priv->mtr_bulk, NULL, true); if (ret) @@ -2230,6 +2260,10 @@ mlx5_flow_meter_hws_create(struct rte_eth_dev *dev, uint32_t meter_id, struct mlx5_hw_q_job *job; int ret; + if (mlx5_hws_active(dev) && !mlx5_hw_ctx_validate(dev, NULL)) + return -rte_mtr_error_set(error, EINVAL, + RTE_MTR_ERROR_TYPE_UNSPECIFIED, NULL, + "non-template flow engine was not configured"); if (!priv->mtr_profile_arr || !priv->mtr_policy_arr || !priv->mtr_bulk.aso) @@ -2520,6 +2554,10 @@ mlx5_flow_meter_enable(struct rte_eth_dev *dev, struct mlx5_flow_meter_info *fm; int ret; + if (mlx5_hws_active(dev) && !mlx5_hw_ctx_validate(dev, NULL)) + return -rte_mtr_error_set(error, EINVAL, + RTE_MTR_ERROR_TYPE_UNSPECIFIED, NULL, + "non-template flow engine was not configured"); if (!priv->mtr_en) return -rte_mtr_error_set(error, ENOTSUP, RTE_MTR_ERROR_TYPE_UNSPECIFIED, NULL, @@ -2609,6 +2647,10 @@ mlx5_flow_meter_profile_update(struct rte_eth_dev *dev, MLX5_FLOW_METER_OBJ_MODIFY_FIELD_CIR; int ret; + if (mlx5_hws_active(dev) && !mlx5_hw_ctx_validate(dev, NULL)) + return -rte_mtr_error_set(error, EINVAL, + RTE_MTR_ERROR_TYPE_UNSPECIFIED, NULL, + "non-template flow engine was not configured"); if (!priv->mtr_en) return -rte_mtr_error_set(error, ENOTSUP, RTE_MTR_ERROR_TYPE_UNSPECIFIED, NULL, diff --git a/drivers/net/mlx5/mlx5_hws_cnt.c b/drivers/net/mlx5/mlx5_hws_cnt.c index a46a4bd94e..def0b19deb 100644 --- a/drivers/net/mlx5/mlx5_hws_cnt.c +++ b/drivers/net/mlx5/mlx5_hws_cnt.c @@ -258,7 +258,8 @@ mlx5_hws_cnt_raw_data_free(struct mlx5_dev_ctx_shared *sh, __rte_unused static struct mlx5_hws_cnt_raw_data_mng * -mlx5_hws_cnt_raw_data_alloc(struct mlx5_dev_ctx_shared *sh, uint32_t n) +mlx5_hws_cnt_raw_data_alloc(struct mlx5_dev_ctx_shared *sh, uint32_t n, + struct rte_flow_error *error) { struct mlx5_hws_cnt_raw_data_mng *mng = NULL; int ret; @@ -268,16 +269,26 @@ mlx5_hws_cnt_raw_data_alloc(struct mlx5_dev_ctx_shared *sh, uint32_t n) MLX5_ASSERT(pgsz > 0); mng = mlx5_malloc(MLX5_MEM_ANY | MLX5_MEM_ZERO, sizeof(*mng), 0, SOCKET_ID_ANY); - if (mng == NULL) + if (mng == NULL) { + rte_flow_error_set(error, ENOMEM, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, "failed to allocate counters memory manager"); goto error; + } mng->raw = mlx5_malloc(MLX5_MEM_ANY | MLX5_MEM_ZERO, sz, pgsz, SOCKET_ID_ANY); - if (mng->raw == NULL) + if (mng->raw == NULL) { + rte_flow_error_set(error, ENOMEM, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, "failed to allocate raw counters memory"); goto error; + } ret = sh->cdev->mr_scache.reg_mr_cb(sh->cdev->pd, mng->raw, sz, &mng->mr); if (ret) { - rte_errno = errno; + rte_flow_error_set(error, errno, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, "failed to register counters memory region"); goto error; } return mng; @@ -391,7 +402,8 @@ mlx5_hws_cnt_cache_init(const struct mlx5_hws_cnt_pool_cfg *pcfg, static struct mlx5_hws_cnt_pool * mlx5_hws_cnt_pool_init(struct mlx5_dev_ctx_shared *sh, 
const struct mlx5_hws_cnt_pool_cfg *pcfg, - const struct mlx5_hws_cache_param *ccfg) + const struct mlx5_hws_cache_param *ccfg, + struct rte_flow_error *error) { char mz_name[RTE_MEMZONE_NAMESIZE]; struct mlx5_hws_cnt_pool *cntp; @@ -401,8 +413,12 @@ mlx5_hws_cnt_pool_init(struct mlx5_dev_ctx_shared *sh, MLX5_ASSERT(ccfg); cntp = mlx5_malloc(MLX5_MEM_ANY | MLX5_MEM_ZERO, sizeof(*cntp), 0, SOCKET_ID_ANY); - if (cntp == NULL) + if (cntp == NULL) { + rte_flow_error_set(error, ENOMEM, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, + "failed to allocate counter pool context"); return NULL; + } cntp->cfg = *pcfg; if (cntp->cfg.host_cpool) @@ -411,12 +427,18 @@ mlx5_hws_cnt_pool_init(struct mlx5_dev_ctx_shared *sh, DRV_LOG(ERR, "Counter number %u " "is greater than the maximum supported (%u).", pcfg->request_num, sh->hws_max_nb_counters); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, + "requested counters number exceeds supported capacity"); goto error; } cnt_num = pcfg->request_num * (100 + pcfg->alloc_factor) / 100; if (cnt_num > UINT32_MAX) { DRV_LOG(ERR, "counter number %"PRIu64" is out of 32bit range", cnt_num); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, + "counters number must fit in 32 bits"); goto error; } /* @@ -427,15 +449,21 @@ mlx5_hws_cnt_pool_init(struct mlx5_dev_ctx_shared *sh, cntp->pool = mlx5_malloc(MLX5_MEM_ANY | MLX5_MEM_ZERO, sizeof(struct mlx5_hws_cnt) * cnt_num, 0, SOCKET_ID_ANY); - if (cntp->pool == NULL) + if (cntp->pool == NULL) { + rte_flow_error_set(error, ENOMEM, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, + "failed to allocate counter pool context"); goto error; + } snprintf(mz_name, sizeof(mz_name), "%s_F_RING", pcfg->name); cntp->free_list = rte_ring_create_elem(mz_name, sizeof(cnt_id_t), (uint32_t)cnt_num, SOCKET_ID_ANY, RING_F_MP_HTS_ENQ | RING_F_MC_HTS_DEQ | RING_F_EXACT_SZ); if (cntp->free_list == NULL) { - DRV_LOG(ERR, "failed to create free list ring"); + rte_flow_error_set(error, ENOMEM, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, + "failed to allocate free counters ring"); goto error; } snprintf(mz_name, sizeof(mz_name), "%s_R_RING", pcfg->name); @@ -443,7 +471,9 @@ mlx5_hws_cnt_pool_init(struct mlx5_dev_ctx_shared *sh, (uint32_t)cnt_num, SOCKET_ID_ANY, RING_F_MP_HTS_ENQ | RING_F_SC_DEQ | RING_F_EXACT_SZ); if (cntp->wait_reset_list == NULL) { - DRV_LOG(ERR, "failed to create wait reset list ring"); + rte_flow_error_set(error, ENOMEM, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, + "failed to allocate counters wait reset ring"); goto error; } snprintf(mz_name, sizeof(mz_name), "%s_U_RING", pcfg->name); @@ -451,14 +481,20 @@ mlx5_hws_cnt_pool_init(struct mlx5_dev_ctx_shared *sh, (uint32_t)cnt_num, SOCKET_ID_ANY, RING_F_MP_HTS_ENQ | RING_F_MC_HTS_DEQ | RING_F_EXACT_SZ); if (cntp->reuse_list == NULL) { - DRV_LOG(ERR, "failed to create reuse list ring"); + rte_flow_error_set(error, ENOMEM, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, + "failed to allocate counters reuse ring"); goto error; } /* Allocate counter cache only if needed. */ if (mlx5_hws_cnt_should_enable_cache(pcfg, ccfg)) { cntp->cache = mlx5_hws_cnt_cache_init(pcfg, ccfg); - if (cntp->cache == NULL) + if (cntp->cache == NULL) { + rte_flow_error_set(error, ENOMEM, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, + "failed to allocate counters cache"); goto error; + } } /* Initialize the time for aging-out calculation. 
*/ cntp->time_of_last_age_check = MLX5_CURR_TIME_SEC; @@ -506,7 +542,8 @@ mlx5_hws_cnt_service_thread_destroy(struct mlx5_dev_ctx_shared *sh) static int mlx5_hws_cnt_pool_dcs_alloc(struct mlx5_dev_ctx_shared *sh, - struct mlx5_hws_cnt_pool *cpool) + struct mlx5_hws_cnt_pool *cpool, + struct rte_flow_error *error) { struct mlx5_hca_attr *hca_attr = &sh->cdev->config.hca_attr; uint32_t max_log_bulk_sz = sh->hws_max_log_bulk_sz; @@ -517,10 +554,10 @@ mlx5_hws_cnt_pool_dcs_alloc(struct mlx5_dev_ctx_shared *sh, struct mlx5_devx_obj *dcs; MLX5_ASSERT(cpool->cfg.host_cpool == NULL); - if (hca_attr->flow_counter_bulk_log_max_alloc == 0) { - DRV_LOG(ERR, "Fw doesn't support bulk log max alloc"); - return -1; - } + if (hca_attr->flow_counter_bulk_log_max_alloc == 0) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, "FW doesn't support bulk log max alloc"); cnt_num = RTE_ALIGN_CEIL(cnt_num, 4); /* minimal 4 counter in bulk. */ log_bulk_sz = RTE_MIN(max_log_bulk_sz, rte_log2_u32(cnt_num)); attr.pd = sh->cdev->pdn; @@ -529,8 +566,12 @@ mlx5_hws_cnt_pool_dcs_alloc(struct mlx5_dev_ctx_shared *sh, attr.flow_counter_bulk_log_size = log_bulk_sz; idx = 0; dcs = mlx5_devx_cmd_flow_counter_alloc_general(sh->cdev->ctx, &attr); - if (dcs == NULL) + if (dcs == NULL) { + rte_flow_error_set(error, rte_errno, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, "FW failed to allocate counters"); goto error; + } cpool->dcs_mng.dcs[idx].obj = dcs; cpool->dcs_mng.dcs[idx].batch_sz = (1 << log_bulk_sz); cpool->dcs_mng.batch_total++; @@ -545,8 +586,12 @@ mlx5_hws_cnt_pool_dcs_alloc(struct mlx5_dev_ctx_shared *sh, continue; dcs = mlx5_devx_cmd_flow_counter_alloc_general (sh->cdev->ctx, &attr); - if (dcs == NULL) + if (dcs == NULL) { + rte_flow_error_set(error, rte_errno, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, "FW failed to allocate counters"); goto error; + } cpool->dcs_mng.dcs[idx].obj = dcs; cpool->dcs_mng.dcs[idx].batch_sz = alloc_candidate; cpool->dcs_mng.dcs[idx].iidx = alloced; @@ -633,15 +678,16 @@ mlx5_hws_cnt_pool_action_create(struct mlx5_priv *priv, int mlx5_hws_cnt_pool_create(struct rte_eth_dev *dev, - uint32_t nb_counters, uint16_t nb_queue, - struct mlx5_hws_cnt_pool *chost) + uint32_t nb_counters, uint16_t nb_queue, + struct mlx5_hws_cnt_pool *chost, + struct rte_flow_error *error) { struct mlx5_hws_cnt_pool *cpool = NULL; struct mlx5_priv *priv = dev->data->dev_private; struct mlx5_hws_cache_param cparam = {0}; struct mlx5_hws_cnt_pool_cfg pcfg = {0}; char *mp_name; - int ret = -1; + int ret = 0; size_t sz; mp_name = mlx5_malloc(MLX5_MEM_ZERO, RTE_MEMZONE_NAMESIZE, 0, SOCKET_ID_ANY); @@ -653,17 +699,21 @@ mlx5_hws_cnt_pool_create(struct rte_eth_dev *dev, pcfg.alloc_factor = HWS_CNT_ALLOC_FACTOR_DEFAULT; if (chost) { pcfg.host_cpool = chost; - cpool = mlx5_hws_cnt_pool_init(priv->sh, &pcfg, &cparam); + cpool = mlx5_hws_cnt_pool_init(priv->sh, &pcfg, &cparam, error); if (cpool == NULL) goto error; ret = mlx5_hws_cnt_pool_action_create(priv, cpool); - if (ret != 0) + if (ret != 0) { + rte_flow_error_set(error, -ret, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, "failed to allocate counter actions on guest port"); goto error; + } goto success; } /* init cnt service if not. 
*/ if (priv->sh->cnt_svc == NULL) { - ret = mlx5_hws_cnt_svc_init(priv->sh); + ret = mlx5_hws_cnt_svc_init(priv->sh, error); if (ret) return ret; } @@ -672,14 +722,14 @@ mlx5_hws_cnt_pool_create(struct rte_eth_dev *dev, cparam.q_num = nb_queue; cparam.threshold = HWS_CNT_CACHE_THRESHOLD_DEFAULT; cparam.size = HWS_CNT_CACHE_SZ_DEFAULT; - cpool = mlx5_hws_cnt_pool_init(priv->sh, &pcfg, &cparam); + cpool = mlx5_hws_cnt_pool_init(priv->sh, &pcfg, &cparam, error); if (cpool == NULL) goto error; - ret = mlx5_hws_cnt_pool_dcs_alloc(priv->sh, cpool); + ret = mlx5_hws_cnt_pool_dcs_alloc(priv->sh, cpool, error); if (ret != 0) goto error; sz = RTE_ALIGN_CEIL(mlx5_hws_cnt_pool_get_size(cpool), 4); - cpool->raw_mng = mlx5_hws_cnt_raw_data_alloc(priv->sh, sz); + cpool->raw_mng = mlx5_hws_cnt_raw_data_alloc(priv->sh, sz, error); if (cpool->raw_mng == NULL) goto error; __hws_cnt_id_load(cpool); @@ -691,8 +741,12 @@ mlx5_hws_cnt_pool_create(struct rte_eth_dev *dev, */ cpool->query_gen = 1; ret = mlx5_hws_cnt_pool_action_create(priv, cpool); - if (ret != 0) + if (ret != 0) { + rte_flow_error_set(error, -ret, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, "failed to allocate counter actions"); goto error; + } priv->sh->cnt_svc->refcnt++; cpool->priv = priv; rte_spinlock_lock(&priv->sh->cpool_lock); @@ -702,6 +756,7 @@ mlx5_hws_cnt_pool_create(struct rte_eth_dev *dev, priv->hws_cpool = cpool; return 0; error: + MLX5_ASSERT(ret); mlx5_hws_cnt_pool_destroy(priv->sh, cpool); priv->hws_cpool = NULL; return ret; @@ -736,21 +791,22 @@ mlx5_hws_cnt_pool_destroy(struct mlx5_dev_ctx_shared *sh, } int -mlx5_hws_cnt_svc_init(struct mlx5_dev_ctx_shared *sh) +mlx5_hws_cnt_svc_init(struct mlx5_dev_ctx_shared *sh, + struct rte_flow_error *error) { int ret; sh->cnt_svc = mlx5_malloc(MLX5_MEM_ANY | MLX5_MEM_ZERO, sizeof(*sh->cnt_svc), 0, SOCKET_ID_ANY); if (sh->cnt_svc == NULL) - return -1; + goto err; sh->cnt_svc->query_interval = sh->config.cnt_svc.cycle_time; sh->cnt_svc->service_core = sh->config.cnt_svc.service_core; ret = mlx5_aso_cnt_queue_init(sh); if (ret != 0) { mlx5_free(sh->cnt_svc); sh->cnt_svc = NULL; - return -1; + goto err; } ret = mlx5_hws_cnt_service_thread_create(sh); if (ret != 0) { @@ -759,6 +815,11 @@ mlx5_hws_cnt_svc_init(struct mlx5_dev_ctx_shared *sh) sh->cnt_svc = NULL; } return 0; +err: + return rte_flow_error_set(error, ENOMEM, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, "failed to init counters service"); + } void diff --git a/drivers/net/mlx5/mlx5_hws_cnt.h b/drivers/net/mlx5/mlx5_hws_cnt.h index 996ac8dd9a..d8da9dfcdd 100644 --- a/drivers/net/mlx5/mlx5_hws_cnt.h +++ b/drivers/net/mlx5/mlx5_hws_cnt.h @@ -715,14 +715,15 @@ mlx5_hws_cnt_service_thread_destroy(struct mlx5_dev_ctx_shared *sh); int mlx5_hws_cnt_pool_create(struct rte_eth_dev *dev, uint32_t nb_counters, uint16_t nb_queue, - struct mlx5_hws_cnt_pool *chost); + struct mlx5_hws_cnt_pool *chost, struct rte_flow_error *error); void mlx5_hws_cnt_pool_destroy(struct mlx5_dev_ctx_shared *sh, struct mlx5_hws_cnt_pool *cpool); int -mlx5_hws_cnt_svc_init(struct mlx5_dev_ctx_shared *sh); +mlx5_hws_cnt_svc_init(struct mlx5_dev_ctx_shared *sh, + struct rte_flow_error *error); void mlx5_hws_cnt_svc_deinit(struct mlx5_dev_ctx_shared *sh); diff --git a/drivers/net/mlx5/mlx5_mac.c b/drivers/net/mlx5/mlx5_mac.c index 22a756a52b..0e5d2be530 100644 --- a/drivers/net/mlx5/mlx5_mac.c +++ b/drivers/net/mlx5/mlx5_mac.c @@ -25,15 +25,25 @@ * Pointer to Ethernet device structure. * @param index * MAC address index. 
+ * @param addr + * If the MAC address is actually removed, it will be stored here if the pointer is not NULL. + * + * @return + * True if there was a MAC address under the given index. */ -static void -mlx5_internal_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index) +static bool +mlx5_internal_mac_addr_remove(struct rte_eth_dev *dev, + uint32_t index, + struct rte_ether_addr *addr) { MLX5_ASSERT(index < MLX5_MAX_MAC_ADDRESSES); if (rte_is_zero_ether_addr(&dev->data->mac_addrs[index])) - return; + return false; mlx5_os_mac_addr_remove(dev, index); + if (addr != NULL) + *addr = dev->data->mac_addrs[index]; memset(&dev->data->mac_addrs[index], 0, sizeof(struct rte_ether_addr)); + return true; } /** @@ -91,15 +101,15 @@ mlx5_internal_mac_addr_add(struct rte_eth_dev *dev, struct rte_ether_addr *mac, void mlx5_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index) { + struct rte_ether_addr addr = { 0 }; int ret; if (index >= MLX5_MAX_UC_MAC_ADDRESSES) return; - mlx5_internal_mac_addr_remove(dev, index); - if (!dev->data->promiscuous) { - ret = mlx5_traffic_restart(dev); + if (mlx5_internal_mac_addr_remove(dev, index, &addr)) { + ret = mlx5_traffic_mac_remove(dev, &addr); if (ret) - DRV_LOG(ERR, "port %u cannot restart traffic: %s", + DRV_LOG(ERR, "port %u cannot update control flow rules: %s", dev->data->port_id, strerror(rte_errno)); } } @@ -132,9 +142,7 @@ mlx5_mac_addr_add(struct rte_eth_dev *dev, struct rte_ether_addr *mac, ret = mlx5_internal_mac_addr_add(dev, mac, index); if (ret < 0) return ret; - if (!dev->data->promiscuous) - return mlx5_traffic_restart(dev); - return 0; + return mlx5_traffic_mac_add(dev, mac); } /** @@ -154,6 +162,12 @@ mlx5_mac_addr_set(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr) uint16_t port_id; struct mlx5_priv *priv = dev->data->dev_private; struct mlx5_priv *pf_priv; + struct rte_ether_addr old_mac_addr = dev->data->mac_addrs[0]; + int ret; + + /* ethdev does not check if the new default address is the same as the old one.
*/ + if (rte_is_same_ether_addr(mac_addr, &old_mac_addr)) + return 0; /* * Configuring the VF instead of its representor, @@ -188,7 +202,10 @@ mlx5_mac_addr_set(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr) DRV_LOG(DEBUG, "port %u setting primary MAC address", dev->data->port_id); - return mlx5_mac_addr_add(dev, mac_addr, 0, 0); + ret = mlx5_mac_addr_add(dev, mac_addr, 0, 0); + if (ret) + return ret; + return mlx5_traffic_mac_remove(dev, &old_mac_addr); } /** @@ -208,7 +225,7 @@ mlx5_set_mc_addr_list(struct rte_eth_dev *dev, return -rte_errno; } for (i = MLX5_MAX_UC_MAC_ADDRESSES; i != MLX5_MAX_MAC_ADDRESSES; ++i) - mlx5_internal_mac_addr_remove(dev, i); + mlx5_internal_mac_addr_remove(dev, i, NULL); i = MLX5_MAX_UC_MAC_ADDRESSES; while (nb_mc_addr--) { ret = mlx5_internal_mac_addr_add(dev, mc_addr_set++, i++); diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c index c6655b7db4..5eac224b76 100644 --- a/drivers/net/mlx5/mlx5_rxq.c +++ b/drivers/net/mlx5/mlx5_rxq.c @@ -655,6 +655,14 @@ mlx5_rx_queue_pre_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t *desc, struct mlx5_rxq_priv *rxq; bool empty; + if (*desc > 1 << priv->sh->cdev->config.hca_attr.log_max_wq_sz) { + DRV_LOG(ERR, + "port %u number of descriptors requested for Rx queue" + " %u is more than supported", + dev->data->port_id, idx); + rte_errno = EINVAL; + return -EINVAL; + } if (!rte_is_power_of_2(*desc)) { *desc = 1 << log2above(*desc); DRV_LOG(WARNING, diff --git a/drivers/net/mlx5/mlx5_rxtx_vec_neon.h b/drivers/net/mlx5/mlx5_rxtx_vec_neon.h index 510f60b25d..0ce9827ed9 100644 --- a/drivers/net/mlx5/mlx5_rxtx_vec_neon.h +++ b/drivers/net/mlx5/mlx5_rxtx_vec_neon.h @@ -11,6 +11,7 @@ #include #include +#include #include #include #include @@ -620,7 +621,7 @@ rxq_cq_process_v(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cq, /* * Note that vectors have reverse order - {v3, v2, v1, v0}, because - * there's no instruction to count trailing zeros. __builtin_clzl() is + * there's no instruction to count trailing zeros. rte_clz64() is * used instead. * * A. copy 4 mbuf pointers from elts ring to returning pkts. @@ -808,13 +809,12 @@ rxq_cq_process_v(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cq, /* E.2 mask out invalid entries. */ comp_mask = vbic_u16(comp_mask, invalid_mask); /* E.3 get the first compressed CQE. */ - comp_idx = __builtin_clzl(vget_lane_u64(vreinterpret_u64_u16( - comp_mask), 0)) / - (sizeof(uint16_t) * 8); + comp_idx = rte_clz64(vget_lane_u64(vreinterpret_u64_u16(comp_mask), 0)) / + (sizeof(uint16_t) * 8); invalid_mask = vorr_u16(invalid_mask, comp_mask); /* D.7 count non-compressed valid CQEs. */ - n = __builtin_clzl(vget_lane_u64(vreinterpret_u64_u16( - invalid_mask), 0)) / (sizeof(uint16_t) * 8); + n = rte_clz64(vget_lane_u64(vreinterpret_u64_u16(invalid_mask), 0)) / + (sizeof(uint16_t) * 8); nocmp_n += n; /* * D.2 mask out entries after the compressed CQE. diff --git a/drivers/net/mlx5/mlx5_trace.h b/drivers/net/mlx5/mlx5_trace.h index a8f0b372c8..4fc3584acc 100644 --- a/drivers/net/mlx5/mlx5_trace.h +++ b/drivers/net/mlx5/mlx5_trace.h @@ -22,21 +22,24 @@ extern "C" { /* TX burst subroutines trace points. 
*/ RTE_TRACE_POINT_FP( rte_pmd_mlx5_trace_tx_entry, - RTE_TRACE_POINT_ARGS(uint16_t port_id, uint16_t queue_id), + RTE_TRACE_POINT_ARGS(uint64_t real_time, uint16_t port_id, uint16_t queue_id), + rte_trace_point_emit_u64(real_time); rte_trace_point_emit_u16(port_id); rte_trace_point_emit_u16(queue_id); ) RTE_TRACE_POINT_FP( rte_pmd_mlx5_trace_tx_exit, - RTE_TRACE_POINT_ARGS(uint16_t nb_sent, uint16_t nb_req), + RTE_TRACE_POINT_ARGS(uint64_t real_time, uint16_t nb_sent, uint16_t nb_req), + rte_trace_point_emit_u64(real_time); rte_trace_point_emit_u16(nb_sent); rte_trace_point_emit_u16(nb_req); ) RTE_TRACE_POINT_FP( rte_pmd_mlx5_trace_tx_wqe, - RTE_TRACE_POINT_ARGS(uint32_t opcode), + RTE_TRACE_POINT_ARGS(uint64_t real_time, uint32_t opcode), + rte_trace_point_emit_u64(real_time); rte_trace_point_emit_u32(opcode); ) diff --git a/drivers/net/mlx5/mlx5_trigger.c b/drivers/net/mlx5/mlx5_trigger.c index bf836c92fc..79b3d4d982 100644 --- a/drivers/net/mlx5/mlx5_trigger.c +++ b/drivers/net/mlx5/mlx5_trigger.c @@ -20,6 +20,8 @@ #include "mlx5_utils.h" #include "rte_pmd_mlx5.h" +static void mlx5_traffic_disable_legacy(struct rte_eth_dev *dev); + /** * Stop traffic on Tx queues. * @@ -1158,11 +1160,17 @@ mlx5_dev_start(struct rte_eth_dev *dev) DRV_LOG(DEBUG, "port %u starting device", dev->data->port_id); #ifdef HAVE_MLX5_HWS_SUPPORT if (priv->sh->config.dv_flow_en == 2) { + struct rte_flow_error error = { 0, }; + /*If previous configuration does not exist. */ if (!(priv->dr_ctx)) { - ret = flow_hw_init(dev, NULL); - if (ret) + ret = flow_hw_init(dev, &error); + if (ret) { + DRV_LOG(ERR, "Failed to start port %u %s: %s", + dev->data->port_id, dev->data->name, + error.message); return ret; + } } /* If there is no E-Switch, then there are no start/stop order limitations. */ if (!priv->sh->config.dv_esw_en) @@ -1736,11 +1744,31 @@ mlx5_traffic_enable(struct rte_eth_dev *dev) return 0; error: ret = rte_errno; /* Save rte_errno before cleanup. */ - mlx5_flow_list_flush(dev, MLX5_FLOW_TYPE_CTL, false); + mlx5_traffic_disable_legacy(dev); rte_errno = ret; /* Restore rte_errno. 
*/ return -rte_errno; } +static void +mlx5_traffic_disable_legacy(struct rte_eth_dev *dev) +{ + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_ctrl_flow_entry *entry; + struct mlx5_ctrl_flow_entry *tmp; + + /* + * Free registered control flow rules first, + * to free the memory allocated for list entries + */ + entry = LIST_FIRST(&priv->hw_ctrl_flows); + while (entry != NULL) { + tmp = LIST_NEXT(entry, next); + mlx5_legacy_ctrl_flow_destroy(dev, entry); + entry = tmp; + } + + mlx5_flow_list_flush(dev, MLX5_FLOW_TYPE_CTL, false); +} /** * Disable traffic flows configured by control plane @@ -1758,7 +1786,7 @@ mlx5_traffic_disable(struct rte_eth_dev *dev) mlx5_flow_hw_flush_ctrl_flows(dev); else #endif - mlx5_flow_list_flush(dev, MLX5_FLOW_TYPE_CTL, false); + mlx5_traffic_disable_legacy(dev); } /** @@ -1782,3 +1810,239 @@ mlx5_traffic_restart(struct rte_eth_dev *dev) } return 0; } + +static bool +mac_flows_update_needed(struct rte_eth_dev *dev) +{ + struct mlx5_priv *priv = dev->data->dev_private; + + if (!dev->data->dev_started) + return false; + if (dev->data->promiscuous) + return false; + if (priv->isolated) + return false; + + return true; +} + +static int +traffic_dmac_create(struct rte_eth_dev *dev, const struct rte_ether_addr *addr) +{ + struct mlx5_priv *priv = dev->data->dev_private; + + if (priv->sh->config.dv_flow_en == 2) + return mlx5_flow_hw_ctrl_flow_dmac(dev, addr); + else + return mlx5_legacy_dmac_flow_create(dev, addr); +} + +static int +traffic_dmac_destroy(struct rte_eth_dev *dev, const struct rte_ether_addr *addr) +{ + struct mlx5_priv *priv = dev->data->dev_private; + + if (priv->sh->config.dv_flow_en == 2) + return mlx5_flow_hw_ctrl_flow_dmac_destroy(dev, addr); + else + return mlx5_legacy_dmac_flow_destroy(dev, addr); +} + +static int +traffic_dmac_vlan_create(struct rte_eth_dev *dev, + const struct rte_ether_addr *addr, + const uint16_t vid) +{ + struct mlx5_priv *priv = dev->data->dev_private; + + if (priv->sh->config.dv_flow_en == 2) + return mlx5_flow_hw_ctrl_flow_dmac_vlan(dev, addr, vid); + else + return mlx5_legacy_dmac_vlan_flow_create(dev, addr, vid); +} + +static int +traffic_dmac_vlan_destroy(struct rte_eth_dev *dev, + const struct rte_ether_addr *addr, + const uint16_t vid) +{ + struct mlx5_priv *priv = dev->data->dev_private; + + if (priv->sh->config.dv_flow_en == 2) + return mlx5_flow_hw_ctrl_flow_dmac_vlan_destroy(dev, addr, vid); + else + return mlx5_legacy_dmac_vlan_flow_destroy(dev, addr, vid); +} + +/** + * Adjust Rx control flow rules to allow traffic on provided MAC address. + */ +int +mlx5_traffic_mac_add(struct rte_eth_dev *dev, const struct rte_ether_addr *addr) +{ + struct mlx5_priv *priv = dev->data->dev_private; + + if (!mac_flows_update_needed(dev)) + return 0; + + if (priv->vlan_filter_n > 0) { + unsigned int i; + + for (i = 0; i < priv->vlan_filter_n; ++i) { + uint16_t vlan = priv->vlan_filter[i]; + int ret; + + if (mlx5_ctrl_flow_uc_dmac_vlan_exists(dev, addr, vlan)) + continue; + + ret = traffic_dmac_vlan_create(dev, addr, vlan); + if (ret != 0) + return ret; + } + + return 0; + } + + if (mlx5_ctrl_flow_uc_dmac_exists(dev, addr)) + return 0; + + return traffic_dmac_create(dev, addr); +} + +/** + * Adjust Rx control flow rules to disallow traffic with removed MAC address. 
+ */ +int +mlx5_traffic_mac_remove(struct rte_eth_dev *dev, const struct rte_ether_addr *addr) +{ + struct mlx5_priv *priv = dev->data->dev_private; + + if (!mac_flows_update_needed(dev)) + return 0; + + if (priv->vlan_filter_n > 0) { + unsigned int i; + + for (i = 0; i < priv->vlan_filter_n; ++i) { + uint16_t vlan = priv->vlan_filter[i]; + int ret; + + if (!mlx5_ctrl_flow_uc_dmac_vlan_exists(dev, addr, vlan)) + continue; + + ret = traffic_dmac_vlan_destroy(dev, addr, vlan); + if (ret != 0) + return ret; + } + + return 0; + } + + if (!mlx5_ctrl_flow_uc_dmac_exists(dev, addr)) + return 0; + + return traffic_dmac_destroy(dev, addr); +} + +/** + * Adjust Rx control flow rules to allow traffic on provided VLAN. + * + * Assumptions: + * - Called when VLAN is added. + * - At least one VLAN is enabled before function call. + * + * This functions assumes that VLAN is new and was not included in + * Rx control flow rules set up before calling it. + */ +int +mlx5_traffic_vlan_add(struct rte_eth_dev *dev, const uint16_t vid) +{ + struct mlx5_priv *priv = dev->data->dev_private; + unsigned int i; + int ret; + + if (!mac_flows_update_needed(dev)) + return 0; + + /* Add all unicast DMAC flow rules with new VLAN attached. */ + for (i = 0; i != MLX5_MAX_MAC_ADDRESSES; ++i) { + struct rte_ether_addr *mac = &dev->data->mac_addrs[i]; + + if (rte_is_zero_ether_addr(mac)) + continue; + + ret = traffic_dmac_vlan_create(dev, mac, vid); + if (ret != 0) + return ret; + } + + if (priv->vlan_filter_n == 1) { + /* + * Adding first VLAN. Need to remove unicast DMAC rules before adding new rules. + * Removing after creating VLAN rules so that traffic "gap" is not introduced. + */ + + for (i = 0; i != MLX5_MAX_MAC_ADDRESSES; ++i) { + struct rte_ether_addr *mac = &dev->data->mac_addrs[i]; + + if (rte_is_zero_ether_addr(mac)) + continue; + + ret = traffic_dmac_destroy(dev, mac); + if (ret != 0) + return ret; + } + } + + return 0; +} + +/** + * Adjust Rx control flow rules to disallow traffic with removed VLAN. + * + * Assumptions: + * + * - VLAN was really removed. + */ +int +mlx5_traffic_vlan_remove(struct rte_eth_dev *dev, const uint16_t vid) +{ + struct mlx5_priv *priv = dev->data->dev_private; + unsigned int i; + int ret; + + if (!mac_flows_update_needed(dev)) + return 0; + + if (priv->vlan_filter_n == 0) { + /* + * If there are no VLANs as a result, unicast DMAC flow rules must be recreated. + * Recreating first to ensure no traffic "gap". + */ + + for (i = 0; i != MLX5_MAX_MAC_ADDRESSES; ++i) { + struct rte_ether_addr *mac = &dev->data->mac_addrs[i]; + + if (rte_is_zero_ether_addr(mac)) + continue; + + ret = traffic_dmac_create(dev, mac); + if (ret != 0) + return ret; + } + } + + /* Remove all unicast DMAC flow rules with this VLAN. */ + for (i = 0; i != MLX5_MAX_MAC_ADDRESSES; ++i) { + struct rte_ether_addr *mac = &dev->data->mac_addrs[i]; + + if (rte_is_zero_ether_addr(mac)) + continue; + + ret = traffic_dmac_vlan_destroy(dev, mac, vid); + if (ret != 0) + return ret; + } + + return 0; +} diff --git a/drivers/net/mlx5/mlx5_tx.c b/drivers/net/mlx5/mlx5_tx.c index 04f80bb9bd..fc105970a3 100644 --- a/drivers/net/mlx5/mlx5_tx.c +++ b/drivers/net/mlx5/mlx5_tx.c @@ -619,7 +619,7 @@ mlx5_select_tx_function(struct rte_eth_dev *dev) * Check whether it has minimal amount * of not requested offloads. */ - tmp = __builtin_popcountl(tmp & ~olx); + tmp = rte_popcount64(tmp & ~olx); if (m >= RTE_DIM(txoff_func) || tmp < diff) { /* First or better match, save and continue. 
m = i; diff --git a/drivers/net/mlx5/mlx5_tx.h b/drivers/net/mlx5/mlx5_tx.h index 983913faa2..55568c41b1 100644 --- a/drivers/net/mlx5/mlx5_tx.h +++ b/drivers/net/mlx5/mlx5_tx.h @@ -372,6 +372,46 @@ mlx5_txpp_convert_tx_ts(struct mlx5_dev_ctx_shared *sh, uint64_t mts) return ci; } +/** + * Read real time clock counter directly from the device PCI BAR area. + * The PCI BAR must be mapped to the process memory space at initialization. + * + * @param dev + * Device to read clock counter from + * + * @return + * 0 - if HCA BAR is not supported or not mapped. + * !=0 - read 64-bit value of real-time in UTC format (nanoseconds) + */ +static __rte_always_inline uint64_t mlx5_read_pcibar_clock(struct rte_eth_dev *dev) +{ + struct mlx5_proc_priv *ppriv = dev->process_private; + + if (ppriv && ppriv->hca_bar) { + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_dev_ctx_shared *sh = priv->sh; + uint64_t *hca_ptr = (uint64_t *)(ppriv->hca_bar) + + __mlx5_64_off(initial_seg, real_time); + uint64_t __rte_atomic *ts_addr; + uint64_t ts; + + ts_addr = (uint64_t __rte_atomic *)hca_ptr; + ts = rte_atomic_load_explicit(ts_addr, rte_memory_order_seq_cst); + ts = rte_be_to_cpu_64(ts); + ts = mlx5_txpp_convert_rx_ts(sh, ts); + return ts; + } + return 0; +} + +static __rte_always_inline uint64_t mlx5_read_pcibar_clock_from_txq(struct mlx5_txq_data *txq) +{ + struct mlx5_txq_ctrl *txq_ctrl = container_of(txq, struct mlx5_txq_ctrl, txq); + struct rte_eth_dev *dev = ETH_DEV(txq_ctrl->priv); + + return mlx5_read_pcibar_clock(dev); +} + /** * Set Software Parser flags and offsets in Ethernet Segment of WQE. * Flags must be preliminary initialized to zero. @@ -809,6 +849,7 @@ mlx5_tx_cseg_init(struct mlx5_txq_data *__rte_restrict txq, unsigned int olx) { struct mlx5_wqe_cseg *__rte_restrict cs = &wqe->cseg; + uint64_t real_time; /* For legacy MPW replace the EMPW by TSO with modifier. */ if (MLX5_TXOFF_CONFIG(MPW) && opcode == MLX5_OPCODE_ENHANCED_MPSW) @@ -822,9 +863,12 @@ mlx5_tx_cseg_init(struct mlx5_txq_data *__rte_restrict txq, cs->flags = RTE_BE32(MLX5_COMP_ONLY_FIRST_ERR << MLX5_COMP_MODE_OFFSET); cs->misc = RTE_BE32(0); - if (__rte_trace_point_fp_is_enabled() && !loc->pkts_sent) - rte_pmd_mlx5_trace_tx_entry(txq->port_id, txq->idx); - rte_pmd_mlx5_trace_tx_wqe((txq->wqe_ci << 8) | opcode); + if (__rte_trace_point_fp_is_enabled()) { + real_time = mlx5_read_pcibar_clock_from_txq(txq); + if (!loc->pkts_sent) + rte_pmd_mlx5_trace_tx_entry(real_time, txq->port_id, txq->idx); + rte_pmd_mlx5_trace_tx_wqe(real_time, (txq->wqe_ci << 8) | opcode); + } } /** @@ -3786,7 +3830,8 @@ mlx5_tx_burst_tmpl(struct mlx5_txq_data *__rte_restrict txq, __mlx5_tx_free_mbuf(txq, pkts, loc.mbuf_free, olx); /* Trace productive bursts only.
*/ if (__rte_trace_point_fp_is_enabled() && loc.pkts_sent) - rte_pmd_mlx5_trace_tx_exit(loc.pkts_sent, pkts_n); + rte_pmd_mlx5_trace_tx_exit(mlx5_read_pcibar_clock_from_txq(txq), + loc.pkts_sent, pkts_n); return loc.pkts_sent; } diff --git a/drivers/net/mlx5/mlx5_txpp.c b/drivers/net/mlx5/mlx5_txpp.c index 4e26fa2db8..e6d3ad83e9 100644 --- a/drivers/net/mlx5/mlx5_txpp.c +++ b/drivers/net/mlx5/mlx5_txpp.c @@ -971,7 +971,6 @@ mlx5_txpp_read_clock(struct rte_eth_dev *dev, uint64_t *timestamp) { struct mlx5_priv *priv = dev->data->dev_private; struct mlx5_dev_ctx_shared *sh = priv->sh; - struct mlx5_proc_priv *ppriv; uint64_t ts; int ret; @@ -997,15 +996,9 @@ mlx5_txpp_read_clock(struct rte_eth_dev *dev, uint64_t *timestamp) *timestamp = ts; return 0; } - /* Check and try to map HCA PIC BAR to allow reading real time. */ - ppriv = dev->process_private; - if (ppriv && !ppriv->hca_bar && - sh->dev_cap.rt_timestamp && mlx5_dev_is_pci(dev->device)) - mlx5_txpp_map_hca_bar(dev); /* Check if we can read timestamp directly from hardware. */ - if (ppriv && ppriv->hca_bar) { - ts = MLX5_GET64(initial_seg, ppriv->hca_bar, real_time); - ts = mlx5_txpp_convert_rx_ts(sh, ts); + ts = mlx5_read_pcibar_clock(dev); + if (ts != 0) { *timestamp = ts; return 0; } diff --git a/drivers/net/mlx5/mlx5_txq.c b/drivers/net/mlx5/mlx5_txq.c index f05534e168..3e93517323 100644 --- a/drivers/net/mlx5/mlx5_txq.c +++ b/drivers/net/mlx5/mlx5_txq.c @@ -333,6 +333,14 @@ mlx5_tx_queue_pre_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t *desc) { struct mlx5_priv *priv = dev->data->dev_private; + if (*desc > 1 << priv->sh->cdev->config.hca_attr.log_max_wq_sz) { + DRV_LOG(ERR, + "port %u number of descriptors requested for Tx queue" + " %u is more than supported", + dev->data->port_id, idx); + rte_errno = EINVAL; + return -EINVAL; + } if (*desc <= MLX5_TX_COMP_THRESH) { DRV_LOG(WARNING, "port %u number of descriptors requested for Tx queue" diff --git a/drivers/net/mlx5/mlx5_vlan.c b/drivers/net/mlx5/mlx5_vlan.c index e7161b66fe..43a314a679 100644 --- a/drivers/net/mlx5/mlx5_vlan.c +++ b/drivers/net/mlx5/mlx5_vlan.c @@ -54,7 +54,7 @@ mlx5_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on) MLX5_ASSERT(priv->vlan_filter_n != 0); /* Enabling an existing VLAN filter has no effect. */ if (on) - goto out; + goto no_effect; /* Remove VLAN filter from list. */ --priv->vlan_filter_n; memmove(&priv->vlan_filter[i], @@ -66,14 +66,13 @@ mlx5_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on) MLX5_ASSERT(i == priv->vlan_filter_n); /* Disabling an unknown VLAN filter has no effect. */ if (!on) - goto out; + goto no_effect; /* Add new VLAN filter. */ priv->vlan_filter[priv->vlan_filter_n] = vlan_id; ++priv->vlan_filter_n; } -out: - if (dev->data->dev_started) - return mlx5_traffic_restart(dev); + return on ? 
mlx5_traffic_vlan_add(dev, vlan_id) : mlx5_traffic_vlan_remove(dev, vlan_id); +no_effect: return 0; } diff --git a/drivers/net/mlx5/tools/mlx5_trace.py b/drivers/net/mlx5/tools/mlx5_trace.py index 8c1fd0a350..96eb82082f 100755 --- a/drivers/net/mlx5/tools/mlx5_trace.py +++ b/drivers/net/mlx5/tools/mlx5_trace.py @@ -21,10 +21,13 @@ def __init__(self): self.wait_burst = [] # waiting for completion self.pq_id = 0 - def log(self): + def log(self, all): """Log all queue bursts""" for txb in self.done_burst: txb.log() + if all == True: + for txb in self.wait_burst: + txb.log() class MlxMbuf: @@ -147,24 +150,26 @@ def __init__(self): self.tx_qlst = {} # active Tx queues per port/queue self.tx_wlst = {} # wait timestamp list per CPU - def run(self, msg_it): + def run(self, msg_it, verbose): """Run over gathered tracing data and build database""" for msg in msg_it: if not isinstance(msg, bt2._EventMessageConst): continue event = msg.event if event.name.startswith(PFX_TX): - do_tx(msg, self) + do_tx(msg, self, verbose) # Handling of other log event cathegories can be added here + if verbose: + print("*** End of raw data dump ***") - def log(self): + def log(self, all): """Log gathered trace database""" for pq_id in self.tx_qlst: queue = self.tx_qlst.get(pq_id) - queue.log() + queue.log(all) -def do_tx_entry(msg, trace): +def do_tx_entry(msg, trace, verbose): """Handle entry Tx busrt""" event = msg.event cpu_id = event["cpu_id"] @@ -172,9 +177,15 @@ def do_tx_entry(msg, trace): if burst is not None: # continue existing burst after WAIT return + if verbose > 0: + print("%u:%X tx_entry(real_time=%u, port_id=%u, queue_id=%u)" % + (msg.default_clock_snapshot.ns_from_origin, cpu_id, + event["real_time"], event["port_id"], event["queue_id"])) # allocate the new burst and append to the queue burst = MlxBurst() - burst.call_ts = msg.default_clock_snapshot.ns_from_origin + burst.call_ts = event["real_time"] + if burst.call_ts == 0: + burst.call_ts = msg.default_clock_snapshot.ns_from_origin trace.tx_blst[cpu_id] = burst pq_id = event["port_id"] << 16 | event["queue_id"] queue = trace.tx_qlst.get(pq_id) @@ -187,45 +198,64 @@ def do_tx_entry(msg, trace): queue.wait_burst.append(burst) -def do_tx_exit(msg, trace): +def do_tx_exit(msg, trace, verbose): """Handle exit Tx busrt""" event = msg.event cpu_id = event["cpu_id"] + if verbose > 0: + print("%u:%X tx_exit(real_time=%u, nb_sent=%u, nb_req=%u)" % + (msg.default_clock_snapshot.ns_from_origin, cpu_id, + event["real_time"], event["nb_sent"], event["nb_req"])) burst = trace.tx_blst.get(cpu_id) if burst is None: return - burst.done_ts = msg.default_clock_snapshot.ns_from_origin + burst.done_ts = event["real_time"] + if burst.done_ts == 0: + burst.done_ts = msg.default_clock_snapshot.ns_from_origin burst.req = event["nb_req"] burst.done = event["nb_sent"] trace.tx_blst.pop(cpu_id) -def do_tx_wqe(msg, trace): +def do_tx_wqe(msg, trace, verbose): """Handle WQE record""" event = msg.event cpu_id = event["cpu_id"] + if verbose > 1: + print("%u:%X tx_wqe(real_time=%u, opcode=%08X)" % + (msg.default_clock_snapshot.ns_from_origin, cpu_id, + event["real_time"], event["opcode"])) burst = trace.tx_blst.get(cpu_id) if burst is None: return wqe = MlxWqe() wqe.wait_ts = trace.tx_wlst.get(cpu_id) if wqe.wait_ts is None: - wqe.wait_ts = msg.default_clock_snapshot.ns_from_origin + wqe.wait_ts = event["real_time"] + if wqe.wait_ts == 0: + wqe.wait_ts = msg.default_clock_snapshot.ns_from_origin wqe.opcode = event["opcode"] burst.wqes.append(wqe) -def do_tx_wait(msg, trace): +def 
do_tx_wait(msg, trace, verbose): """Handle WAIT record""" event = msg.event cpu_id = event["cpu_id"] + if verbose > 1: + print("%u:%X tx_wait(ts=%u)" % + (msg.default_clock_snapshot.ns_from_origin, cpu_id, event["ts"])) trace.tx_wlst[cpu_id] = event["ts"] -def do_tx_push(msg, trace): +def do_tx_push(msg, trace, verbose): """Handle WQE push event""" event = msg.event cpu_id = event["cpu_id"] + if verbose > 2: + print("%u:%X tx_push(mbuf=%X, pkt_len=%u, nb_segs=%u, wqe_id=%04X)" % + (msg.default_clock_snapshot.ns_from_origin, cpu_id, event["mbuf"], + event["mbuf_pkt_len"], event["mbuf_nb_segs"], event["wqe_id"])) burst = trace.tx_blst.get(cpu_id) if burst is None: return @@ -240,10 +270,15 @@ def do_tx_push(msg, trace): wqe.mbuf.append(mbuf) -def do_tx_complete(msg, trace): +def do_tx_complete(msg, trace, verbose): """Handle send completion event""" event = msg.event pq_id = event["port_id"] << 16 | event["queue_id"] + if verbose > 1: + cpu_id = event["cpu_id"] + print("%u:%X tx_complete(port_id=%u, queue_id=%u, ts=%u, wqe_id=%04X)" % + (msg.default_clock_snapshot.ns_from_origin, cpu_id, + event["port_id"], event["queue_id"], event["ts"], event["wqe_id"])) queue = trace.tx_qlst.get(pq_id) if queue is None: return @@ -258,30 +293,31 @@ def do_tx_complete(msg, trace): if burst.comp(wqe_id, wqe_ts) == 0: break rmv += 1 - # mode completed burst to done list + # move completed burst(s) to done list if rmv != 0: idx = 0 while idx < rmv: + burst = queue.wait_burst[idx] queue.done_burst.append(burst) idx += 1 - del queue.wait_burst[0:rmv] + queue.wait_burst = queue.wait_burst[rmv:] -def do_tx(msg, trace): +def do_tx(msg, trace, verbose): """Handle Tx related records""" name = msg.event.name[PFX_TX_LEN:] if name == "entry": - do_tx_entry(msg, trace) + do_tx_entry(msg, trace, verbose) elif name == "exit": - do_tx_exit(msg, trace) + do_tx_exit(msg, trace, verbose) elif name == "wqe": - do_tx_wqe(msg, trace) + do_tx_wqe(msg, trace, verbose) elif name == "wait": - do_tx_wait(msg, trace) + do_tx_wait(msg, trace, verbose) elif name == "push": - do_tx_push(msg, trace) + do_tx_push(msg, trace, verbose) elif name == "complete": - do_tx_complete(msg, trace) + do_tx_complete(msg, trace, verbose) else: print("Error: unrecognized Tx event name: %s" % msg.event.name, file=sys.stderr) raise ValueError() @@ -292,12 +328,16 @@ def main() -> int: try: parser = argparse.ArgumentParser() parser.add_argument("path", nargs=1, type=str, help="input trace folder") + parser.add_argument("-a", "--all", nargs="?", default=False, const=True, + help="show all the bursts, including incomplete ones") + parser.add_argument("-v", "--verbose", type=int, nargs="?", default=0, const=2, + help="show all the records below specified level") args = parser.parse_args() mlx_tr = MlxTrace() msg_it = bt2.TraceCollectionMessageIterator(args.path) - mlx_tr.run(msg_it) - mlx_tr.log() + mlx_tr.run(msg_it, args.verbose) + mlx_tr.log(args.all) return 0 except ValueError: return -1 diff --git a/drivers/net/mlx5/windows/mlx5_os.c b/drivers/net/mlx5/windows/mlx5_os.c index 0ebd233595..268598f209 100644 --- a/drivers/net/mlx5/windows/mlx5_os.c +++ b/drivers/net/mlx5/windows/mlx5_os.c @@ -521,9 +521,11 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev, claim_zero(mlx5_mac_addr_add(eth_dev, &mac, 0, 0)); priv->ctrl_flows = 0; TAILQ_INIT(&priv->flow_meters); - priv->mtr_profile_tbl = mlx5_l3t_create(MLX5_L3T_TYPE_PTR); - if (!priv->mtr_profile_tbl) - goto error; + if (priv->mtr_en) { + priv->mtr_profile_tbl = mlx5_l3t_create(MLX5_L3T_TYPE_PTR); + if 
(!priv->mtr_profile_tbl) + goto error; + } /* Bring Ethernet device up. */ DRV_LOG(DEBUG, "port %u forcing Ethernet interface up.", eth_dev->data->port_id); @@ -600,6 +602,9 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev, } mlx5_flow_counter_mode_config(eth_dev); mlx5_queue_counter_id_prepare(eth_dev); + rte_spinlock_init(&priv->hw_ctrl_lock); + LIST_INIT(&priv->hw_ctrl_flows); + LIST_INIT(&priv->hw_ext_ctrl_flows); return eth_dev; error: if (priv) { diff --git a/drivers/net/mvneta/mvneta_ethdev.c b/drivers/net/mvneta/mvneta_ethdev.c index 3841c1ebe9..f99f9e6289 100644 --- a/drivers/net/mvneta/mvneta_ethdev.c +++ b/drivers/net/mvneta/mvneta_ethdev.c @@ -91,6 +91,12 @@ mvneta_ifnames_get(const char *key __rte_unused, const char *value, { struct mvneta_ifnames *ifnames = extra_args; + if (ifnames->idx >= NETA_NUM_ETH_PPIO) { + MVNETA_LOG(ERR, "Too many ifnames specified (max %u)", + NETA_NUM_ETH_PPIO); + return -EINVAL; + } + ifnames->names[ifnames->idx++] = value; return 0; diff --git a/drivers/net/netvsc/hn_ethdev.c b/drivers/net/netvsc/hn_ethdev.c index f8cb05a118..1736cb5d07 100644 --- a/drivers/net/netvsc/hn_ethdev.c +++ b/drivers/net/netvsc/hn_ethdev.c @@ -313,6 +313,15 @@ static int hn_rss_reta_update(struct rte_eth_dev *dev, if (reta_conf[idx].mask & mask) hv->rss_ind[i] = reta_conf[idx].reta[shift]; + + /* + * Ensure we don't allow config that directs traffic to an Rx + * queue that we aren't going to poll + */ + if (hv->rss_ind[i] >= dev->data->nb_rx_queues) { + PMD_DRV_LOG(ERR, "RSS distributing traffic to invalid Rx queue"); + return -EINVAL; + } } err = hn_rndis_conf_rss(hv, NDIS_RSS_FLAG_DISABLE); diff --git a/drivers/net/netvsc/hn_rxtx.c b/drivers/net/netvsc/hn_rxtx.c index 870f62e5fa..9d3948e03d 100644 --- a/drivers/net/netvsc/hn_rxtx.c +++ b/drivers/net/netvsc/hn_rxtx.c @@ -222,6 +222,17 @@ static void hn_reset_txagg(struct hn_tx_queue *txq) txq->agg_prevpkt = NULL; } +static void +hn_rx_queue_free_common(struct hn_rx_queue *rxq) +{ + if (!rxq) + return; + + rte_free(rxq->rxbuf_info); + rte_free(rxq->event_buf); + rte_free(rxq); +} + int hn_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx, uint16_t nb_desc, @@ -231,6 +242,7 @@ hn_dev_tx_queue_setup(struct rte_eth_dev *dev, { struct hn_data *hv = dev->data->dev_private; struct hn_tx_queue *txq; + struct hn_rx_queue *rxq = NULL; char name[RTE_MEMPOOL_NAMESIZE]; uint32_t tx_free_thresh; int err = -ENOMEM; @@ -289,6 +301,27 @@ hn_dev_tx_queue_setup(struct rte_eth_dev *dev, goto error; } + /* + * If there are more Tx queues than Rx queues, allocate rx_queues + * with event buffer so that Tx completion messages can still be + * received + */ + if (queue_idx >= dev->data->nb_rx_queues) { + rxq = hn_rx_queue_alloc(hv, queue_idx, socket_id); + + if (!rxq) { + err = -ENOMEM; + goto error; + } + + /* + * Don't allocate mbuf pool or rx ring. RSS is always configured + * to ensure packets aren't received by this Rx queue. 
+ */ + rxq->mb_pool = NULL; + rxq->rx_ring = NULL; + } + txq->agg_szmax = RTE_MIN(hv->chim_szmax, hv->rndis_agg_size); txq->agg_pktmax = hv->rndis_agg_pkts; txq->agg_align = hv->rndis_agg_align; @@ -299,12 +332,15 @@ hn_dev_tx_queue_setup(struct rte_eth_dev *dev, socket_id, tx_conf); if (err == 0) { dev->data->tx_queues[queue_idx] = txq; + if (rxq != NULL) + dev->data->rx_queues[queue_idx] = rxq; return 0; } error: rte_mempool_free(txq->txdesc_pool); rte_memzone_free(txq->tx_rndis_mz); + hn_rx_queue_free_common(rxq); rte_free(txq); return err; } @@ -351,6 +387,12 @@ hn_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid) if (!txq) return; + /* + * Free any Rx queues allocated for a Tx queue without a corresponding + * Rx queue + */ + if (qid >= dev->data->nb_rx_queues) + hn_rx_queue_free_common(dev->data->rx_queues[qid]); rte_mempool_free(txq->txdesc_pool); @@ -540,10 +582,12 @@ static void hn_rxpkt(struct hn_rx_queue *rxq, struct hn_rx_bufinfo *rxb, const struct hn_rxinfo *info) { struct hn_data *hv = rxq->hv; - struct rte_mbuf *m; + struct rte_mbuf *m = NULL; bool use_extbuf = false; - m = rte_pktmbuf_alloc(rxq->mb_pool); + if (likely(rxq->mb_pool != NULL)) + m = rte_pktmbuf_alloc(rxq->mb_pool); + if (unlikely(!m)) { struct rte_eth_dev *dev = &rte_eth_devices[rxq->port_id]; @@ -930,7 +974,15 @@ hn_dev_rx_queue_setup(struct rte_eth_dev *dev, if (queue_idx == 0) { rxq = hv->primary; } else { - rxq = hn_rx_queue_alloc(hv, queue_idx, socket_id); + /* + * If the number of Tx queues was previously greater than the + * number of Rx queues, we may already have allocated an rxq. + */ + if (!dev->data->rx_queues[queue_idx]) + rxq = hn_rx_queue_alloc(hv, queue_idx, socket_id); + else + rxq = dev->data->rx_queues[queue_idx]; + if (!rxq) return -ENOMEM; } @@ -963,9 +1015,10 @@ hn_dev_rx_queue_setup(struct rte_eth_dev *dev, fail: rte_ring_free(rxq->rx_ring); - rte_free(rxq->rxbuf_info); - rte_free(rxq->event_buf); - rte_free(rxq); + /* Only free rxq if it was created in this function. 
*/ + if (!dev->data->rx_queues[queue_idx]) + hn_rx_queue_free_common(rxq); + return error; } @@ -986,9 +1039,7 @@ hn_rx_queue_free(struct hn_rx_queue *rxq, bool keep_primary) if (keep_primary && rxq == rxq->hv->primary) return; - rte_free(rxq->rxbuf_info); - rte_free(rxq->event_buf); - rte_free(rxq); + hn_rx_queue_free_common(rxq); } void @@ -1506,14 +1557,32 @@ hn_xmit_pkts(void *ptxq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) { struct rte_mbuf *m = tx_pkts[nb_tx]; - uint32_t pkt_size = m->pkt_len + HN_RNDIS_PKT_LEN; struct rndis_packet_msg *pkt; struct hn_txdesc *txd; + uint32_t pkt_size; txd = hn_txd_get(txq); if (txd == NULL) break; + if (!(m->ol_flags & RTE_MBUF_F_TX_VLAN)) { + struct rte_ether_hdr *eh = + rte_pktmbuf_mtod(m, struct rte_ether_hdr *); + struct rte_vlan_hdr *vh; + + /* Force TX vlan offloading for 801.2Q packet */ + if (eh->ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN)) { + vh = (struct rte_vlan_hdr *)(eh + 1); + m->ol_flags |= RTE_MBUF_F_TX_VLAN; + m->vlan_tci = rte_be_to_cpu_16(vh->vlan_tci); + + /* Copy ether header over */ + memmove(rte_pktmbuf_adj(m, sizeof(struct rte_vlan_hdr)), + eh, 2 * RTE_ETHER_ADDR_LEN); + } + } + pkt_size = m->pkt_len + HN_RNDIS_PKT_LEN; + /* For small packets aggregate them in chimney buffer */ if (m->pkt_len <= hv->tx_copybreak && pkt_size <= txq->agg_szmax) { diff --git a/drivers/net/nfp/flower/nfp_conntrack.c b/drivers/net/nfp/flower/nfp_conntrack.c index eff52c66ee..0b21e4ee5b 100644 --- a/drivers/net/nfp/flower/nfp_conntrack.c +++ b/drivers/net/nfp/flower/nfp_conntrack.c @@ -133,7 +133,7 @@ nfp_ct_merge_table_search(struct nfp_ct_zone_entry *ze, hash_key = rte_jhash(hash_data, hash_len, ze->priv->hash_seed); index = rte_hash_lookup_data(ze->ct_merge_table, &hash_key, (void **)&m_ent); if (index < 0) { - PMD_DRV_LOG(DEBUG, "Data NOT found in the ct_merge table"); + PMD_DRV_LOG(DEBUG, "Data NOT found in the ct_merge table."); return NULL; } @@ -150,7 +150,7 @@ nfp_ct_merge_table_add(struct nfp_ct_zone_entry *ze, hash_key = rte_jhash(merge_entry, sizeof(uint64_t) * 2, ze->priv->hash_seed); ret = rte_hash_add_key_data(ze->ct_merge_table, &hash_key, merge_entry); if (ret != 0) { - PMD_DRV_LOG(ERR, "Add to ct_merge table failed"); + PMD_DRV_LOG(ERR, "Add to ct_merge table failed."); return false; } @@ -167,7 +167,7 @@ nfp_ct_merge_table_delete(struct nfp_ct_zone_entry *ze, hash_key = rte_jhash(m_ent, sizeof(uint64_t) * 2, ze->priv->hash_seed); ret = rte_hash_del_key(ze->ct_merge_table, &hash_key); if (ret < 0) - PMD_DRV_LOG(ERR, "Delete from ct_merge table failed, ret=%d", ret); + PMD_DRV_LOG(ERR, "Delete from ct_merge table failed, ret=%d.", ret); } static void @@ -197,7 +197,7 @@ nfp_ct_map_table_search(struct nfp_flow_priv *priv, hash_key = rte_jhash(hash_data, hash_len, priv->hash_seed); index = rte_hash_lookup_data(priv->ct_map_table, &hash_key, (void **)&me); if (index < 0) { - PMD_DRV_LOG(DEBUG, "Data NOT found in the ct_map table"); + PMD_DRV_LOG(DEBUG, "Data NOT found in the ct_map table."); return NULL; } @@ -214,7 +214,7 @@ nfp_ct_map_table_add(struct nfp_flow_priv *priv, hash_key = rte_jhash(me, sizeof(uint64_t), priv->hash_seed); ret = rte_hash_add_key_data(priv->ct_map_table, &hash_key, me); if (ret != 0) { - PMD_DRV_LOG(ERR, "Add to ct_map table failed"); + PMD_DRV_LOG(ERR, "Add to ct_map table failed."); return false; } @@ -231,7 +231,7 @@ nfp_ct_map_table_delete(struct nfp_flow_priv *priv, hash_key = rte_jhash(me, sizeof(uint64_t), priv->hash_seed); ret = 
rte_hash_del_key(priv->ct_map_table, &hash_key); if (ret < 0) - PMD_DRV_LOG(ERR, "Delete form ct_map table failed"); + PMD_DRV_LOG(ERR, "Delete form ct_map table failed."); } static void @@ -331,7 +331,7 @@ nfp_flow_item_conf_size_get(enum rte_flow_item_type type, len = sizeof(struct rte_flow_item_geneve); break; default: - PMD_DRV_LOG(ERR, "Unsupported item type: %d", type); + PMD_DRV_LOG(ERR, "Unsupported item type: %d.", type); *size = 0; return false; } @@ -351,13 +351,13 @@ nfp_ct_flow_item_copy_real(const void *src, ret = nfp_flow_item_conf_size_get(type, &len); if (!ret) { - PMD_DRV_LOG(ERR, "Get flow item conf size failed"); + PMD_DRV_LOG(ERR, "Get flow item conf size failed."); return NULL; } dst = rte_zmalloc("flow_item", len, 0); if (dst == NULL) { - PMD_DRV_LOG(ERR, "Malloc memory for ct item failed"); + PMD_DRV_LOG(ERR, "Malloc memory for ct item failed."); return NULL; } @@ -375,7 +375,7 @@ nfp_ct_flow_item_copy(const struct rte_flow_item *src, if (src->spec != NULL) { dst->spec = nfp_ct_flow_item_copy_real(src->spec, src->type); if (dst->spec == NULL) { - PMD_DRV_LOG(ERR, "Copy spec of ct item failed"); + PMD_DRV_LOG(ERR, "Copy spec of ct item failed."); goto end; } } @@ -383,7 +383,7 @@ nfp_ct_flow_item_copy(const struct rte_flow_item *src, if (src->mask != NULL) { dst->mask = nfp_ct_flow_item_copy_real(src->mask, src->type); if (dst->mask == NULL) { - PMD_DRV_LOG(ERR, "Copy mask of ct item failed"); + PMD_DRV_LOG(ERR, "Copy mask of ct item failed."); goto free_spec; } } @@ -391,7 +391,7 @@ nfp_ct_flow_item_copy(const struct rte_flow_item *src, if (src->last != NULL) { dst->last = nfp_ct_flow_item_copy_real(src->last, src->type); if (dst->last == NULL) { - PMD_DRV_LOG(ERR, "Copy last of ct item failed"); + PMD_DRV_LOG(ERR, "Copy last of ct item failed."); goto free_mask; } } @@ -417,7 +417,7 @@ nfp_ct_flow_items_copy(const struct rte_flow_item *src, for (loop = 0; loop < item_cnt; ++loop) { ret = nfp_ct_flow_item_copy(src + loop, dst + loop); if (!ret) { - PMD_DRV_LOG(ERR, "Copy ct item failed"); + PMD_DRV_LOG(ERR, "Copy ct item failed."); nfp_ct_flow_items_free(dst, loop); return false; } @@ -490,7 +490,7 @@ nfp_ct_flow_action_free(struct rte_flow_action *action) func = nfp_ct_flow_action_free_raw; break; default: - PMD_DRV_LOG(ERR, "Unsupported action type: %d", action->type); + PMD_DRV_LOG(ERR, "Unsupported action type: %d.", action->type); break; } @@ -517,14 +517,14 @@ nfp_ct_flow_action_copy_real(const void *src, dst = rte_zmalloc("flow_action", len, 0); if (dst == NULL) { - PMD_DRV_LOG(ERR, "Malloc memory for ct action failed"); + PMD_DRV_LOG(ERR, "Malloc memory for ct action failed."); return NULL; } if (func != NULL) { ret = func(src, dst); if (!ret) { - PMD_DRV_LOG(ERR, "Copy ct action failed"); + PMD_DRV_LOG(ERR, "Copy ct action failed."); return NULL; } @@ -559,7 +559,7 @@ nfp_ct_flow_action_copy_raw(const void *src, raw_dst->data = nfp_ct_flow_action_copy_real(raw_src->data, raw_src->size, NULL); if (raw_dst->data == NULL) { - PMD_DRV_LOG(ERR, "Copy ct action process failed"); + PMD_DRV_LOG(ERR, "Copy ct action process failed."); return false; } @@ -625,13 +625,13 @@ nfp_ct_flow_action_copy(const struct rte_flow_action *src, func = nfp_ct_flow_action_copy_raw; break; default: - PMD_DRV_LOG(DEBUG, "Unsupported action type: %d", src->type); + PMD_DRV_LOG(DEBUG, "Unsupported action type: %d.", src->type); return false; } dst->conf = nfp_ct_flow_action_copy_real(src->conf, len, func); if (dst->conf == NULL) { - PMD_DRV_LOG(DEBUG, "Copy ct action process failed"); + 
PMD_DRV_LOG(DEBUG, "Copy ct action process failed."); return false; } @@ -649,7 +649,7 @@ nfp_ct_flow_actions_copy(const struct rte_flow_action *src, for (loop = 0; loop < action_cnt; ++loop) { ret = nfp_ct_flow_action_copy(src + loop, dst + loop); if (!ret) { - PMD_DRV_LOG(DEBUG, "Copy ct action failed"); + PMD_DRV_LOG(DEBUG, "Copy ct action failed."); nfp_ct_flow_actions_free(dst, loop); return false; } @@ -676,7 +676,7 @@ nfp_ct_flow_entry_get(struct nfp_ct_zone_entry *ze, fe = rte_zmalloc("ct_flow_entry", sizeof(*fe), 0); if (fe == NULL) { - PMD_DRV_LOG(ERR, "Could not alloc ct_flow entry"); + PMD_DRV_LOG(ERR, "Could not alloc ct_flow entry."); return NULL; } @@ -693,28 +693,28 @@ nfp_ct_flow_entry_get(struct nfp_ct_zone_entry *ze, fe->rule.items = rte_zmalloc("ct_flow_item", sizeof(struct rte_flow_item) * item_cnt, 0); if (fe->rule.items == NULL) { - PMD_DRV_LOG(ERR, "Could not alloc ct flow items"); + PMD_DRV_LOG(ERR, "Could not alloc ct flow items."); goto free_flow_entry; } fe->rule.actions = rte_zmalloc("ct_flow_action", sizeof(struct rte_flow_action) * action_cnt, 0); if (fe->rule.actions == NULL) { - PMD_DRV_LOG(ERR, "Could not alloc ct flow actions"); + PMD_DRV_LOG(ERR, "Could not alloc ct flow actions."); goto free_flow_item; } /* Deep copy of items */ ret = nfp_ct_flow_items_copy(items, fe->rule.items, item_cnt); if (!ret) { - PMD_DRV_LOG(ERR, "Could not deep copy ct flow items"); + PMD_DRV_LOG(ERR, "Could not deep copy ct flow items."); goto free_flow_action; } /* Deep copy of actions */ ret = nfp_ct_flow_actions_copy(actions, fe->rule.actions, action_cnt); if (!ret) { - PMD_DRV_LOG(ERR, "Could not deep copy ct flow actions"); + PMD_DRV_LOG(ERR, "Could not deep copy ct flow actions."); goto free_copied_items; } @@ -724,7 +724,7 @@ nfp_ct_flow_entry_get(struct nfp_ct_zone_entry *ze, /* Now add a ct map entry */ me = rte_zmalloc("ct_map_entry", sizeof(*me), 0); if (me == NULL) { - PMD_DRV_LOG(ERR, "Malloc memory for ct map entry failed"); + PMD_DRV_LOG(ERR, "Malloc memory for ct map entry failed."); goto free_copied_actions; } @@ -735,7 +735,7 @@ nfp_ct_flow_entry_get(struct nfp_ct_zone_entry *ze, priv = repr->app_fw_flower->flow_priv; ret = nfp_ct_map_table_add(priv, me); if (!ret) { - PMD_DRV_LOG(ERR, "Add into ct map table failed"); + PMD_DRV_LOG(ERR, "Add into ct map table failed."); goto free_map_entry; } @@ -818,7 +818,7 @@ nfp_ct_zone_table_search(struct nfp_flow_priv *priv, hash_key = rte_jhash(hash_data, hash_len, priv->hash_seed); index = rte_hash_lookup_data(priv->ct_zone_table, &hash_key, (void **)&ze); if (index < 0) { - PMD_DRV_LOG(DEBUG, "Data NOT found in the ct_zone table"); + PMD_DRV_LOG(DEBUG, "Data NOT found in the ct_zone table."); return NULL; } @@ -835,7 +835,7 @@ nfp_ct_zone_table_add(struct nfp_flow_priv *priv, hash_key = rte_jhash(ze, sizeof(uint32_t), priv->hash_seed); ret = rte_hash_add_key_data(priv->ct_zone_table, &hash_key, ze); if (ret != 0) { - PMD_DRV_LOG(ERR, "Add to the ct_zone table failed"); + PMD_DRV_LOG(ERR, "Add to the ct_zone table failed."); return false; } @@ -852,7 +852,7 @@ nfp_ct_zone_table_delete(struct nfp_flow_priv *priv, hash_key = rte_jhash(ze, sizeof(uint32_t), priv->hash_seed); ret = rte_hash_del_key(priv->ct_zone_table, &hash_key); if (ret < 0) - PMD_DRV_LOG(ERR, "Delete from the ct_zone table failed"); + PMD_DRV_LOG(ERR, "Delete from the ct_zone table failed."); } static bool @@ -880,7 +880,7 @@ nfp_ct_zone_entry_init(struct nfp_ct_zone_entry *ze, ct_merge_hash_params.hash_func_init_val = priv->hash_seed; 
ze->ct_merge_table = rte_hash_create(&ct_merge_hash_params); if (ze->ct_merge_table == NULL) { - PMD_DRV_LOG(ERR, "ct merge table creation failed"); + PMD_DRV_LOG(ERR, "CT merge table creation failed."); return false; } @@ -925,13 +925,13 @@ nfp_ct_zone_entry_get(struct nfp_flow_priv *priv, ze = rte_zmalloc("ct_zone_wc", sizeof(*ze), 0); if (ze == NULL) { - PMD_DRV_LOG(ERR, "Could not alloc ct_zone_wc entry"); + PMD_DRV_LOG(ERR, "Could not alloc ct_zone_wc entry."); return NULL; } is_ok = nfp_ct_zone_entry_init(ze, priv, zone, true); if (!is_ok) { - PMD_DRV_LOG(ERR, "Init ct zone wc entry failed"); + PMD_DRV_LOG(ERR, "Init ct zone wc entry failed."); goto free_ct_zone_entry; } @@ -943,19 +943,19 @@ nfp_ct_zone_entry_get(struct nfp_flow_priv *priv, ze = rte_zmalloc("ct_zone_entry", sizeof(*ze), 0); if (ze == NULL) { - PMD_DRV_LOG(ERR, "Could not alloc ct_zone entry"); + PMD_DRV_LOG(ERR, "Could not alloc ct_zone entry."); return NULL; } is_ok = nfp_ct_zone_entry_init(ze, priv, zone, false); if (!is_ok) { - PMD_DRV_LOG(ERR, "Init ct zone entry failed"); + PMD_DRV_LOG(ERR, "Init ct zone entry failed."); goto free_ct_zone_entry; } is_ok = nfp_ct_zone_table_add(priv, ze); if (!is_ok) { - PMD_DRV_LOG(ERR, "Add into ct zone table failed"); + PMD_DRV_LOG(ERR, "Add into ct zone table failed."); goto free_ct_zone_entry; } } @@ -1046,7 +1046,7 @@ nfp_ct_offload_del(struct rte_eth_dev *dev, if (m_ent->compiled_rule != NULL) { ret = nfp_flow_destroy(dev, m_ent->compiled_rule, error); if (ret != 0) { - PMD_DRV_LOG(ERR, "Could not alloc ct_flow_item"); + PMD_DRV_LOG(ERR, "Could not alloc ct_flow_item."); return -EINVAL; } m_ent->compiled_rule = NULL; @@ -1062,7 +1062,7 @@ nfp_ct_offload_del(struct rte_eth_dev *dev, if (m_ent->compiled_rule != NULL) { ret = nfp_flow_destroy(dev, m_ent->compiled_rule, error); if (ret != 0) { - PMD_DRV_LOG(ERR, "Could not alloc ct_flow_item"); + PMD_DRV_LOG(ERR, "Could not alloc ct_flow_item."); return -EINVAL; } m_ent->compiled_rule = NULL; @@ -1467,7 +1467,7 @@ nfp_ct_do_flow_merge(struct nfp_ct_zone_entry *ze, merge_entry = rte_zmalloc("ct_merge_entry", sizeof(*merge_entry), 0); if (merge_entry == NULL) { - PMD_DRV_LOG(ERR, "Malloc memory for ct merge entry failed"); + PMD_DRV_LOG(ERR, "Malloc memory for ct merge entry failed."); return false; } @@ -1483,14 +1483,14 @@ nfp_ct_do_flow_merge(struct nfp_ct_zone_entry *ze, merge_entry->rule.items = rte_zmalloc("ct_flow_item", sizeof(struct rte_flow_item) * merge_entry->rule.items_cnt, 0); if (merge_entry->rule.items == NULL) { - PMD_DRV_LOG(ERR, "Could not alloc items for merged flow"); + PMD_DRV_LOG(ERR, "Could not alloc items for merged flow."); goto merge_exit; } merge_entry->rule.actions = rte_zmalloc("ct_flow_action", sizeof(struct rte_flow_action) * merge_entry->rule.actions_cnt, 0); if (merge_entry->rule.actions == NULL) { - PMD_DRV_LOG(ERR, "Could not alloc actions for merged flow"); + PMD_DRV_LOG(ERR, "Could not alloc actions for merged flow."); goto free_items; } @@ -1503,14 +1503,14 @@ nfp_ct_do_flow_merge(struct nfp_ct_zone_entry *ze, ret = nfp_ct_merge_table_add(ze, merge_entry); if (!ret) { - PMD_DRV_LOG(ERR, "Add into ct merge table failed"); + PMD_DRV_LOG(ERR, "Add into ct merge table failed."); goto free_actions; } /* Send to firmware */ ret = nfp_ct_offload_add(pre_ct_entry->dev, merge_entry); if (ret != 0) { - PMD_DRV_LOG(ERR, "Send the merged flow to firmware failed"); + PMD_DRV_LOG(ERR, "Send the merged flow to firmware failed."); goto merge_table_del; } @@ -1542,7 +1542,7 @@ 
nfp_ct_merge_flow_entries(struct nfp_ct_flow_entry *fe, LIST_FOREACH(fe_tmp, &ze_src->post_ct_list, post_ct_list) { ret = nfp_ct_do_flow_merge(ze_dst, fe, fe_tmp); if (!ret) { - PMD_DRV_LOG(ERR, "Merge for ct pre flow failed"); + PMD_DRV_LOG(ERR, "Merge for ct pre flow failed."); return false; } } @@ -1550,7 +1550,7 @@ nfp_ct_merge_flow_entries(struct nfp_ct_flow_entry *fe, LIST_FOREACH(fe_tmp, &ze_src->pre_ct_list, pre_ct_list) { ret = nfp_ct_do_flow_merge(ze_dst, fe_tmp, fe); if (!ret) { - PMD_DRV_LOG(ERR, "Merge for ct post flow failed"); + PMD_DRV_LOG(ERR, "Merge for ct post flow failed."); return false; } } @@ -1577,14 +1577,14 @@ nfp_flow_handle_pre_ct(const struct rte_flow_item *ct_item, priv = representor->app_fw_flower->flow_priv; ze = nfp_ct_zone_entry_get(priv, ct->ct_zone, false); if (ze == NULL) { - PMD_DRV_LOG(ERR, "Could not get ct zone entry"); + PMD_DRV_LOG(ERR, "Could not get ct zone entry."); return false; } /* Add entry to pre_ct_list */ fe = nfp_ct_flow_entry_get(ze, dev, items, actions, cookie); if (fe == NULL) { - PMD_DRV_LOG(ERR, "Could not get ct flow entry"); + PMD_DRV_LOG(ERR, "Could not get ct flow entry."); goto ct_zone_entry_free; } @@ -1593,7 +1593,7 @@ nfp_flow_handle_pre_ct(const struct rte_flow_item *ct_item, ret = nfp_ct_merge_flow_entries(fe, ze, ze); if (!ret) { - PMD_DRV_LOG(ERR, "Merge ct flow entries failed"); + PMD_DRV_LOG(ERR, "Merge ct flow entries failed."); goto ct_flow_entry_free; } @@ -1601,7 +1601,7 @@ nfp_flow_handle_pre_ct(const struct rte_flow_item *ct_item, if (priv->ct_zone_wc != NULL) { ret = nfp_ct_merge_flow_entries(fe, priv->ct_zone_wc, ze); if (!ret) { - PMD_DRV_LOG(ERR, "Merge ct flow entries wildcast failed"); + PMD_DRV_LOG(ERR, "Merge ct flow entries wildcard failed."); goto ct_flow_entry_free; } } @@ -1639,7 +1639,7 @@ nfp_flow_handle_post_ct(const struct rte_flow_item *ct_item, if (ct_mask->ct_zone == 0) { wildcard = true; } else if (ct_mask->ct_zone != UINT16_MAX) { - PMD_DRV_LOG(ERR, "Partially wildcard ct_zone is not supported"); + PMD_DRV_LOG(ERR, "Partially wildcard ct_zone is not supported."); return false; } @@ -1647,14 +1647,14 @@ nfp_flow_handle_post_ct(const struct rte_flow_item *ct_item, priv = representor->app_fw_flower->flow_priv; ze = nfp_ct_zone_entry_get(priv, ct->ct_zone, wildcard); if (ze == NULL) { - PMD_DRV_LOG(ERR, "Could not get ct zone entry"); + PMD_DRV_LOG(ERR, "Could not get ct zone entry."); return false; } /* Add entry to post_ct_list */ fe = nfp_ct_flow_entry_get(ze, dev, items, actions, cookie); if (fe == NULL) { - PMD_DRV_LOG(ERR, "Could not get ct flow entry"); + PMD_DRV_LOG(ERR, "Could not get ct flow entry."); goto ct_zone_entry_free; } @@ -1666,7 +1666,7 @@ nfp_flow_handle_post_ct(const struct rte_flow_item *ct_item, ze = (struct nfp_ct_zone_entry *)next_data; ret = nfp_ct_merge_flow_entries(fe, ze, ze); if (!ret) { - PMD_DRV_LOG(ERR, "Merge ct flow entries wildcast failed"); + PMD_DRV_LOG(ERR, "Merge ct flow entries wildcard failed."); break; } } @@ -1715,7 +1715,7 @@ nfp_ct_flow_setup(struct rte_eth_dev *dev, validate_flag, cookie, false, false); } - PMD_DRV_LOG(ERR, "Handle nfp post ct flow failed."); + PMD_DRV_LOG(ERR, "Handle NFP post ct flow failed."); return NULL; } @@ -1726,7 +1726,7 @@ nfp_ct_flow_setup(struct rte_eth_dev *dev, validate_flag, cookie, false, false); } - PMD_DRV_LOG(ERR, "Handle nfp pre ct flow failed."); + PMD_DRV_LOG(ERR, "Handle NFP pre ct flow failed."); return NULL; } diff --git a/drivers/net/nfp/flower/nfp_flower.c b/drivers/net/nfp/flower/nfp_flower.c index
c1a3532c11..f087d0dfdc 100644 --- a/drivers/net/nfp/flower/nfp_flower.c +++ b/drivers/net/nfp/flower/nfp_flower.c @@ -23,6 +23,20 @@ #define CTRL_VNIC_NB_DESC 512 +int +nfp_flower_pf_stop(struct rte_eth_dev *dev) +{ + struct nfp_net_hw_priv *hw_priv; + struct nfp_flower_representor *repr; + + repr = dev->data->dev_private; + hw_priv = dev->process_private; + nfp_flower_cmsg_port_mod(repr->app_fw_flower, repr->port_id, false); + (void)nfp_eth_set_configured(hw_priv->pf_dev->cpp, repr->nfp_idx, 0); + + return nfp_net_stop(dev); +} + int nfp_flower_pf_start(struct rte_eth_dev *dev) { @@ -34,6 +48,7 @@ nfp_flower_pf_start(struct rte_eth_dev *dev) struct nfp_net_hw *net_hw; struct rte_eth_conf *dev_conf; struct rte_eth_rxmode *rxmode; + struct nfp_net_hw_priv *hw_priv; struct nfp_flower_representor *repr; repr = dev->data->dev_private; @@ -71,7 +86,7 @@ nfp_flower_pf_start(struct rte_eth_dev *dev) /* If an error when reconfig we avoid to change hw state */ ret = nfp_reconfig(hw, new_ctrl, update); if (ret != 0) { - PMD_INIT_LOG(ERR, "Failed to reconfig PF vnic"); + PMD_INIT_LOG(ERR, "Failed to reconfig PF vnic."); return -EIO; } @@ -80,10 +95,16 @@ nfp_flower_pf_start(struct rte_eth_dev *dev) /* Setup the freelist ring */ ret = nfp_net_rx_freelist_setup(dev); if (ret != 0) { - PMD_INIT_LOG(ERR, "Error with flower PF vNIC freelist setup"); + PMD_INIT_LOG(ERR, "Error with flower PF vNIC freelist setup."); return -EIO; } + hw_priv = dev->process_private; + if (hw_priv->pf_dev->multi_pf.enabled) { + (void)nfp_eth_set_configured(hw_priv->pf_dev->cpp, repr->nfp_idx, 1); + nfp_flower_cmsg_port_mod(repr->app_fw_flower, repr->port_id, true); + } + for (i = 0; i < dev->data->nb_rx_queues; i++) dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; for (i = 0; i < dev->data->nb_tx_queues; i++) @@ -134,12 +155,12 @@ nfp_flower_pf_dispatch_pkts(struct nfp_net_rxq *rxq, repr = nfp_flower_get_repr(rxq->hw_priv, port_id); if (repr == NULL) { - PMD_RX_LOG(ERR, "Can not get repr for port %u", port_id); + PMD_RX_LOG(ERR, "Can not get repr for port %u.", port_id); return false; } if (repr->ring == NULL || repr->ring[rxq->qidx] == NULL) { - PMD_RX_LOG(ERR, "No ring available for repr_port %s", repr->name); + PMD_RX_LOG(ERR, "No ring available for repr_port %s.", repr->name); return false; } @@ -194,6 +215,76 @@ nfp_flower_pf_xmit_pkts(void *tx_queue, return app_fw_flower->nfd_func.pf_xmit_t(tx_queue, tx_pkts, nb_pkts); } +uint16_t +nfp_flower_multiple_pf_recv_pkts(void *rx_queue, + struct rte_mbuf **rx_pkts, + uint16_t nb_pkts) +{ + int i; + uint16_t recv; + uint32_t data_len; + struct nfp_net_rxq *rxq; + struct rte_eth_dev *repr_dev; + struct nfp_flower_representor *repr; + + recv = nfp_net_recv_pkts(rx_queue, rx_pkts, nb_pkts); + if (recv != 0) { + /* Grab a handle to the representor struct */ + rxq = rx_queue; + repr_dev = &rte_eth_devices[rxq->port_id]; + repr = repr_dev->data->dev_private; + + data_len = 0; + for (i = 0; i < recv; i++) + data_len += rx_pkts[i]->data_len; + + repr->repr_stats.ipackets += recv; + repr->repr_stats.q_ipackets[rxq->qidx] += recv; + repr->repr_stats.q_ibytes[rxq->qidx] += data_len; + } + + return recv; +} + +uint16_t +nfp_flower_multiple_pf_xmit_pkts(void *tx_queue, + struct rte_mbuf **tx_pkts, + uint16_t nb_pkts) +{ + int i; + uint16_t sent; + uint32_t data_len; + struct nfp_net_txq *txq; + struct rte_eth_dev *repr_dev; + struct nfp_flower_representor *repr; + + txq = tx_queue; + if (unlikely(txq == NULL)) { + PMD_TX_LOG(ERR, "TX Bad queue."); + return 0; + } + + /* Grab a 
handle to the representor struct */ + repr_dev = &rte_eth_devices[txq->port_id]; + repr = repr_dev->data->dev_private; + for (i = 0; i < nb_pkts; i++) + nfp_flower_pkt_add_metadata(repr->app_fw_flower, + tx_pkts[i], repr->port_id); + + sent = nfp_flower_pf_xmit_pkts(tx_queue, tx_pkts, nb_pkts); + if (sent != 0) { + data_len = 0; + for (i = 0; i < sent; i++) + data_len += tx_pkts[i]->data_len; + + repr->repr_stats.opackets += sent; + repr->repr_stats.q_opackets[txq->qidx] += sent; + repr->repr_stats.q_obytes[txq->qidx] += data_len; + } + + return sent; +} + static int nfp_flower_init_vnic_common(struct nfp_net_hw_priv *hw_priv, struct nfp_net_hw *hw, @@ -207,7 +298,7 @@ nfp_flower_init_vnic_common(struct nfp_net_hw_priv *hw_priv, pf_dev = hw_priv->pf_dev; - PMD_INIT_LOG(DEBUG, "%s vNIC ctrl bar: %p", vnic_type, hw->super.ctrl_bar); + PMD_INIT_LOG(DEBUG, "%s vNIC ctrl bar: %p.", vnic_type, hw->super.ctrl_bar); err = nfp_net_common_init(pf_dev, hw); if (err != 0) @@ -264,7 +355,7 @@ nfp_flower_init_ctrl_vnic(struct nfp_app_fw_flower *app_fw_flower, ret = nfp_flower_init_vnic_common(hw_priv, hw, "ctrl_vnic"); if (ret != 0) { - PMD_INIT_LOG(ERR, "Could not init pf vnic"); + PMD_INIT_LOG(ERR, "Could not init pf vnic."); return -EINVAL; } @@ -272,7 +363,7 @@ nfp_flower_init_ctrl_vnic(struct nfp_app_fw_flower *app_fw_flower, app_fw_flower->ctrl_ethdev = rte_zmalloc("nfp_ctrl_vnic", sizeof(struct rte_eth_dev), RTE_CACHE_LINE_SIZE); if (app_fw_flower->ctrl_ethdev == NULL) { - PMD_INIT_LOG(ERR, "Could not allocate ctrl vnic"); + PMD_INIT_LOG(ERR, "Could not allocate ctrl vnic."); return -ENOMEM; } @@ -283,7 +374,7 @@ nfp_flower_init_ctrl_vnic(struct nfp_app_fw_flower *app_fw_flower, eth_dev->data = rte_zmalloc("nfp_ctrl_vnic_data", sizeof(struct rte_eth_dev_data), RTE_CACHE_LINE_SIZE); if (eth_dev->data == NULL) { - PMD_INIT_LOG(ERR, "Could not allocate ctrl vnic data"); + PMD_INIT_LOG(ERR, "Could not allocate ctrl vnic data."); ret = -ENOMEM; goto eth_dev_cleanup; } @@ -298,7 +389,7 @@ nfp_flower_init_ctrl_vnic(struct nfp_app_fw_flower *app_fw_flower, rte_pktmbuf_pool_create(ctrl_pktmbuf_pool_name, 4 * CTRL_VNIC_NB_DESC, 64, 0, 9216, numa_node); if (app_fw_flower->ctrl_pktmbuf_pool == NULL) { - PMD_INIT_LOG(ERR, "Create mbuf pool for ctrl vnic failed"); + PMD_INIT_LOG(ERR, "Create mbuf pool for ctrl vnic failed."); ret = -ENOMEM; goto dev_data_cleanup; } @@ -312,7 +403,7 @@ nfp_flower_init_ctrl_vnic(struct nfp_app_fw_flower *app_fw_flower, sizeof(eth_dev->data->rx_queues[0]) * n_rxq, RTE_CACHE_LINE_SIZE); if (eth_dev->data->rx_queues == NULL) { - PMD_INIT_LOG(ERR, "rte_zmalloc failed for ctrl vNIC rx queues"); + PMD_INIT_LOG(ERR, "The rte_zmalloc failed for ctrl vNIC rx queues."); ret = -ENOMEM; goto mempool_cleanup; } @@ -321,7 +412,7 @@ nfp_flower_init_ctrl_vnic(struct nfp_app_fw_flower *app_fw_flower, sizeof(eth_dev->data->tx_queues[0]) * n_txq, RTE_CACHE_LINE_SIZE); if (eth_dev->data->tx_queues == NULL) { - PMD_INIT_LOG(ERR, "rte_zmalloc failed for ctrl vNIC tx queues"); + PMD_INIT_LOG(ERR, "The rte_zmalloc failed for ctrl vNIC tx queues."); ret = -ENOMEM; goto rx_queue_free; } @@ -339,7 +430,7 @@ nfp_flower_init_ctrl_vnic(struct nfp_app_fw_flower *app_fw_flower, sizeof(struct nfp_net_rxq), RTE_CACHE_LINE_SIZE, numa_node); if (rxq == NULL) { - PMD_DRV_LOG(ERR, "Error allocating rxq"); + PMD_DRV_LOG(ERR, "Error allocating rxq."); ret = -ENOMEM; goto rx_queue_setup_cleanup; } @@ -373,7 +464,7 @@ nfp_flower_init_ctrl_vnic(struct nfp_app_fw_flower *app_fw_flower, hw_priv->dev_info->max_qc_size, 
NFP_MEMZONE_ALIGN, numa_node); if (tz == NULL) { - PMD_DRV_LOG(ERR, "Error allocating rx dma"); + PMD_DRV_LOG(ERR, "Error allocating rx dma."); rte_free(rxq); ret = -ENOMEM; goto rx_queue_setup_cleanup; @@ -414,7 +505,7 @@ nfp_flower_init_ctrl_vnic(struct nfp_app_fw_flower *app_fw_flower, sizeof(struct nfp_net_txq), RTE_CACHE_LINE_SIZE, numa_node); if (txq == NULL) { - PMD_DRV_LOG(ERR, "Error allocating txq"); + PMD_DRV_LOG(ERR, "Error allocating txq."); ret = -ENOMEM; goto tx_queue_setup_cleanup; } @@ -431,7 +522,7 @@ nfp_flower_init_ctrl_vnic(struct nfp_app_fw_flower *app_fw_flower, hw_priv->dev_info->max_qc_size, NFP_MEMZONE_ALIGN, numa_node); if (tz == NULL) { - PMD_DRV_LOG(ERR, "Error allocating tx dma"); + PMD_DRV_LOG(ERR, "Error allocating tx dma."); rte_free(txq); ret = -ENOMEM; goto tx_queue_setup_cleanup; @@ -476,7 +567,7 @@ nfp_flower_init_ctrl_vnic(struct nfp_app_fw_flower *app_fw_flower, /* Alloc sync memory zone */ ret = nfp_flower_service_sync_alloc(hw_priv); if (ret != 0) { - PMD_INIT_LOG(ERR, "Alloc sync memory zone failed"); + PMD_INIT_LOG(ERR, "Alloc sync memory zone failed."); goto tx_queue_setup_cleanup; } @@ -593,7 +684,7 @@ nfp_flower_start_ctrl_vnic(struct nfp_app_fw_flower *app_fw_flower) /* If an error when reconfig we avoid to change hw state */ ret = nfp_reconfig(hw, new_ctrl, update); if (ret != 0) { - PMD_INIT_LOG(ERR, "Failed to reconfig ctrl vnic"); + PMD_INIT_LOG(ERR, "Failed to reconfig ctrl vnic."); return -EIO; } @@ -602,7 +693,7 @@ nfp_flower_start_ctrl_vnic(struct nfp_app_fw_flower *app_fw_flower) /* Setup the freelist ring */ ret = nfp_net_rx_freelist_setup(dev); if (ret != 0) { - PMD_INIT_LOG(ERR, "Error with flower ctrl vNIC freelist setup"); + PMD_INIT_LOG(ERR, "Error with flower ctrl vNIC freelist setup."); return -EIO; } @@ -662,7 +753,7 @@ nfp_init_app_fw_flower(struct nfp_net_hw_priv *hw_priv) app_fw_flower = rte_zmalloc_socket("nfp_app_fw_flower", sizeof(*app_fw_flower), RTE_CACHE_LINE_SIZE, numa_node); if (app_fw_flower == NULL) { - PMD_INIT_LOG(ERR, "Could not malloc app fw flower"); + PMD_INIT_LOG(ERR, "Could not malloc app fw flower."); return -ENOMEM; } @@ -670,13 +761,13 @@ nfp_init_app_fw_flower(struct nfp_net_hw_priv *hw_priv) ret = nfp_flow_priv_init(pf_dev); if (ret != 0) { - PMD_INIT_LOG(ERR, "init flow priv failed"); + PMD_INIT_LOG(ERR, "Init flow priv failed."); goto app_cleanup; } ret = nfp_mtr_priv_init(pf_dev); if (ret != 0) { - PMD_INIT_LOG(ERR, "Error initializing metering private data"); + PMD_INIT_LOG(ERR, "Error initializing metering private data."); goto flow_priv_cleanup; } @@ -684,7 +775,7 @@ nfp_init_app_fw_flower(struct nfp_net_hw_priv *hw_priv) pf_hw = rte_zmalloc_socket("nfp_pf_vnic", 2 * sizeof(struct nfp_net_hw), RTE_CACHE_LINE_SIZE, numa_node); if (pf_hw == NULL) { - PMD_INIT_LOG(ERR, "Could not malloc nfp pf vnic"); + PMD_INIT_LOG(ERR, "Could not malloc nfp pf vnic."); ret = -ENOMEM; goto mtr_priv_cleanup; } @@ -694,7 +785,7 @@ nfp_init_app_fw_flower(struct nfp_net_hw_priv *hw_priv) pf_dev->ctrl_bar = nfp_rtsym_map(pf_dev->sym_tbl, bar_name, pf_dev->ctrl_bar_size, &pf_dev->ctrl_area); if (pf_dev->ctrl_bar == NULL) { - PMD_INIT_LOG(ERR, "Cloud not map the PF vNIC ctrl bar"); + PMD_INIT_LOG(ERR, "Could not map the PF vNIC ctrl bar."); ret = -ENODEV; goto vnic_cleanup; } @@ -703,7 +794,7 @@ nfp_init_app_fw_flower(struct nfp_net_hw_priv *hw_priv) ext_features = nfp_rtsym_read_le(pf_dev->sym_tbl, "_abi_flower_extra_features", &err); if (err != 0) { - PMD_INIT_LOG(ERR, "Couldn't read extra features from fw"); + 
PMD_INIT_LOG(ERR, "Could not read extra features from fw."); ret = -EIO; goto pf_cpp_area_cleanup; } @@ -718,13 +809,13 @@ nfp_init_app_fw_flower(struct nfp_net_hw_priv *hw_priv) ret = nfp_flower_init_vnic_common(hw_priv, pf_hw, "pf_vnic"); if (ret != 0) { - PMD_INIT_LOG(ERR, "Could not initialize flower PF vNIC"); + PMD_INIT_LOG(ERR, "Could not initialize flower PF vNIC."); goto pf_cpp_area_cleanup; } ret = nfp_net_vf_config_app_init(pf_hw, pf_dev); if (ret != 0) { - PMD_INIT_LOG(ERR, "Failed to init sriov module"); + PMD_INIT_LOG(ERR, "Failed to init sriov module."); goto pf_cpp_area_cleanup; } @@ -739,35 +830,35 @@ nfp_init_app_fw_flower(struct nfp_net_hw_priv *hw_priv) ctrl_hw->super.ctrl_bar = nfp_rtsym_map(pf_dev->sym_tbl, ctrl_name, pf_dev->ctrl_bar_size, &ctrl_hw->ctrl_area); if (ctrl_hw->super.ctrl_bar == NULL) { - PMD_INIT_LOG(ERR, "Cloud not map the ctrl vNIC ctrl bar"); + PMD_INIT_LOG(ERR, "Could not map the ctrl vNIC ctrl bar."); ret = -ENODEV; goto pf_cpp_area_cleanup; } ret = nfp_flower_init_ctrl_vnic(app_fw_flower, hw_priv); if (ret != 0) { - PMD_INIT_LOG(ERR, "Could not initialize flower ctrl vNIC"); + PMD_INIT_LOG(ERR, "Could not initialize flower ctrl vNIC."); goto ctrl_cpp_area_cleanup; } /* Start the ctrl vNIC */ ret = nfp_flower_start_ctrl_vnic(app_fw_flower); if (ret != 0) { - PMD_INIT_LOG(ERR, "Could not start flower ctrl vNIC"); + PMD_INIT_LOG(ERR, "Could not start flower ctrl vNIC."); goto ctrl_vnic_cleanup; } /* Start up flower services */ ret = nfp_flower_service_start(hw_priv); if (ret != 0) { - PMD_INIT_LOG(ERR, "Could not enable flower services"); + PMD_INIT_LOG(ERR, "Could not enable flower services."); ret = -ESRCH; goto ctrl_vnic_cleanup; } ret = nfp_flower_repr_create(app_fw_flower, hw_priv); if (ret != 0) { - PMD_INIT_LOG(ERR, "Could not create representor ports"); + PMD_INIT_LOG(ERR, "Could not create representor ports."); goto ctrl_vnic_service_stop; } @@ -807,7 +898,7 @@ nfp_uninit_app_fw_flower(struct nfp_net_hw_priv *hw_priv) nfp_mtr_priv_uninit(pf_dev); nfp_flow_priv_uninit(pf_dev); if (rte_eth_switch_domain_free(app_fw_flower->switch_domain_id) != 0) - PMD_DRV_LOG(WARNING, "Failed to free switch domain for device"); + PMD_DRV_LOG(WARNING, "Failed to free switch domain for device."); rte_free(app_fw_flower); } @@ -833,12 +924,12 @@ nfp_secondary_init_app_fw_flower(struct nfp_net_hw_priv *hw_priv) pci_name = strchr(hw_priv->pf_dev->pci_dev->name, ':') + 1; snprintf(port_name, RTE_ETH_NAME_MAX_LEN, "%s_repr_pf", pci_name); - PMD_INIT_LOG(DEBUG, "Secondary attaching to port %s", port_name); + PMD_INIT_LOG(DEBUG, "Secondary attaching to port %s.", port_name); ret = rte_eth_dev_create(&hw_priv->pf_dev->pci_dev->device, port_name, 0, NULL, NULL, nfp_secondary_flower_init, hw_priv); if (ret != 0) { - PMD_INIT_LOG(ERR, "Secondary process attach to port %s failed", port_name); + PMD_INIT_LOG(ERR, "Secondary process attach to port %s failed.", port_name); return -ENODEV; } diff --git a/drivers/net/nfp/flower/nfp_flower.h b/drivers/net/nfp/flower/nfp_flower.h index 1dc868fb68..7b919a939b 100644 --- a/drivers/net/nfp/flower/nfp_flower.h +++ b/drivers/net/nfp/flower/nfp_flower.h @@ -115,7 +115,14 @@ bool nfp_flower_pf_dispatch_pkts(struct nfp_net_rxq *rxq, uint32_t port_id); uint16_t nfp_flower_pf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts); +uint16_t nfp_flower_multiple_pf_xmit_pkts(void *tx_queue, + struct rte_mbuf **tx_pkts, + uint16_t nb_pkts); +uint16_t nfp_flower_multiple_pf_recv_pkts(void *rx_queue, + struct rte_mbuf **tx_pkts, 
+ uint16_t nb_pkts); int nfp_flower_pf_start(struct rte_eth_dev *dev); +int nfp_flower_pf_stop(struct rte_eth_dev *dev); uint32_t nfp_flower_pkt_add_metadata(struct nfp_app_fw_flower *app_fw_flower, struct rte_mbuf *mbuf, uint32_t port_id); diff --git a/drivers/net/nfp/flower/nfp_flower_cmsg.c b/drivers/net/nfp/flower/nfp_flower_cmsg.c index 9a5b5f52b8..92bb927196 100644 --- a/drivers/net/nfp/flower/nfp_flower_cmsg.c +++ b/drivers/net/nfp/flower/nfp_flower_cmsg.c @@ -29,7 +29,7 @@ nfp_flower_cmsg_init(struct nfp_app_fw_flower *app_fw_flower, struct nfp_flower_cmsg_hdr *hdr; pkt = rte_pktmbuf_mtod(m, char *); - PMD_DRV_LOG(DEBUG, "flower_cmsg_init using pkt at %p", pkt); + PMD_DRV_LOG(DEBUG, "The flower_cmsg_init using pkt at %p.", pkt); new_size += nfp_flower_pkt_add_metadata(app_fw_flower, m, NFP_NET_META_PORT_ID_CTRL); @@ -94,7 +94,7 @@ nfp_flower_cmsg_mac_repr(struct nfp_app_fw_flower *app_fw_flower, mbuf = rte_pktmbuf_alloc(app_fw_flower->ctrl_pktmbuf_pool); if (mbuf == NULL) { - PMD_DRV_LOG(ERR, "Could not allocate mac repr cmsg"); + PMD_DRV_LOG(ERR, "Could not allocate mac repr cmsg."); return -ENOMEM; } @@ -131,7 +131,7 @@ nfp_flower_cmsg_repr_reify(struct nfp_app_fw_flower *app_fw_flower, mbuf = rte_pktmbuf_alloc(app_fw_flower->ctrl_pktmbuf_pool); if (mbuf == NULL) { - PMD_DRV_LOG(DEBUG, "alloc mbuf for repr reify failed"); + PMD_DRV_LOG(DEBUG, "Alloc mbuf for repr reify failed."); return -ENOMEM; } @@ -161,7 +161,7 @@ nfp_flower_cmsg_port_mod(struct nfp_app_fw_flower *app_fw_flower, mbuf = rte_pktmbuf_alloc(app_fw_flower->ctrl_pktmbuf_pool); if (mbuf == NULL) { - PMD_DRV_LOG(DEBUG, "alloc mbuf for repr portmod failed"); + PMD_DRV_LOG(DEBUG, "Alloc mbuf for repr portmod failed."); return -ENOMEM; } @@ -263,7 +263,7 @@ nfp_flower_cmsg_tun_neigh_v4_rule(struct nfp_app_fw_flower *app_fw_flower, mbuf = rte_pktmbuf_alloc(app_fw_flower->ctrl_pktmbuf_pool); if (mbuf == NULL) { - PMD_DRV_LOG(DEBUG, "Failed to alloc mbuf for v4 tun neigh"); + PMD_DRV_LOG(DEBUG, "Failed to alloc mbuf for v4 tun neigh."); return -ENOMEM; } @@ -295,7 +295,7 @@ nfp_flower_cmsg_tun_neigh_v6_rule(struct nfp_app_fw_flower *app_fw_flower, mbuf = rte_pktmbuf_alloc(app_fw_flower->ctrl_pktmbuf_pool); if (mbuf == NULL) { - PMD_DRV_LOG(DEBUG, "Failed to alloc mbuf for v6 tun neigh"); + PMD_DRV_LOG(DEBUG, "Failed to alloc mbuf for v6 tun neigh."); return -ENOMEM; } @@ -328,7 +328,7 @@ nfp_flower_cmsg_tun_off_v4(struct nfp_app_fw_flower *app_fw_flower) mbuf = rte_pktmbuf_alloc(app_fw_flower->ctrl_pktmbuf_pool); if (mbuf == NULL) { - PMD_DRV_LOG(DEBUG, "Failed to alloc mbuf for v4 tun addr"); + PMD_DRV_LOG(DEBUG, "Failed to alloc mbuf for v4 tun addr."); return -ENOMEM; } @@ -371,7 +371,7 @@ nfp_flower_cmsg_tun_off_v6(struct nfp_app_fw_flower *app_fw_flower) mbuf = rte_pktmbuf_alloc(app_fw_flower->ctrl_pktmbuf_pool); if (mbuf == NULL) { - PMD_DRV_LOG(DEBUG, "Failed to alloc mbuf for v6 tun addr"); + PMD_DRV_LOG(DEBUG, "Failed to alloc mbuf for v6 tun addr."); return -ENOMEM; } @@ -415,7 +415,7 @@ nfp_flower_cmsg_pre_tunnel_rule(struct nfp_app_fw_flower *app_fw_flower, mbuf = rte_pktmbuf_alloc(app_fw_flower->ctrl_pktmbuf_pool); if (mbuf == NULL) { - PMD_DRV_LOG(DEBUG, "Failed to alloc mbuf for pre tunnel rule"); + PMD_DRV_LOG(DEBUG, "Failed to alloc mbuf for pre tunnel rule."); return -ENOMEM; } @@ -457,7 +457,7 @@ nfp_flower_cmsg_tun_mac_rule(struct nfp_app_fw_flower *app_fw_flower, mbuf = rte_pktmbuf_alloc(app_fw_flower->ctrl_pktmbuf_pool); if (mbuf == NULL) { - PMD_DRV_LOG(DEBUG, "Failed to alloc mbuf for tunnel mac"); + 
PMD_DRV_LOG(DEBUG, "Failed to alloc mbuf for tunnel mac."); return -ENOMEM; } @@ -491,7 +491,7 @@ nfp_flower_cmsg_qos_add(struct nfp_app_fw_flower *app_fw_flower, mbuf = rte_pktmbuf_alloc(app_fw_flower->ctrl_pktmbuf_pool); if (mbuf == NULL) { - PMD_DRV_LOG(DEBUG, "Failed to alloc mbuf for qos add"); + PMD_DRV_LOG(DEBUG, "Failed to alloc mbuf for qos add."); return -ENOMEM; } @@ -521,7 +521,7 @@ nfp_flower_cmsg_qos_delete(struct nfp_app_fw_flower *app_fw_flower, mbuf = rte_pktmbuf_alloc(app_fw_flower->ctrl_pktmbuf_pool); if (mbuf == NULL) { - PMD_DRV_LOG(DEBUG, "Failed to alloc mbuf for qos delete"); + PMD_DRV_LOG(DEBUG, "Failed to alloc mbuf for qos delete."); return -ENOMEM; } @@ -551,7 +551,7 @@ nfp_flower_cmsg_qos_stats(struct nfp_app_fw_flower *app_fw_flower, mbuf = rte_pktmbuf_alloc(app_fw_flower->ctrl_pktmbuf_pool); if (mbuf == NULL) { - PMD_DRV_LOG(DEBUG, "Failed to alloc mbuf for qos stats"); + PMD_DRV_LOG(DEBUG, "Failed to alloc mbuf for qos stats."); return -ENOMEM; } diff --git a/drivers/net/nfp/flower/nfp_flower_cmsg.h b/drivers/net/nfp/flower/nfp_flower_cmsg.h index eda047a404..a7866e8382 100644 --- a/drivers/net/nfp/flower/nfp_flower_cmsg.h +++ b/drivers/net/nfp/flower/nfp_flower_cmsg.h @@ -1045,4 +1045,11 @@ int nfp_flower_cmsg_qos_delete(struct nfp_app_fw_flower *app_fw_flower, int nfp_flower_cmsg_qos_stats(struct nfp_app_fw_flower *app_fw_flower, struct nfp_cfg_head *head); +static inline bool +nfp_flower_port_is_phy_port(uint32_t port_id) +{ + return (NFP_FLOWER_CMSG_PORT_TYPE(port_id) == + NFP_FLOWER_CMSG_PORT_TYPE_PHYS_PORT); +} + #endif /* __NFP_CMSG_H__ */ diff --git a/drivers/net/nfp/flower/nfp_flower_ctrl.c b/drivers/net/nfp/flower/nfp_flower_ctrl.c index 9b957e1f1e..92887ce1be 100644 --- a/drivers/net/nfp/flower/nfp_flower_ctrl.c +++ b/drivers/net/nfp/flower/nfp_flower_ctrl.c @@ -38,7 +38,7 @@ nfp_flower_ctrl_vnic_recv(void *rx_queue, * DPDK just checks the queue is lower than max queues * enabled. But the queue needs to be configured. */ - PMD_RX_LOG(ERR, "RX Bad queue"); + PMD_RX_LOG(ERR, "RX Bad queue."); return 0; } @@ -46,7 +46,7 @@ nfp_flower_ctrl_vnic_recv(void *rx_queue, while (avail < nb_pkts) { rxb = &rxq->rxbufs[rxq->rd_p]; if (unlikely(rxb == NULL)) { - PMD_RX_LOG(ERR, "rxb does not exist!"); + PMD_RX_LOG(ERR, "The rxb does not exist!"); break; } @@ -66,7 +66,7 @@ nfp_flower_ctrl_vnic_recv(void *rx_queue, */ new_mb = rte_pktmbuf_alloc(rxq->mem_pool); if (unlikely(new_mb == NULL)) { - PMD_RX_LOG(ERR, "RX mbuf alloc failed port_id=%u queue_id=%hu", + PMD_RX_LOG(ERR, "RX mbuf alloc failed port_id=%u queue_id=%hu.", rxq->port_id, rxq->qidx); nfp_net_mbuf_alloc_failed(rxq); break; @@ -90,7 +90,7 @@ nfp_flower_ctrl_vnic_recv(void *rx_queue, * responsibility of avoiding it. But we have * to give some info about the error. */ - PMD_RX_LOG(ERR, "mbuf overflow likely due to the RX offset."); + PMD_RX_LOG(ERR, "The mbuf overflow likely due to the RX offset."); rte_pktmbuf_free(mb); break; } @@ -133,7 +133,7 @@ nfp_flower_ctrl_vnic_recv(void *rx_queue, */ rte_wmb(); if (nb_hold >= rxq->rx_free_thresh) { - PMD_RX_LOG(DEBUG, "port=%hu queue=%hu nb_hold=%hu avail=%hu", + PMD_RX_LOG(DEBUG, "The port=%hu queue=%hu nb_hold=%hu avail=%hu.", rxq->port_id, rxq->qidx, nb_hold, avail); nfp_qcp_ptr_add(rxq->qcp_fl, NFP_QCP_WRITE_PTR, nb_hold); nb_hold = 0; @@ -165,7 +165,7 @@ nfp_flower_ctrl_vnic_nfd3_xmit(struct nfp_app_fw_flower *app_fw_flower, * DPDK just checks the queue is lower than max queues * enabled. But the queue needs to be configured. 
*/ - PMD_TX_LOG(ERR, "ctrl dev TX Bad queue"); + PMD_TX_LOG(ERR, "Ctrl dev TX Bad queue."); goto xmit_end; } @@ -180,7 +180,7 @@ nfp_flower_ctrl_vnic_nfd3_xmit(struct nfp_app_fw_flower *app_fw_flower, free_descs = nfp_net_nfd3_free_tx_desc(txq); if (unlikely(free_descs == 0)) { - PMD_TX_LOG(ERR, "ctrl dev no free descs"); + PMD_TX_LOG(ERR, "Ctrl dev no free descs."); goto xmit_end; } @@ -236,7 +236,7 @@ nfp_flower_ctrl_vnic_nfdk_xmit(struct nfp_app_fw_flower *app_fw_flower, txq = ctrl_dev->data->tx_queues[0]; if (unlikely(mbuf->nb_segs > 1)) { - PMD_TX_LOG(ERR, "Multisegment packet not supported"); + PMD_TX_LOG(ERR, "Multisegment packet not supported."); return 0; } @@ -246,7 +246,7 @@ nfp_flower_ctrl_vnic_nfdk_xmit(struct nfp_app_fw_flower *app_fw_flower, free_descs = nfp_net_nfdk_free_tx_desc(txq); if (unlikely(free_descs < NFDK_TX_DESC_PER_SIMPLE_PKT)) { - PMD_TX_LOG(ERR, "ctrl dev no free descs"); + PMD_TX_LOG(ERR, "Ctrl dev no free descs."); return 0; } @@ -323,7 +323,7 @@ nfp_flower_ctrl_vnic_nfdk_xmit(struct nfp_app_fw_flower *app_fw_flower, used_descs = ktxds - txq->ktxds - txq->wr_p; if (RTE_ALIGN_FLOOR(txq->wr_p, NFDK_TX_DESC_BLOCK_CNT) != RTE_ALIGN_FLOOR(txq->wr_p + used_descs - 1, NFDK_TX_DESC_BLOCK_CNT)) { - PMD_TX_LOG(INFO, "Used descs cross block boundary"); + PMD_TX_LOG(INFO, "Used descs cross block boundary."); return 0; } @@ -436,18 +436,22 @@ nfp_flower_cmsg_port_mod_rx(struct nfp_net_hw_priv *hw_priv, break; case NFP_FLOWER_CMSG_PORT_TYPE_PCIE_PORT: index = NFP_FLOWER_CMSG_PORT_VNIC_OFFSET(port, hw_priv->pf_dev->vf_base_id); - if (NFP_FLOWER_CMSG_PORT_VNIC_TYPE(port) == NFP_FLOWER_CMSG_PORT_VNIC_TYPE_VF) - repr = app_fw_flower->vf_reprs[index]; - else + if (NFP_FLOWER_CMSG_PORT_VNIC_TYPE(port) == NFP_FLOWER_CMSG_PORT_VNIC_TYPE_VF) { + repr = app_fw_flower->vf_reprs[index]; + } else { + if (hw_priv->pf_dev->multi_pf.enabled) + return 0; + repr = app_fw_flower->pf_repr; + } break; default: - PMD_DRV_LOG(ERR, "ctrl msg for unknown port %#x", port); + PMD_DRV_LOG(ERR, "Ctrl msg for unknown port %#x.", port); return -EINVAL; } if (repr == NULL) { - PMD_DRV_LOG(ERR, "Can not get 'repr' for port %#x", port); + PMD_DRV_LOG(ERR, "Can not get 'repr' for port %#x.", port); return -EINVAL; } diff --git a/drivers/net/nfp/flower/nfp_flower_flow.c b/drivers/net/nfp/flower/nfp_flower_flow.c index df3fd5e2a2..ad0fe9a8ef 100644 --- a/drivers/net/nfp/flower/nfp_flower_flow.c +++ b/drivers/net/nfp/flower/nfp_flower_flow.c @@ -316,7 +316,7 @@ nfp_mask_table_add(struct nfp_app_fw_flower *app_fw_flower, mask_entry->mask_id = mask_id; mask_entry->hash_key = hash_key; mask_entry->ref_cnt = 1; - PMD_DRV_LOG(DEBUG, "hash_key=%#x id=%u ref=%u", hash_key, + PMD_DRV_LOG(DEBUG, "The hash_key=%#x id=%u ref=%u.", hash_key, mask_id, mask_entry->ref_cnt); ret = rte_hash_add_key_data(priv->mask_table, &hash_key, mask_entry); @@ -517,7 +517,7 @@ nfp_flow_table_delete_merge(struct nfp_flow_priv *priv, flow_find = nfp_flow_table_search(priv, nfp_flow); if (flow_find == NULL) { - PMD_DRV_LOG(ERR, "Can't delete a non-existing flow."); + PMD_DRV_LOG(ERR, "Can not delete a non-existing flow."); return -EINVAL; } @@ -1058,7 +1058,7 @@ nfp_flow_key_layers_check_items(const struct rte_flow_item items[], for (item = items; item->type != RTE_FLOW_ITEM_TYPE_END; ++item) { if (item->type >= RTE_DIM(check_item_fns)) { - PMD_DRV_LOG(ERR, "Flow item %d unsupported", item->type); + PMD_DRV_LOG(ERR, "Flow item %d unsupported.", item->type); return -ERANGE; } @@ -1068,7 +1068,7 @@ nfp_flow_key_layers_check_items(const struct 
rte_flow_item items[], param->item = item; ret = check_item_fns[item->type](param); if (ret != 0) { - PMD_DRV_LOG(ERR, "Flow item %d check fail", item->type); + PMD_DRV_LOG(ERR, "Flow item %d check fail.", item->type); return ret; } @@ -1264,7 +1264,7 @@ nfp_flow_key_layers_calculate_items(const struct rte_flow_item items[], for (item = items; item->type != RTE_FLOW_ITEM_TYPE_END; ++item) { if (item->type >= RTE_DIM(item_fns) || item_fns[item->type] == NULL) { - PMD_DRV_LOG(ERR, "Flow item %d unsupported", item->type); + PMD_DRV_LOG(ERR, "Flow item %d unsupported.", item->type); return -ERANGE; } @@ -1432,22 +1432,22 @@ nfp_flow_is_validate_field_data(const struct rte_flow_field_data *data, uint32_t data_width) { if (data->level != 0) { - PMD_DRV_LOG(ERR, "The 'level' is not support"); + PMD_DRV_LOG(ERR, "The 'level' is not supported."); return false; } if (data->tag_index != 0) { - PMD_DRV_LOG(ERR, "The 'tag_index' is not support"); + PMD_DRV_LOG(ERR, "The 'tag_index' is not supported."); return false; } if (data->class_id != 0) { - PMD_DRV_LOG(ERR, "The 'class_id' is not support"); + PMD_DRV_LOG(ERR, "The 'class_id' is not supported."); return false; } if (data->offset + conf_width > data_width) { - PMD_DRV_LOG(ERR, "The 'offset' value is too big"); + PMD_DRV_LOG(ERR, "The 'offset' value is too big."); return false; } @@ -1472,25 +1472,25 @@ nfp_flow_action_check_modify(struct nfp_action_calculate_param *param) src_data = &conf->src; if (!nfp_flow_field_id_dst_support(dst_data->field) || !nfp_flow_field_id_src_support(src_data->field)) { - PMD_DRV_LOG(ERR, "Not supported field id"); + PMD_DRV_LOG(ERR, "Not supported field id."); return -EINVAL; } width = conf->width; if (width == 0) { - PMD_DRV_LOG(ERR, "No bits are required to modify"); + PMD_DRV_LOG(ERR, "No bits are required to modify."); return -EINVAL; } dst_width = nfp_flow_field_width(dst_data->field, 0); src_width = nfp_flow_field_width(src_data->field, dst_width); if (width > dst_width || width > src_width) { - PMD_DRV_LOG(ERR, "Cannot modify more bits than the width of a field"); + PMD_DRV_LOG(ERR, "Can not modify more bits than the width of a field."); return -EINVAL; } if (!nfp_flow_is_validate_field_data(dst_data, width, dst_width)) { - PMD_DRV_LOG(ERR, "The dest field data has problem"); + PMD_DRV_LOG(ERR, "The dest field data has a problem."); return -EINVAL; } @@ -1505,14 +1505,14 @@ nfp_flow_action_check_queue(struct nfp_action_calculate_param *param) repr = param->dev->data->dev_private; if (!nfp_flow_support_partial(repr)) { - PMD_DRV_LOG(ERR, "Queue action not supported"); + PMD_DRV_LOG(ERR, "Queue action not supported."); return -ENOTSUP; } queue = param->action->conf; if (queue->index >= param->dev->data->nb_rx_queues || param->dev->data->rx_queues[queue->index] == NULL) { - PMD_DRV_LOG(ERR, "Queue index is illegal"); + PMD_DRV_LOG(ERR, "Queue index is illegal."); return -EINVAL; } @@ -1541,7 +1541,7 @@ nfp_flow_key_layers_check_actions(struct rte_eth_dev *dev, for (action = actions; action->type != RTE_FLOW_ACTION_TYPE_END; ++action) { if (action->type >= RTE_DIM(check_action_fns)) { - PMD_DRV_LOG(ERR, "Flow action %d unsupported", action->type); + PMD_DRV_LOG(ERR, "Flow action %d unsupported.", action->type); return -ERANGE; } @@ -1551,7 +1551,7 @@ nfp_flow_key_layers_check_actions(struct rte_eth_dev *dev, param.action = action; ret = check_action_fns[action->type](&param); if (ret != 0) { - PMD_DRV_LOG(ERR, "Flow action %d calculate fail", action->type); + PMD_DRV_LOG(ERR, "Flow action %d calculate fail.", action->type);
return ret; } } @@ -1790,7 +1790,7 @@ nfp_flow_key_layers_calculate_actions(struct rte_eth_dev *dev, } if (action->type >= RTE_DIM(action_fns) || action_fns[action->type] == NULL) { - PMD_DRV_LOG(ERR, "Flow action %d unsupported", action->type); + PMD_DRV_LOG(ERR, "Flow action %d unsupported.", action->type); return -ERANGE; } @@ -1800,7 +1800,7 @@ nfp_flow_key_layers_calculate_actions(struct rte_eth_dev *dev, if (param.flag->partial_both_flag && key_ls->act_size != sizeof(struct nfp_fl_act_partial)) { - PMD_DRV_LOG(ERR, "Mark and Queue can not be offloaded with other actions"); + PMD_DRV_LOG(ERR, "Mark and Queue can not be offloaded with other actions."); return -ENOTSUP; } @@ -1831,26 +1831,26 @@ nfp_flow_key_layers_calculate(struct rte_eth_dev *dev, ret = nfp_flow_key_layers_check_items(items, ¶m); if (ret != 0) { - PMD_DRV_LOG(ERR, "flow items check failed"); + PMD_DRV_LOG(ERR, "Flow items check failed."); return ret; } memset(param.flag, 0, sizeof(struct nfp_item_flag)); ret = nfp_flow_key_layers_calculate_items(items, ¶m); if (ret != 0) { - PMD_DRV_LOG(ERR, "flow items calculate failed"); + PMD_DRV_LOG(ERR, "Flow items calculate failed."); return ret; } ret = nfp_flow_key_layers_check_actions(dev, actions); if (ret != 0) { - PMD_DRV_LOG(ERR, "flow actions check failed"); + PMD_DRV_LOG(ERR, "Flow actions check failed."); return ret; } ret = nfp_flow_key_layers_calculate_actions(dev, actions, key_ls); if (ret != 0) { - PMD_DRV_LOG(ERR, "flow actions check failed"); + PMD_DRV_LOG(ERR, "Flow actions check failed."); return ret; } @@ -1893,7 +1893,7 @@ nfp_flow_merge_eth(struct nfp_flow_merge_param *param) item = param->item; spec = item->spec; if (spec == NULL) { - PMD_DRV_LOG(DEBUG, "nfp flow merge eth: no item->spec!"); + PMD_DRV_LOG(DEBUG, "NFP flow merge eth: no item->spec!"); goto eth_end; } @@ -1941,7 +1941,7 @@ nfp_flow_merge_vlan(struct nfp_flow_merge_param *param) item = param->item; spec = item->spec; if (spec == NULL) { - PMD_DRV_LOG(DEBUG, "nfp flow merge vlan: no item->spec!"); + PMD_DRV_LOG(DEBUG, "NFP flow merge vlan: no item->spec!"); return 0; } @@ -1979,7 +1979,7 @@ nfp_flow_merge_ipv4(struct nfp_flow_merge_param *param) if (param->is_outer_layer && nfp_flow_is_tunnel(param->nfp_flow)) { if (spec == NULL) { - PMD_DRV_LOG(DEBUG, "nfp flow merge ipv4: no item->spec!"); + PMD_DRV_LOG(DEBUG, "NFP flow merge ipv4: no item->spec!"); return 0; } @@ -2010,7 +2010,7 @@ nfp_flow_merge_ipv4(struct nfp_flow_merge_param *param) *param->mbuf_off += sizeof(struct nfp_flower_tp_ports); if (spec == NULL) { - PMD_DRV_LOG(DEBUG, "nfp flow merge ipv4: no item->spec!"); + PMD_DRV_LOG(DEBUG, "NFP flow merge ipv4: no item->spec!"); goto ipv4_end; } @@ -2053,7 +2053,7 @@ nfp_flow_merge_ipv6(struct nfp_flow_merge_param *param) if (param->is_outer_layer && nfp_flow_is_tunnel(param->nfp_flow)) { if (spec == NULL) { - PMD_DRV_LOG(DEBUG, "nfp flow merge ipv6: no item->spec!"); + PMD_DRV_LOG(DEBUG, "NFP flow merge ipv6: no item->spec!"); return 0; } @@ -2089,7 +2089,7 @@ nfp_flow_merge_ipv6(struct nfp_flow_merge_param *param) *param->mbuf_off += sizeof(struct nfp_flower_tp_ports); if (spec == NULL) { - PMD_DRV_LOG(DEBUG, "nfp flow merge ipv6: no item->spec!"); + PMD_DRV_LOG(DEBUG, "NFP flow merge ipv6: no item->spec!"); goto ipv6_end; } @@ -2142,14 +2142,14 @@ nfp_flow_merge_tcp(struct nfp_flow_merge_param *param) ports = (struct nfp_flower_tp_ports *) ((char *)ipv6 - sizeof(struct nfp_flower_tp_ports)); } else { - PMD_DRV_LOG(ERR, "nfp flow merge tcp: no L3 layer!"); + PMD_DRV_LOG(ERR, "NFP flow 
merge tcp: no L3 layer!"); return -EINVAL; } item = param->item; spec = item->spec; if (spec == NULL) { - PMD_DRV_LOG(DEBUG, "nfp flow merge tcp: no item->spec!"); + PMD_DRV_LOG(DEBUG, "NFP flow merge tcp: no item->spec!"); return 0; } @@ -2228,14 +2228,14 @@ nfp_flow_merge_udp(struct nfp_flow_merge_param *param) ports = (struct nfp_flower_tp_ports *) ((char *)ipv6 - sizeof(struct nfp_flower_tp_ports)); } else { - PMD_DRV_LOG(ERR, "nfp flow merge udp: no L3 layer!"); + PMD_DRV_LOG(ERR, "NFP flow merge udp: no L3 layer!"); return -EINVAL; } item = param->item; spec = item->spec; if (spec == NULL) { - PMD_DRV_LOG(DEBUG, "nfp flow merge udp: no item->spec!"); + PMD_DRV_LOG(DEBUG, "NFP flow merge udp: no item->spec!"); return 0; } @@ -2282,14 +2282,14 @@ nfp_flow_merge_sctp(struct nfp_flow_merge_param *param) ports = (struct nfp_flower_tp_ports *) ((char *)ipv6 - sizeof(struct nfp_flower_tp_ports)); } else { - PMD_DRV_LOG(ERR, "nfp flow merge sctp: no L3 layer!"); + PMD_DRV_LOG(ERR, "NFP flow merge sctp: no L3 layer!"); return -EINVAL; } item = param->item; spec = item->spec; if (spec == NULL) { - PMD_DRV_LOG(DEBUG, "nfp flow merge sctp: no item->spec!"); + PMD_DRV_LOG(DEBUG, "NFP flow merge sctp: no item->spec!"); return 0; } @@ -2325,7 +2325,7 @@ nfp_flow_merge_vxlan(struct nfp_flow_merge_param *param) item = param->item; spec = item->spec; if (spec == NULL) { - PMD_DRV_LOG(DEBUG, "nfp flow merge vxlan: no item->spec!"); + PMD_DRV_LOG(DEBUG, "NFP flow merge vxlan: no item->spec!"); goto vxlan_end; } @@ -2375,7 +2375,7 @@ nfp_flow_merge_geneve(struct nfp_flow_merge_param *param) item = param->item; spec = item->spec; if (spec == NULL) { - PMD_DRV_LOG(DEBUG, "nfp flow merge geneve: no item->spec!"); + PMD_DRV_LOG(DEBUG, "NFP flow merge geneve: no item->spec!"); goto geneve_end; } @@ -2457,7 +2457,7 @@ nfp_flow_merge_gre_key(struct nfp_flow_merge_param *param) item = param->item; spec = item->spec; if (spec == NULL) { - PMD_DRV_LOG(DEBUG, "nfp flow merge gre key: no item->spec!"); + PMD_DRV_LOG(DEBUG, "NFP flow merge gre key: no item->spec!"); goto gre_key_end; } @@ -2652,7 +2652,7 @@ nfp_flow_item_check(const struct rte_flow_item *item, /* item->last and item->mask cannot exist without item->spec. 
*/ if (item->spec == NULL) { if (item->mask || item->last) { - PMD_DRV_LOG(ERR, "'mask' or 'last' field provided" + PMD_DRV_LOG(ERR, "The 'mask' or 'last' field provided" " without a corresponding 'spec'."); return -EINVAL; } @@ -2764,7 +2764,7 @@ nfp_flow_compile_item_proc(struct nfp_flower_representor *repr, } if (proc == NULL) { - PMD_DRV_LOG(ERR, "No next item provided for %d", item->type); + PMD_DRV_LOG(ERR, "No next item provided for %d.", item->type); ret = -ENOTSUP; break; } @@ -2772,13 +2772,13 @@ nfp_flow_compile_item_proc(struct nfp_flower_representor *repr, /* Perform basic sanity checks */ ret = nfp_flow_item_check(item, proc); if (ret != 0) { - PMD_DRV_LOG(ERR, "nfp flow item %d check failed", item->type); + PMD_DRV_LOG(ERR, "NFP flow item %d check failed.", item->type); ret = -EINVAL; break; } if (proc->merge == NULL) { - PMD_DRV_LOG(ERR, "nfp flow item %d no proc function", item->type); + PMD_DRV_LOG(ERR, "NFP flow item %d no proc function.", item->type); ret = -ENOTSUP; break; } @@ -2796,7 +2796,7 @@ nfp_flow_compile_item_proc(struct nfp_flower_representor *repr, param.is_mask = false; ret = proc->merge(¶m); if (ret != 0) { - PMD_DRV_LOG(ERR, "nfp flow item %d exact merge failed", item->type); + PMD_DRV_LOG(ERR, "NFP flow item %d exact merge failed.", item->type); break; } @@ -2805,7 +2805,7 @@ nfp_flow_compile_item_proc(struct nfp_flower_representor *repr, param.is_mask = true; ret = proc->merge(¶m); if (ret != 0) { - PMD_DRV_LOG(ERR, "nfp flow item %d mask merge failed", item->type); + PMD_DRV_LOG(ERR, "NFP flow item %d mask merge failed.", item->type); break; } @@ -2853,7 +2853,7 @@ nfp_flow_compile_items(struct nfp_flower_representor *representor, ret = nfp_flow_compile_item_proc(representor, loop_item, nfp_flow, &mbuf_off_exact, &mbuf_off_mask, is_outer_layer); if (ret != 0) { - PMD_DRV_LOG(ERR, "nfp flow item compile failed."); + PMD_DRV_LOG(ERR, "NFP flow item compile failed."); return -EINVAL; } @@ -2862,7 +2862,7 @@ nfp_flow_compile_items(struct nfp_flower_representor *representor, ret = nfp_flow_compile_item_proc(representor, items, nfp_flow, &mbuf_off_exact, &mbuf_off_mask, true); if (ret != 0) { - PMD_DRV_LOG(ERR, "nfp flow outer item compile failed."); + PMD_DRV_LOG(ERR, "NFP flow outer item compile failed."); return -EINVAL; } } @@ -3497,7 +3497,7 @@ nfp_flower_del_tun_neigh(struct nfp_app_fw_flower *app_fw_flower, } if (!flag) { - PMD_DRV_LOG(DEBUG, "Can't find nn entry in the nn list"); + PMD_DRV_LOG(DEBUG, "Can not find nn entry in the nn list."); return -EINVAL; } @@ -3542,7 +3542,7 @@ nfp_flower_del_tun_neigh(struct nfp_app_fw_flower *app_fw_flower, } if (ret != 0) { - PMD_DRV_LOG(DEBUG, "Failed to send the nn entry"); + PMD_DRV_LOG(DEBUG, "Failed to send the nn entry."); return -EINVAL; } @@ -3678,7 +3678,7 @@ nfp_pre_tun_table_search(struct nfp_flow_priv *priv, hash_key = rte_jhash(hash_data, hash_len, priv->hash_seed); index = rte_hash_lookup_data(priv->pre_tun_table, &hash_key, (void **)&mac_index); if (index < 0) { - PMD_DRV_LOG(DEBUG, "Data NOT found in the hash table"); + PMD_DRV_LOG(DEBUG, "Data NOT found in the hash table."); return NULL; } @@ -3696,7 +3696,7 @@ nfp_pre_tun_table_add(struct nfp_flow_priv *priv, hash_key = rte_jhash(hash_data, hash_len, priv->hash_seed); ret = rte_hash_add_key_data(priv->pre_tun_table, &hash_key, hash_data); if (ret != 0) { - PMD_DRV_LOG(ERR, "Add to pre tunnel table failed"); + PMD_DRV_LOG(ERR, "Add to pre tunnel table failed."); return false; } @@ -3714,7 +3714,7 @@ nfp_pre_tun_table_delete(struct nfp_flow_priv 
*priv, hash_key = rte_jhash(hash_data, hash_len, priv->hash_seed); ret = rte_hash_del_key(priv->pre_tun_table, &hash_key); if (ret < 0) { - PMD_DRV_LOG(ERR, "Delete from pre tunnel table failed"); + PMD_DRV_LOG(ERR, "Delete from pre tunnel table failed."); return false; } @@ -3734,14 +3734,14 @@ nfp_pre_tun_table_check_add(struct nfp_flower_representor *repr, priv = repr->app_fw_flower->flow_priv; if (priv->pre_tun_cnt >= NFP_TUN_PRE_TUN_RULE_LIMIT) { - PMD_DRV_LOG(ERR, "Pre tunnel table has full"); + PMD_DRV_LOG(ERR, "Pre tunnel table is full."); return -EINVAL; } entry_size = sizeof(struct nfp_pre_tun_entry); entry = rte_zmalloc("nfp_pre_tun", entry_size, 0); if (entry == NULL) { - PMD_DRV_LOG(ERR, "Memory alloc failed for pre tunnel table"); + PMD_DRV_LOG(ERR, "Memory alloc failed for pre tunnel table."); return -ENOMEM; } @@ -3803,7 +3803,7 @@ nfp_pre_tun_table_check_del(struct nfp_flower_representor *repr, entry_size = sizeof(struct nfp_pre_tun_entry); entry = rte_zmalloc("nfp_pre_tun", entry_size, 0); if (entry == NULL) { - PMD_DRV_LOG(ERR, "Memory alloc failed for pre tunnel table"); + PMD_DRV_LOG(ERR, "Memory alloc failed for pre tunnel table."); return -ENOMEM; } @@ -3837,7 +3837,7 @@ nfp_pre_tun_table_check_del(struct nfp_flower_representor *repr, ret = nfp_flower_cmsg_tun_mac_rule(repr->app_fw_flower, &repr->mac_addr, nfp_mac_idx, true); if (ret != 0) { - PMD_DRV_LOG(ERR, "Send tunnel mac rule failed"); + PMD_DRV_LOG(ERR, "Send tunnel mac rule failed."); ret = -EINVAL; goto free_entry; } @@ -3846,7 +3846,7 @@ nfp_pre_tun_table_check_del(struct nfp_flower_representor *repr, ret = nfp_flower_cmsg_pre_tunnel_rule(repr->app_fw_flower, nfp_flow_meta, nfp_mac_idx, true); if (ret != 0) { - PMD_DRV_LOG(ERR, "Send pre tunnel rule failed"); + PMD_DRV_LOG(ERR, "Send pre tunnel rule failed."); ret = -EINVAL; goto free_entry; } @@ -3854,7 +3854,7 @@ nfp_pre_tun_table_check_del(struct nfp_flower_representor *repr, find_entry->ref_cnt = 1U; if (!nfp_pre_tun_table_delete(priv, (char *)find_entry, entry_size)) { - PMD_DRV_LOG(ERR, "Delete entry from pre tunnel table failed"); + PMD_DRV_LOG(ERR, "Delete entry from pre tunnel table failed."); ret = -EINVAL; goto free_entry; } @@ -3881,7 +3881,7 @@ nfp_flow_action_tunnel_decap(struct nfp_flower_representor *repr, ret = nfp_pre_tun_table_check_add(repr, &nfp_mac_idx); if (ret != 0) { - PMD_DRV_LOG(ERR, "Pre tunnel table add failed"); + PMD_DRV_LOG(ERR, "Pre tunnel table add failed."); return -EINVAL; } @@ -3895,7 +3895,7 @@ nfp_flow_action_tunnel_decap(struct nfp_flower_representor *repr, ret = nfp_flower_cmsg_tun_mac_rule(app_fw_flower, &repr->mac_addr, nfp_mac_idx, false); if (ret != 0) { - PMD_DRV_LOG(ERR, "Send tunnel mac rule failed"); + PMD_DRV_LOG(ERR, "Send tunnel mac rule failed."); return -EINVAL; } @@ -3903,7 +3903,7 @@ nfp_flow_action_tunnel_decap(struct nfp_flower_representor *repr, ret = nfp_flower_cmsg_pre_tunnel_rule(app_fw_flower, nfp_flow_meta, nfp_mac_idx, false); if (ret != 0) { - PMD_DRV_LOG(ERR, "Send pre tunnel rule failed"); + PMD_DRV_LOG(ERR, "Send pre tunnel rule failed."); return -EINVAL; } } @@ -4144,17 +4144,17 @@ nfp_flow_action_meter(struct nfp_flower_representor *representor, mtr = nfp_mtr_find_by_mtr_id(app_fw_flower->mtr_priv, meter->mtr_id); if (mtr == NULL) { - PMD_DRV_LOG(ERR, "Meter id not exist"); + PMD_DRV_LOG(ERR, "Meter id does not exist."); return -EINVAL; } if (!mtr->enable) { - PMD_DRV_LOG(ERR, "Requested meter disable"); + PMD_DRV_LOG(ERR, "Requested meter is disabled."); return -EINVAL; } if (!mtr->shared &&
mtr->ref_cnt > 0) { - PMD_DRV_LOG(ERR, "Can't use a used unshared meter"); + PMD_DRV_LOG(ERR, "Can not use a used unshared meter."); return -EINVAL; } @@ -4349,7 +4349,7 @@ nfp_flow_action_compile_output(struct nfp_action_compile_param *param) ret = nfp_flow_action_output(param->position, param->action, param->nfp_flow_meta, output_cnt); if (ret != 0) { - PMD_DRV_LOG(ERR, "Failed process output action"); + PMD_DRV_LOG(ERR, "Failed process output action."); return ret; } @@ -4400,7 +4400,7 @@ nfp_flow_action_compile_push_vlan(struct nfp_action_compile_param *param) ret = nfp_flow_action_push_vlan(param->position, param->action); if (ret != 0) { - PMD_DRV_LOG(ERR, "Failed process RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN"); + PMD_DRV_LOG(ERR, "Failed process RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN."); return ret; } @@ -4539,7 +4539,7 @@ nfp_flow_action_compile_vxlan_encap(struct nfp_action_compile_param *param) param->position, param->action_data, param->action, param->nfp_flow_meta, ¶m->nfp_flow->tun); if (ret != 0) { - PMD_DRV_LOG(ERR, "Failed process RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP"); + PMD_DRV_LOG(ERR, "Failed process RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP."); return ret; } @@ -4559,7 +4559,7 @@ nfp_flow_action_compile_raw_encap(struct nfp_action_compile_param *param) param->position, param->action_data, param->action, param->nfp_flow_meta, ¶m->nfp_flow->tun); if (ret != 0) { - PMD_DRV_LOG(ERR, "Failed process RTE_FLOW_ACTION_TYPE_RAW_ENCAP"); + PMD_DRV_LOG(ERR, "Failed process RTE_FLOW_ACTION_TYPE_RAW_ENCAP."); return ret; } @@ -4578,7 +4578,7 @@ nfp_flow_action_compile_tnl_decap(struct nfp_action_compile_param *param) ret = nfp_flow_action_tunnel_decap(param->repr, param->action, param->nfp_flow_meta, param->nfp_flow); if (ret != 0) { - PMD_DRV_LOG(ERR, "Failed process tunnel decap"); + PMD_DRV_LOG(ERR, "Failed process tunnel decap."); return ret; } @@ -4598,7 +4598,7 @@ nfp_flow_action_compile_meter(struct nfp_action_compile_param *param) ret = nfp_flow_action_meter(param->repr, param->action, param->position, ¶m->nfp_flow->mtr_id); if (ret != 0) { - PMD_DRV_LOG(ERR, "Failed process RTE_FLOW_ACTION_TYPE_METER"); + PMD_DRV_LOG(ERR, "Failed process RTE_FLOW_ACTION_TYPE_METER."); return -EINVAL; } @@ -4683,7 +4683,7 @@ nfp_flow_action_compile_rss(struct nfp_action_compile_param *param) ret = nfp_flow_action_rss_add(param->repr, param->action, ¶m->nfp_flow->rss); if (ret != 0) { - PMD_DRV_LOG(ERR, "Failed process RTE_FLOW_ACTION_TYPE_RSS"); + PMD_DRV_LOG(ERR, "Failed process RTE_FLOW_ACTION_TYPE_RSS."); return ret; } @@ -4747,7 +4747,7 @@ nfp_flow_action_compile_modify(struct nfp_action_compile_param *param) } else if (conf->src.field == RTE_FLOW_FIELD_VALUE) { action.conf = (void *)(uintptr_t)&conf->src.value; } else { - PMD_DRV_LOG(ERR, "The SRC field of flow modify is not right"); + PMD_DRV_LOG(ERR, "The SRC field of flow modify is not right."); return -EINVAL; } @@ -4757,7 +4757,7 @@ nfp_flow_action_compile_modify(struct nfp_action_compile_param *param) param->action = &action; ret = nfp_flow_action_compile_modify_dispatch(param, conf->dst.field); if (ret != 0) - PMD_DRV_LOG(ERR, "Something wrong when modify dispatch"); + PMD_DRV_LOG(ERR, "Something wrong when modify dispatch."); /* Reload the old action pointer */ param->action = action_old; @@ -4825,14 +4825,14 @@ nfp_flow_compile_action(struct nfp_flower_representor *representor, for (action = actions; action->type != RTE_FLOW_ACTION_TYPE_END; ++action) { if (action->type >= RTE_DIM(action_compile_fns) || action_compile_fns[action->type] == NULL) { - 
PMD_DRV_LOG(ERR, "Flow action %d unsupported", action->type); + PMD_DRV_LOG(ERR, "Flow action %d unsupported.", action->type); return -ERANGE; } param.action = action; ret = action_compile_fns[action->type](¶m); if (ret != 0) { - PMD_DRV_LOG(ERR, "Flow action %d compile fail", action->type); + PMD_DRV_LOG(ERR, "Flow action %d compile fail.", action->type); return ret; } @@ -4840,7 +4840,7 @@ nfp_flow_compile_action(struct nfp_flower_representor *representor, } if (nfp_flow->install_flag && total_actions == 0) { - PMD_DRV_LOG(ERR, "The action list is empty"); + PMD_DRV_LOG(ERR, "The action list is empty."); return -ENOTSUP; } @@ -4888,7 +4888,7 @@ nfp_flow_process(struct rte_eth_dev *dev, ret = nfp_stats_id_alloc(representor->app_fw_flower, &stats_ctx); if (ret != 0) { - PMD_DRV_LOG(ERR, "nfp stats id alloc failed."); + PMD_DRV_LOG(ERR, "NFP stats id alloc failed."); return NULL; } @@ -4906,13 +4906,13 @@ nfp_flow_process(struct rte_eth_dev *dev, ret = nfp_flow_compile_items(representor, items, nfp_flow); if (ret != 0) { - PMD_DRV_LOG(ERR, "nfp flow item process failed."); + PMD_DRV_LOG(ERR, "NFP flow item process failed."); goto free_flow; } ret = nfp_flow_compile_action(representor, actions, nfp_flow); if (ret != 0) { - PMD_DRV_LOG(ERR, "nfp flow action process failed."); + PMD_DRV_LOG(ERR, "NFP flow action process failed."); goto free_flow; } @@ -4921,7 +4921,7 @@ nfp_flow_process(struct rte_eth_dev *dev, mask_len = key_layer.key_size; if (!nfp_check_mask_add(representor->app_fw_flower, mask_data, mask_len, &nfp_flow_meta->flags, &new_mask_id)) { - PMD_DRV_LOG(ERR, "nfp mask add check failed."); + PMD_DRV_LOG(ERR, "NFP mask add check failed."); goto free_flow; } @@ -4938,7 +4938,7 @@ nfp_flow_process(struct rte_eth_dev *dev, PMD_DRV_LOG(ERR, "This flow is already exist."); if (!nfp_check_mask_remove(representor->app_fw_flower, mask_data, mask_len, &nfp_flow_meta->flags)) { - PMD_DRV_LOG(ERR, "nfp mask del check failed."); + PMD_DRV_LOG(ERR, "NFP mask del check failed."); } goto free_flow; } @@ -5011,7 +5011,7 @@ nfp_flow_teardown(struct nfp_app_fw_flower *app_fw_flower, nfp_flow_meta->flags &= ~NFP_FL_META_FLAG_MANAGE_MASK; if (!nfp_check_mask_remove(app_fw_flower, mask_data, mask_len, &nfp_flow_meta->flags)) { - PMD_DRV_LOG(ERR, "nfp mask del check failed."); + PMD_DRV_LOG(ERR, "NFP mask del check failed."); return -EINVAL; } @@ -5510,20 +5510,20 @@ nfp_flow_priv_init(struct nfp_pf_dev *pf_dev) ctx_count = nfp_rtsym_read_le(pf_dev->sym_tbl, "CONFIG_FC_HOST_CTX_COUNT", &ret); if (ret < 0) { - PMD_INIT_LOG(ERR, "Read CTX_COUNT from symbol table failed"); + PMD_INIT_LOG(ERR, "Read CTX_COUNT from symbol table failed."); goto exit; } ctx_split = nfp_rtsym_read_le(pf_dev->sym_tbl, "CONFIG_FC_HOST_CTX_SPLIT", &ret); if (ret < 0) { - PMD_INIT_LOG(ERR, "Read CTX_SPLIT from symbol table failed"); + PMD_INIT_LOG(ERR, "Read CTX_SPLIT from symbol table failed."); goto exit; } priv = rte_zmalloc("nfp_app_flow_priv", sizeof(struct nfp_flow_priv), 0); if (priv == NULL) { - PMD_INIT_LOG(ERR, "nfp app flow priv creation failed"); + PMD_INIT_LOG(ERR, "NFP app flow priv creation failed."); ret = -ENOMEM; goto exit; } @@ -5541,7 +5541,7 @@ nfp_flow_priv_init(struct nfp_pf_dev *pf_dev) priv->mask_ids.free_list.buf = rte_zmalloc("nfp_app_mask_ids", NFP_FLOWER_MASK_ENTRY_RS * NFP_FLOWER_MASK_ELEMENT_RS, 0); if (priv->mask_ids.free_list.buf == NULL) { - PMD_INIT_LOG(ERR, "mask id free list creation failed"); + PMD_INIT_LOG(ERR, "Mask id free list creation failed."); ret = -ENOMEM; goto free_priv; } @@ -5551,7 
+5551,7 @@ nfp_flow_priv_init(struct nfp_pf_dev *pf_dev) priv->stats_ids.free_list.buf = rte_zmalloc("nfp_app_stats_ids", priv->stats_ring_size * NFP_FL_STATS_ELEM_RS, 0); if (priv->stats_ids.free_list.buf == NULL) { - PMD_INIT_LOG(ERR, "stats id free list creation failed"); + PMD_INIT_LOG(ERR, "Stats id free list creation failed."); ret = -ENOMEM; goto free_mask_id; } @@ -5561,12 +5561,12 @@ nfp_flow_priv_init(struct nfp_pf_dev *pf_dev) rte_spinlock_init(&priv->stats_lock); stats_size = (ctx_count & NFP_FL_STAT_ID_STAT) | ((ctx_split - 1) & NFP_FL_STAT_ID_MU_NUM); - PMD_INIT_LOG(INFO, "ctx_count:%0lx, ctx_split:%0lx, stats_size:%0lx ", + PMD_INIT_LOG(INFO, "The ctx_count:%0lx, ctx_split:%0lx, stats_size:%0lx .", ctx_count, ctx_split, stats_size); priv->stats = rte_zmalloc("nfp_flow_stats", stats_size * sizeof(struct nfp_fl_stats), 0); if (priv->stats == NULL) { - PMD_INIT_LOG(ERR, "flow stats creation failed"); + PMD_INIT_LOG(ERR, "Flow stats creation failed."); ret = -ENOMEM; goto free_stats_id; } @@ -5575,7 +5575,7 @@ nfp_flow_priv_init(struct nfp_pf_dev *pf_dev) mask_hash_params.hash_func_init_val = priv->hash_seed; priv->mask_table = rte_hash_create(&mask_hash_params); if (priv->mask_table == NULL) { - PMD_INIT_LOG(ERR, "mask hash table creation failed"); + PMD_INIT_LOG(ERR, "Mask hash table creation failed."); ret = -ENOMEM; goto free_stats; } @@ -5585,7 +5585,7 @@ nfp_flow_priv_init(struct nfp_pf_dev *pf_dev) flow_hash_params.entries = ctx_count; priv->flow_table = rte_hash_create(&flow_hash_params); if (priv->flow_table == NULL) { - PMD_INIT_LOG(ERR, "flow hash table creation failed"); + PMD_INIT_LOG(ERR, "Flow hash table creation failed."); ret = -ENOMEM; goto free_mask_table; } @@ -5595,7 +5595,7 @@ nfp_flow_priv_init(struct nfp_pf_dev *pf_dev) pre_tun_hash_params.hash_func_init_val = priv->hash_seed; priv->pre_tun_table = rte_hash_create(&pre_tun_hash_params); if (priv->pre_tun_table == NULL) { - PMD_INIT_LOG(ERR, "Pre tunnel table creation failed"); + PMD_INIT_LOG(ERR, "Pre tunnel table creation failed."); ret = -ENOMEM; goto free_flow_table; } @@ -5604,7 +5604,7 @@ nfp_flow_priv_init(struct nfp_pf_dev *pf_dev) ct_zone_hash_params.hash_func_init_val = priv->hash_seed; priv->ct_zone_table = rte_hash_create(&ct_zone_hash_params); if (priv->ct_zone_table == NULL) { - PMD_INIT_LOG(ERR, "ct zone table creation failed"); + PMD_INIT_LOG(ERR, "CT zone table creation failed."); ret = -ENOMEM; goto free_pre_tnl_table; } @@ -5614,7 +5614,7 @@ nfp_flow_priv_init(struct nfp_pf_dev *pf_dev) ct_map_hash_params.entries = ctx_count; priv->ct_map_table = rte_hash_create(&ct_map_hash_params); if (priv->ct_map_table == NULL) { - PMD_INIT_LOG(ERR, "ct map table creation failed"); + PMD_INIT_LOG(ERR, "CT map table creation failed."); ret = -ENOMEM; goto free_ct_zone_table; } diff --git a/drivers/net/nfp/flower/nfp_flower_representor.c b/drivers/net/nfp/flower/nfp_flower_representor.c index eae6ba39e1..4017f602a2 100644 --- a/drivers/net/nfp/flower/nfp_flower_representor.c +++ b/drivers/net/nfp/flower/nfp_flower_representor.c @@ -24,6 +24,94 @@ struct nfp_repr_init { struct nfp_net_hw_priv *hw_priv; }; +static int +nfp_repr_get_eeprom_len(struct rte_eth_dev *dev) +{ + struct nfp_flower_representor *repr; + + repr = dev->data->dev_private; + if (!nfp_flower_repr_is_phy(repr)) + return -EOPNOTSUPP; + + return nfp_net_get_eeprom_len(dev); +} + +static int +nfp_repr_get_eeprom(struct rte_eth_dev *dev, + struct rte_dev_eeprom_info *eeprom) +{ + struct nfp_flower_representor *repr; + + repr = 
dev->data->dev_private; + if (!nfp_flower_repr_is_phy(repr)) + return -EOPNOTSUPP; + + return nfp_net_get_eeprom(dev, eeprom); +} + +static int +nfp_repr_set_eeprom(struct rte_eth_dev *dev, + struct rte_dev_eeprom_info *eeprom) +{ + struct nfp_flower_representor *repr; + + repr = dev->data->dev_private; + if (!nfp_flower_repr_is_phy(repr)) + return -EOPNOTSUPP; + + return nfp_net_set_eeprom(dev, eeprom); +} + +static int +nfp_repr_get_module_info(struct rte_eth_dev *dev, + struct rte_eth_dev_module_info *info) +{ + struct nfp_flower_representor *repr; + + repr = dev->data->dev_private; + if (!nfp_flower_repr_is_phy(repr)) + return -EOPNOTSUPP; + + return nfp_net_get_module_info(dev, info); +} + +static int +nfp_repr_get_module_eeprom(struct rte_eth_dev *dev, + struct rte_dev_eeprom_info *info) +{ + struct nfp_flower_representor *repr; + + repr = dev->data->dev_private; + if (!nfp_flower_repr_is_phy(repr)) + return -EOPNOTSUPP; + + return nfp_net_get_module_eeprom(dev, info); +} + +static int +nfp_flower_repr_led_on(struct rte_eth_dev *dev) +{ + struct nfp_flower_representor *repr; + + repr = dev->data->dev_private; + if (!nfp_flower_repr_is_phy(repr)) + return -EOPNOTSUPP; + + return nfp_net_led_on(dev); +} + +static int +nfp_flower_repr_led_off(struct rte_eth_dev *dev) +{ + struct nfp_flower_representor *repr; + + repr = dev->data->dev_private; + if (!nfp_flower_repr_is_phy(repr)) + return -EOPNOTSUPP; + + return nfp_net_led_off(dev); +} + static int nfp_flower_repr_link_update(struct rte_eth_dev *dev, __rte_unused int wait_to_complete) @@ -96,7 +184,7 @@ nfp_flower_repr_dev_start(struct rte_eth_dev *dev) hw_priv = dev->process_private; app_fw_flower = repr->app_fw_flower; - if (repr->repr_type == NFP_REPR_TYPE_PHYS_PORT) { + if (nfp_flower_repr_is_phy(repr)) { ret = nfp_eth_set_configured(hw_priv->pf_dev->cpp, repr->nfp_idx, 1); if (ret < 0) return ret; @@ -127,7 +215,7 @@ nfp_flower_repr_dev_stop(struct rte_eth_dev *dev) nfp_flower_cmsg_port_mod(app_fw_flower, repr->port_id, false); - if (repr->repr_type == NFP_REPR_TYPE_PHYS_PORT) { + if (nfp_flower_repr_is_phy(repr)) { ret = nfp_eth_set_configured(hw_priv->pf_dev->cpp, repr->nfp_idx, 0); if (ret == 1) ret = 0; @@ -167,7 +255,7 @@ nfp_flower_repr_rx_queue_setup(struct rte_eth_dev *dev, repr->ring[rx_queue_id] = rte_ring_create(ring_name, nb_rx_desc, rte_socket_id(), 0); if (repr->ring[rx_queue_id] == NULL) { - PMD_DRV_LOG(ERR, "rte_ring_create failed for rx queue %u", rx_queue_id); + PMD_DRV_LOG(ERR, "The rte_ring_create failed for rx queue %u.", rx_queue_id); rte_free(rxq); return -ENOMEM; } @@ -259,7 +347,7 @@ nfp_flower_repr_rx_burst(void *rx_queue, rxq = rx_queue; if (unlikely(rxq == NULL)) { - PMD_RX_LOG(ERR, "RX Bad queue"); + PMD_RX_LOG(ERR, "RX Bad queue."); return 0; } @@ -267,7 +355,7 @@ nfp_flower_repr_rx_burst(void *rx_queue, repr = dev->data->dev_private; if (unlikely(repr->ring == NULL) || unlikely(repr->ring[rxq->qidx] == NULL)) { - PMD_RX_LOG(ERR, "representor %s has no ring configured!", + PMD_RX_LOG(ERR, "Representor %s has no ring configured!", repr->name); return 0; } @@ -275,7 +363,7 @@ nfp_flower_repr_rx_burst(void *rx_queue, total_dequeue = rte_ring_dequeue_burst(repr->ring[rxq->qidx], (void *)rx_pkts, nb_pkts, &available); if (total_dequeue != 0) { - PMD_RX_LOG(DEBUG, "Port: %#x, queue: %hu received: %u, available: %u", + PMD_RX_LOG(DEBUG, "Port: %#x, queue: %hu received: %u, available: %u.", repr->port_id, rxq->qidx, total_dequeue, available); data_len = 0; @@ -306,7 +394,7 @@ nfp_flower_repr_tx_burst(void 
*tx_queue, txq = tx_queue; if (unlikely(txq == NULL)) { - PMD_TX_LOG(ERR, "TX Bad queue"); + PMD_TX_LOG(ERR, "TX Bad queue."); return 0; } @@ -324,7 +412,7 @@ nfp_flower_repr_tx_burst(void *tx_queue, pf_tx_queue = dev->data->tx_queues[txq->qidx]; sent = nfp_flower_pf_xmit_pkts(pf_tx_queue, tx_pkts, nb_pkts); if (sent != 0) { - PMD_TX_LOG(DEBUG, "Port: %#x transmitted: %hu queue: %u", + PMD_TX_LOG(DEBUG, "Port: %#x transmitted: %hu queue: %u.", repr->port_id, sent, txq->qidx); data_len = 0; @@ -373,9 +461,15 @@ static void nfp_flower_repr_close_queue(struct rte_eth_dev *eth_dev, enum nfp_repr_type repr_type) { + struct nfp_net_hw_priv *hw_priv; + switch (repr_type) { case NFP_REPR_TYPE_PHYS_PORT: - nfp_flower_repr_free_queue(eth_dev); + hw_priv = eth_dev->process_private; + if (hw_priv->pf_dev->multi_pf.enabled) + nfp_flower_pf_repr_close_queue(eth_dev); + else + nfp_flower_repr_free_queue(eth_dev); break; case NFP_REPR_TYPE_PF: nfp_flower_pf_repr_close_queue(eth_dev); @@ -389,6 +483,12 @@ nfp_flower_repr_close_queue(struct rte_eth_dev *eth_dev, } } +static void +nfp_flower_repr_base_uninit(struct nfp_flower_representor *repr) +{ + rte_free(repr->repr_xstats_base); +} + static int nfp_flower_repr_uninit(struct rte_eth_dev *eth_dev) { @@ -396,10 +496,10 @@ nfp_flower_repr_uninit(struct rte_eth_dev *eth_dev) struct nfp_flower_representor *repr; repr = eth_dev->data->dev_private; - rte_free(repr->repr_xstats_base); + nfp_flower_repr_base_uninit(repr); rte_free(repr->ring); - if (repr->repr_type == NFP_REPR_TYPE_PHYS_PORT) { + if (nfp_flower_repr_is_phy(repr)) { index = NFP_FLOWER_CMSG_PORT_PHYS_PORT_NUM(repr->port_id); repr->app_fw_flower->phy_reprs[index] = NULL; } else { @@ -511,6 +611,47 @@ static const struct eth_dev_ops nfp_flower_pf_repr_dev_ops = { .fw_version_get = nfp_net_firmware_version_get, }; +static const struct eth_dev_ops nfp_flower_multiple_pf_repr_dev_ops = { + .dev_infos_get = nfp_flower_repr_dev_infos_get, + + .dev_start = nfp_flower_pf_start, + .dev_configure = nfp_net_configure, + .dev_stop = nfp_flower_pf_stop, + .dev_close = nfp_flower_repr_dev_close, + + .rx_queue_setup = nfp_net_rx_queue_setup, + .tx_queue_setup = nfp_net_tx_queue_setup, + + .link_update = nfp_flower_repr_link_update, + + .stats_get = nfp_flower_repr_stats_get, + .stats_reset = nfp_flower_repr_stats_reset, + + .promiscuous_enable = nfp_net_promisc_enable, + .promiscuous_disable = nfp_net_promisc_disable, + + .mac_addr_set = nfp_flower_repr_mac_addr_set, + .fw_version_get = nfp_net_firmware_version_get, + + .flow_ops_get = nfp_flow_ops_get, + .mtr_ops_get = nfp_net_mtr_ops_get, + + .xstats_get = nfp_net_xstats_get, + .xstats_reset = nfp_net_xstats_reset, + .xstats_get_names = nfp_net_xstats_get_names, + .xstats_get_by_id = nfp_net_xstats_get_by_id, + .xstats_get_names_by_id = nfp_net_xstats_get_names_by_id, + + .get_eeprom_length = nfp_repr_get_eeprom_len, + .get_eeprom = nfp_repr_get_eeprom, + .set_eeprom = nfp_repr_set_eeprom, + .get_module_info = nfp_repr_get_module_info, + .get_module_eeprom = nfp_repr_get_module_eeprom, + + .dev_led_on = nfp_flower_repr_led_on, + .dev_led_off = nfp_flower_repr_led_off, +}; + static const struct eth_dev_ops nfp_flower_repr_dev_ops = { .dev_infos_get = nfp_flower_repr_dev_infos_get, @@ -541,6 +682,15 @@ static const struct eth_dev_ops nfp_flower_repr_dev_ops = { .xstats_get_names = nfp_net_xstats_get_names, .xstats_get_by_id = nfp_net_xstats_get_by_id, .xstats_get_names_by_id = nfp_net_xstats_get_names_by_id, + + .get_eeprom_length = nfp_repr_get_eeprom_len, + 
.get_eeprom = nfp_repr_get_eeprom, + .set_eeprom = nfp_repr_set_eeprom, + .get_module_info = nfp_repr_get_module_info, + .get_module_eeprom = nfp_repr_get_module_eeprom, + + .dev_led_on = nfp_flower_repr_led_on, + .dev_led_off = nfp_flower_repr_led_off, }; static uint32_t @@ -603,7 +753,7 @@ nfp_flower_pf_repr_init(struct rte_eth_dev *eth_dev, /* Allocating memory for mac addr */ eth_dev->data->mac_addrs = rte_zmalloc("mac_addr", RTE_ETHER_ADDR_LEN, 0); if (eth_dev->data->mac_addrs == NULL) { - PMD_INIT_LOG(ERR, "Failed to allocate memory for repr MAC"); + PMD_INIT_LOG(ERR, "Failed to allocate memory for repr MAC."); return -ENOMEM; } @@ -616,6 +766,64 @@ nfp_flower_pf_repr_init(struct rte_eth_dev *eth_dev, return 0; } +static int +nfp_flower_repr_base_init(struct rte_eth_dev *eth_dev, + struct nfp_flower_representor *repr, + struct nfp_repr_init *repr_init) +{ + int ret; + struct nfp_flower_representor *init_repr_data; + + /* Cast the input representor data to the correct struct here */ + init_repr_data = repr_init->flower_repr; + + /* Copy data here from the input representor template */ + repr->idx = init_repr_data->idx; + repr->vf_id = init_repr_data->vf_id; + repr->switch_domain_id = init_repr_data->switch_domain_id; + repr->port_id = init_repr_data->port_id; + repr->nfp_idx = init_repr_data->nfp_idx; + repr->repr_type = init_repr_data->repr_type; + repr->app_fw_flower = init_repr_data->app_fw_flower; + + snprintf(repr->name, sizeof(repr->name), "%s", init_repr_data->name); + + /* This backer port is that of the eth_device created for the PF vNIC */ + eth_dev->data->backer_port_id = 0; + + /* Allocating memory for mac addr */ + eth_dev->data->mac_addrs = rte_zmalloc("mac_addr", RTE_ETHER_ADDR_LEN, 0); + if (eth_dev->data->mac_addrs == NULL) { + PMD_INIT_LOG(ERR, "Failed to allocate memory for repr MAC."); + return -ENOMEM; + } + + rte_ether_addr_copy(&init_repr_data->mac_addr, &repr->mac_addr); + rte_ether_addr_copy(&init_repr_data->mac_addr, eth_dev->data->mac_addrs); + + /* Send reify message to hardware to inform it about the new repr */ + ret = nfp_flower_cmsg_repr_reify(init_repr_data->app_fw_flower, repr); + if (ret != 0) { + PMD_INIT_LOG(WARNING, "Failed to send repr reify message."); + goto mac_cleanup; + } + + /* Allocate memory for extended statistics counters */ + repr->repr_xstats_base = rte_zmalloc("rte_eth_xstat", + sizeof(struct rte_eth_xstat) * nfp_net_xstats_size(eth_dev), 0); + if (repr->repr_xstats_base == NULL) { + PMD_INIT_LOG(ERR, "No memory for xstats base on device %s!", repr->name); + ret = -ENOMEM; + goto mac_cleanup; + } + + return 0; + +mac_cleanup: + rte_free(eth_dev->data->mac_addrs); + return ret; +} + static int nfp_flower_repr_init(struct rte_eth_dev *eth_dev, void *init_params) @@ -650,56 +858,30 @@ nfp_flower_repr_init(struct rte_eth_dev *eth_dev, sizeof(struct rte_ring *) * app_fw_flower->pf_hw->max_rx_queues, RTE_CACHE_LINE_SIZE, numa_node); if (repr->ring == NULL) { - PMD_DRV_LOG(ERR, "Ring create failed for %s", ring_name); + PMD_DRV_LOG(ERR, "Ring create failed for %s.", ring_name); return -ENOMEM; } - /* Copy data here from the input representor template */ - repr->idx = init_repr_data->idx; - repr->vf_id = init_repr_data->vf_id; - repr->switch_domain_id = init_repr_data->switch_domain_id; - repr->port_id = init_repr_data->port_id; - repr->nfp_idx = init_repr_data->nfp_idx; - repr->repr_type = init_repr_data->repr_type; - repr->app_fw_flower = init_repr_data->app_fw_flower; - - strlcpy(repr->name, init_repr_data->name, sizeof(repr->name)); - 
eth_dev->dev_ops = &nfp_flower_repr_dev_ops; eth_dev->rx_pkt_burst = nfp_flower_repr_rx_burst; eth_dev->tx_pkt_burst = nfp_flower_repr_tx_burst; eth_dev->data->dev_flags |= RTE_ETH_DEV_REPRESENTOR | RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS; - if (repr->repr_type == NFP_REPR_TYPE_PHYS_PORT) + ret = nfp_flower_repr_base_init(eth_dev, repr, repr_init); + if (ret != 0) { + PMD_DRV_LOG(ERR, "Flower repr base init failed."); + goto ring_cleanup; + } + + if (nfp_flower_repr_is_phy(repr)) eth_dev->data->representor_id = repr->vf_id; else eth_dev->data->representor_id = repr->vf_id + app_fw_flower->num_phyport_reprs + 1; - /* This backer port is that of the eth_device created for the PF vNIC */ - eth_dev->data->backer_port_id = 0; - - /* Allocating memory for mac addr */ - eth_dev->data->mac_addrs = rte_zmalloc("mac_addr", RTE_ETHER_ADDR_LEN, 0); - if (eth_dev->data->mac_addrs == NULL) { - PMD_INIT_LOG(ERR, "Failed to allocate memory for repr MAC"); - ret = -ENOMEM; - goto ring_cleanup; - } - - rte_ether_addr_copy(&init_repr_data->mac_addr, &repr->mac_addr); - rte_ether_addr_copy(&init_repr_data->mac_addr, eth_dev->data->mac_addrs); - - /* Send reify message to hardware to inform it about the new repr */ - ret = nfp_flower_cmsg_repr_reify(app_fw_flower, repr); - if (ret != 0) { - PMD_INIT_LOG(WARNING, "Failed to send repr reify message"); - goto mac_cleanup; - } - /* Add repr to correct array */ - if (repr->repr_type == NFP_REPR_TYPE_PHYS_PORT) { + if (nfp_flower_repr_is_phy(repr)) { index = NFP_FLOWER_CMSG_PORT_PHYS_PORT_NUM(repr->port_id); app_fw_flower->phy_reprs[index] = repr; } else { @@ -707,30 +889,64 @@ nfp_flower_repr_init(struct rte_eth_dev *eth_dev, app_fw_flower->vf_reprs[index] = repr; } - if (repr->repr_type == NFP_REPR_TYPE_PHYS_PORT) { + if (nfp_flower_repr_is_phy(repr)) { repr->mac_stats = hw_priv->pf_dev->mac_stats_bar + (repr->nfp_idx * NFP_MAC_STATS_SIZE); } - /* Allocate memory for extended statistics counters */ - repr->repr_xstats_base = rte_zmalloc("rte_eth_xstat", - sizeof(struct rte_eth_xstat) * nfp_net_xstats_size(eth_dev), 0); - if (repr->repr_xstats_base == NULL) { - PMD_INIT_LOG(ERR, "No memory for xstats base on device %s!", repr->name); - ret = -ENOMEM; - goto mac_cleanup; - } - return 0; -mac_cleanup: - rte_free(eth_dev->data->mac_addrs); ring_cleanup: rte_free(repr->ring); return ret; } +static int +nfp_flower_multiple_pf_repr_init(struct rte_eth_dev *eth_dev, + void *init_params) +{ + int ret; + uint16_t index; + struct nfp_repr_init *repr_init; + struct nfp_net_hw_priv *hw_priv; + struct nfp_flower_representor *repr; + struct nfp_app_fw_flower *app_fw_flower; + + /* Cast the input representor data to the correct struct here */ + repr_init = init_params; + app_fw_flower = repr_init->flower_repr->app_fw_flower; + + /* Memory has been allocated in the eth_dev_create() function */ + repr = eth_dev->data->dev_private; + hw_priv = repr_init->hw_priv; + + eth_dev->dev_ops = &nfp_flower_multiple_pf_repr_dev_ops; + eth_dev->rx_pkt_burst = nfp_flower_multiple_pf_recv_pkts; + eth_dev->tx_pkt_burst = nfp_flower_multiple_pf_xmit_pkts; + eth_dev->data->dev_flags |= RTE_ETH_DEV_REPRESENTOR | + RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS; + + ret = nfp_flower_repr_base_init(eth_dev, repr, repr_init); + if (ret < 0) { + PMD_DRV_LOG(ERR, "Flower multiple PF repr base init failed."); + return -ENOMEM; + } + + eth_dev->data->representor_id = repr->vf_id; + + /* Add repr to correct array */ + index = NFP_FLOWER_CMSG_PORT_PHYS_PORT_NUM(repr->port_id); + app_fw_flower->phy_reprs[index] = repr; + + 
repr->mac_stats = hw_priv->pf_dev->mac_stats_bar + + (repr->nfp_idx * NFP_MAC_STATS_SIZE); + + app_fw_flower->pf_ethdev = eth_dev; + + return 0; +} + static void nfp_flower_repr_free_all(struct nfp_app_fw_flower *app_fw_flower) { @@ -799,123 +1015,184 @@ nfp_flower_repr_priv_init(struct nfp_app_fw_flower *app_fw_flower, } static int -nfp_flower_repr_alloc(struct nfp_app_fw_flower *app_fw_flower, - struct nfp_net_hw_priv *hw_priv) +nfp_flower_phy_repr_alloc(struct nfp_net_hw_priv *hw_priv, + struct nfp_flower_representor *flower_repr, + const char *pci_name) { int i; int ret; uint8_t id; - const char *pci_name; struct nfp_pf_dev *pf_dev; - struct rte_pci_device *pci_dev; + ethdev_init_t ethdev_init; struct nfp_repr_init repr_init; - struct nfp_eth_table *nfp_eth_table; struct nfp_eth_table_port *eth_port; - struct nfp_flower_representor flower_repr = { - .switch_domain_id = app_fw_flower->switch_domain_id, - .app_fw_flower = app_fw_flower, - }; + struct nfp_app_fw_flower *app_fw_flower; pf_dev = hw_priv->pf_dev; - nfp_eth_table = pf_dev->nfp_eth_table; repr_init.hw_priv = hw_priv; - - /* Send a NFP_FLOWER_CMSG_TYPE_MAC_REPR cmsg to hardware */ - ret = nfp_flower_cmsg_mac_repr(app_fw_flower, pf_dev); - if (ret != 0) { - PMD_INIT_LOG(ERR, "Cloud not send mac repr cmsgs"); - return ret; - } - - /* Create a rte_eth_dev for PF vNIC representor */ - flower_repr.repr_type = NFP_REPR_TYPE_PF; - flower_repr.idx = 0; - - /* PF vNIC reprs get a random MAC address */ - rte_eth_random_addr(flower_repr.mac_addr.addr_bytes); - - pci_dev = pf_dev->pci_dev; - - pci_name = strchr(pci_dev->name, ':') + 1; - - if (pf_dev->multi_pf.enabled) - snprintf(flower_repr.name, sizeof(flower_repr.name), - "%s_repr_pf%d", pci_name, pf_dev->multi_pf.function_id); - else - snprintf(flower_repr.name, sizeof(flower_repr.name), - "%s_repr_pf", pci_name); - - /* Create a eth_dev for this representor */ - ret = rte_eth_dev_create(&pci_dev->device, flower_repr.name, - sizeof(struct nfp_flower_representor), - NULL, NULL, nfp_flower_pf_repr_init, &flower_repr); - if (ret != 0) { - PMD_INIT_LOG(ERR, "Failed to init the pf repr"); - return -EINVAL; - } - - /* Create a rte_eth_dev for every phyport representor */ + app_fw_flower = flower_repr->app_fw_flower; for (i = 0; i < app_fw_flower->num_phyport_reprs; i++) { id = nfp_function_id_get(pf_dev, i); - eth_port = &nfp_eth_table->ports[id]; - flower_repr.repr_type = NFP_REPR_TYPE_PHYS_PORT; - flower_repr.port_id = nfp_flower_get_phys_port_id(eth_port->index); - flower_repr.nfp_idx = eth_port->index; - flower_repr.vf_id = i + 1; - flower_repr.idx = id; + eth_port = &pf_dev->nfp_eth_table->ports[id]; + flower_repr->repr_type = NFP_REPR_TYPE_PHYS_PORT; + flower_repr->port_id = nfp_flower_get_phys_port_id(eth_port->index); + flower_repr->nfp_idx = eth_port->index; + flower_repr->idx = id; /* Copy the real mac of the interface to the representor struct */ - rte_ether_addr_copy(ð_port->mac_addr, &flower_repr.mac_addr); - snprintf(flower_repr.name, sizeof(flower_repr.name), - "%s_repr_p%d", pci_name, id); + rte_ether_addr_copy(ð_port->mac_addr, &flower_repr->mac_addr); /* * Create a eth_dev for this representor. * This will also allocate private memory for the device. 
*/ - repr_init.flower_repr = &flower_repr; - ret = rte_eth_dev_create(&pci_dev->device, flower_repr.name, + repr_init.flower_repr = flower_repr; + if (pf_dev->multi_pf.enabled) { + repr_init.flower_repr->vf_id = i; + snprintf(flower_repr->name, sizeof(flower_repr->name), + "%s_repr_p", pci_name); + ethdev_init = nfp_flower_multiple_pf_repr_init; + } else { + repr_init.flower_repr->vf_id = i + 1; + snprintf(flower_repr->name, sizeof(flower_repr->name), + "%s_repr_p%d", pci_name, id); + ethdev_init = nfp_flower_repr_init; + } + ret = rte_eth_dev_create(&pf_dev->pci_dev->device, flower_repr->name, sizeof(struct nfp_flower_representor), - NULL, NULL, nfp_flower_repr_init, &repr_init); + NULL, NULL, ethdev_init, &repr_init); if (ret != 0) { - PMD_INIT_LOG(ERR, "Cloud not create eth_dev for repr"); + PMD_INIT_LOG(ERR, "Could not create eth_dev for repr."); break; } } if (i < app_fw_flower->num_phyport_reprs) - goto repr_free; + return -EIO; - /* - * Now allocate eth_dev's for VF representors. - * Also send reify messages. - */ + return 0; +} + +static int +nfp_flower_vf_repr_alloc(struct nfp_net_hw_priv *hw_priv, + struct nfp_flower_representor *flower_repr, + const char *pci_name) +{ + int i; + int ret; + struct nfp_pf_dev *pf_dev; + struct nfp_repr_init repr_init; + struct nfp_app_fw_flower *app_fw_flower; + + pf_dev = hw_priv->pf_dev; + repr_init.hw_priv = hw_priv; + app_fw_flower = flower_repr->app_fw_flower; for (i = 0; i < app_fw_flower->num_vf_reprs; i++) { - flower_repr.repr_type = NFP_REPR_TYPE_VF; - flower_repr.port_id = nfp_get_pcie_port_id(pf_dev->cpp, + flower_repr->repr_type = NFP_REPR_TYPE_VF; + flower_repr->port_id = nfp_get_pcie_port_id(pf_dev->cpp, NFP_FLOWER_CMSG_PORT_VNIC_TYPE_VF, i + pf_dev->vf_base_id, 0); - flower_repr.nfp_idx = 0; - flower_repr.vf_id = i; - flower_repr.idx = 0; + flower_repr->nfp_idx = 0; + flower_repr->vf_id = i; + flower_repr->idx = nfp_function_id_get(pf_dev, 0); /* VF reprs get a random MAC address */ - rte_eth_random_addr(flower_repr.mac_addr.addr_bytes); - snprintf(flower_repr.name, sizeof(flower_repr.name), + rte_eth_random_addr(flower_repr->mac_addr.addr_bytes); + snprintf(flower_repr->name, sizeof(flower_repr->name), "%s_repr_vf%d", pci_name, i); - repr_init.flower_repr = &flower_repr; + repr_init.flower_repr = flower_repr; /* This will also allocate private memory for the device */ - ret = rte_eth_dev_create(&pci_dev->device, flower_repr.name, + ret = rte_eth_dev_create(&pf_dev->pci_dev->device, flower_repr->name, sizeof(struct nfp_flower_representor), NULL, NULL, nfp_flower_repr_init, &repr_init); if (ret != 0) { - PMD_INIT_LOG(ERR, "Cloud not create eth_dev for repr"); + PMD_INIT_LOG(ERR, "Could not create eth_dev for repr."); break; } } if (i < app_fw_flower->num_vf_reprs) + return -EIO; + + return 0; +} + +static int +nfp_flower_pf_repr_alloc(struct nfp_net_hw_priv *hw_priv, + struct nfp_flower_representor *flower_repr, + const char *pci_name) +{ + int ret; + struct nfp_pf_dev *pf_dev; + + pf_dev = hw_priv->pf_dev; + if (pf_dev->multi_pf.enabled) + return 0; + + /* Create a rte_eth_dev for PF vNIC representor */ + flower_repr->repr_type = NFP_REPR_TYPE_PF; + flower_repr->idx = 0; + + /* PF vNIC reprs get a random MAC address */ + rte_eth_random_addr(flower_repr->mac_addr.addr_bytes); + + snprintf(flower_repr->name, sizeof(flower_repr->name), + "%s_repr_pf", pci_name); + + /* Create a eth_dev for this representor */ + ret = rte_eth_dev_create(&pf_dev->pci_dev->device, flower_repr->name, + sizeof(struct nfp_flower_representor), + NULL, NULL, 
nfp_flower_pf_repr_init, flower_repr); + if (ret != 0) { + PMD_INIT_LOG(ERR, "Failed to init the pf repr."); + return -EINVAL; + } + + return 0; +} + +static int +nfp_flower_repr_alloc(struct nfp_app_fw_flower *app_fw_flower, + struct nfp_net_hw_priv *hw_priv) +{ + int ret; + const char *pci_name; + struct nfp_flower_representor flower_repr = { + .switch_domain_id = app_fw_flower->switch_domain_id, + .app_fw_flower = app_fw_flower, + }; + + /* Send a NFP_FLOWER_CMSG_TYPE_MAC_REPR cmsg to hardware */ + ret = nfp_flower_cmsg_mac_repr(app_fw_flower, hw_priv->pf_dev); + if (ret != 0) { + PMD_INIT_LOG(ERR, "Could not send mac repr cmsgs."); + return ret; + } + + pci_name = strchr(hw_priv->pf_dev->pci_dev->name, ':') + 1; + + ret = nfp_flower_pf_repr_alloc(hw_priv, &flower_repr, pci_name); + if (ret != 0) { + PMD_INIT_LOG(ERR, "Could not alloc pf repr."); + return ret; + } + + /* Create a rte_eth_dev for every phyport representor */ + ret = nfp_flower_phy_repr_alloc(hw_priv, &flower_repr, pci_name); + if (ret != 0) { + PMD_INIT_LOG(ERR, "Failed to init the phy repr."); goto repr_free; + } + + /* + * Now allocate eth_dev's for VF representors. + * Also send reify messages. + */ + ret = nfp_flower_vf_repr_alloc(hw_priv, &flower_repr, pci_name); + if (ret != 0) { + PMD_INIT_LOG(ERR, "Failed to init the vf repr."); + goto repr_free; + } nfp_flower_repr_priv_init(app_fw_flower, hw_priv); @@ -932,6 +1209,7 @@ nfp_flower_repr_create(struct nfp_app_fw_flower *app_fw_flower, struct nfp_net_hw_priv *hw_priv) { int ret; + uint8_t num_pf_reprs; struct nfp_pf_dev *pf_dev; struct rte_pci_device *pci_dev; struct rte_eth_devargs eth_da = { @@ -944,13 +1222,13 @@ nfp_flower_repr_create(struct nfp_app_fw_flower *app_fw_flower, /* Allocate a switch domain for the flower app */ ret = rte_eth_switch_domain_alloc(&app_fw_flower->switch_domain_id); if (ret != 0) - PMD_INIT_LOG(WARNING, "failed to allocate switch domain for device"); + PMD_INIT_LOG(WARNING, "Failed to allocate switch domain for device."); /* Now parse PCI device args passed for representor info */ if (pci_dev->device.devargs != NULL) { ret = rte_eth_devargs_parse(pci_dev->device.devargs->args, ð_da, 1); if (ret < 0) { - PMD_INIT_LOG(ERR, "devarg parse failed"); + PMD_INIT_LOG(ERR, "Devarg parse failed."); return -EINVAL; } } @@ -960,34 +1238,39 @@ nfp_flower_repr_create(struct nfp_app_fw_flower *app_fw_flower, return 0; } - /* There always exist phy repr */ - if (eth_da.nb_representor_ports < pf_dev->total_phyports + 1) { + /* Calculate the number of pf repr */ + if (pf_dev->multi_pf.enabled) + num_pf_reprs = 0; + else + num_pf_reprs = 1; + + if (eth_da.nb_representor_ports < pf_dev->total_phyports + num_pf_reprs) { PMD_INIT_LOG(ERR, "Should also create repr port for phy port and PF vNIC."); return -ERANGE; } /* Only support VF representor creation via the command line */ if (eth_da.type != RTE_ETH_REPRESENTOR_VF) { - PMD_INIT_LOG(ERR, "Unsupported representor type: %d", eth_da.type); + PMD_INIT_LOG(ERR, "Unsupported representor type: %d.", eth_da.type); return -ENOTSUP; } /* Fill in flower app with repr counts */ app_fw_flower->num_phyport_reprs = pf_dev->total_phyports; app_fw_flower->num_vf_reprs = eth_da.nb_representor_ports - - pf_dev->total_phyports - 1; + pf_dev->total_phyports - num_pf_reprs; if (pf_dev->max_vfs != 0 && pf_dev->sriov_vf < app_fw_flower->num_vf_reprs) { - PMD_INIT_LOG(ERR, "The VF repr nums %d is bigger than VF nums %d", + PMD_INIT_LOG(ERR, "The VF repr nums %d is bigger than VF nums %d.", app_fw_flower->num_vf_reprs, 
pf_dev->sriov_vf); return -ERANGE; } - PMD_INIT_LOG(INFO, "%d number of VF reprs", app_fw_flower->num_vf_reprs); - PMD_INIT_LOG(INFO, "%d number of phyport reprs", app_fw_flower->num_phyport_reprs); + PMD_INIT_LOG(INFO, "%d number of VF reprs.", app_fw_flower->num_vf_reprs); + PMD_INIT_LOG(INFO, "%d number of phyport reprs.", app_fw_flower->num_phyport_reprs); ret = nfp_flower_repr_alloc(app_fw_flower, hw_priv); if (ret != 0) { - PMD_INIT_LOG(ERR, "representors allocation failed"); + PMD_INIT_LOG(ERR, "Representors allocation failed."); ret = -EINVAL; goto domain_free; } @@ -996,7 +1279,7 @@ nfp_flower_repr_create(struct nfp_app_fw_flower *app_fw_flower, domain_free: if (rte_eth_switch_domain_free(app_fw_flower->switch_domain_id) != 0) - PMD_INIT_LOG(WARNING, "failed to free switch domain for device"); + PMD_INIT_LOG(WARNING, "Failed to free switch domain for device."); return ret; } @@ -1006,3 +1289,9 @@ nfp_flower_repr_is_vf(struct nfp_flower_representor *repr) { return repr->repr_type == NFP_REPR_TYPE_VF; } + +bool +nfp_flower_repr_is_phy(struct nfp_flower_representor *repr) +{ + return repr->repr_type == NFP_REPR_TYPE_PHYS_PORT; +} diff --git a/drivers/net/nfp/flower/nfp_flower_representor.h b/drivers/net/nfp/flower/nfp_flower_representor.h index 4211ddf798..3f6ee32fe4 100644 --- a/drivers/net/nfp/flower/nfp_flower_representor.h +++ b/drivers/net/nfp/flower/nfp_flower_representor.h @@ -30,6 +30,7 @@ struct nfp_flower_representor { int nfp_flower_repr_create(struct nfp_app_fw_flower *app_fw_flower, struct nfp_net_hw_priv *hw_priv); bool nfp_flower_repr_is_vf(struct nfp_flower_representor *repr); +bool nfp_flower_repr_is_phy(struct nfp_flower_representor *repr); int nfp_flower_repr_stats_reset(struct rte_eth_dev *ethdev); #endif /* __NFP_FLOWER_REPRESENTOR_H__ */ diff --git a/drivers/net/nfp/flower/nfp_flower_service.c b/drivers/net/nfp/flower/nfp_flower_service.c index aac11dbb94..b4d987a980 100644 --- a/drivers/net/nfp/flower/nfp_flower_service.c +++ b/drivers/net/nfp/flower/nfp_flower_service.c @@ -155,7 +155,7 @@ nfp_flower_service_start(struct nfp_net_hw_priv *hw_priv) service_handle = nfp_flower_service_handle_get(hw_priv); if (service_handle == NULL) { - PMD_DRV_LOG(ERR, "Can not get service handle"); + PMD_DRV_LOG(ERR, "Can not get service handle."); return -EINVAL; } @@ -175,7 +175,7 @@ nfp_flower_service_start(struct nfp_net_hw_priv *hw_priv) /* Insert the NIC to flower service slot */ ret = nfp_flower_service_insert(hw_priv, service_handle); if (ret == MAX_FLOWER_SERVICE_SLOT) { - PMD_DRV_LOG(ERR, "Flower ctrl vnic service slot over %u", + PMD_DRV_LOG(ERR, "Flower ctrl vnic service slot over %u.", MAX_FLOWER_SERVICE_SLOT); return -ENOSPC; } @@ -192,7 +192,7 @@ nfp_flower_service_stop(struct nfp_net_hw_priv *hw_priv) service_handle = nfp_flower_service_handle_get(hw_priv); if (service_handle == NULL) { - PMD_DRV_LOG(ERR, "Can not get service handle"); + PMD_DRV_LOG(ERR, "Can not get service handle."); return; } diff --git a/drivers/net/nfp/nfd3/nfp_nfd3_dp.c b/drivers/net/nfp/nfd3/nfp_nfd3_dp.c index 4ff1ae63b0..3ffcbb2576 100644 --- a/drivers/net/nfp/nfd3/nfp_nfd3_dp.c +++ b/drivers/net/nfp/nfd3/nfp_nfd3_dp.c @@ -190,7 +190,7 @@ nfp_net_nfd3_set_meta_data(struct nfp_net_meta_raw *meta_data, switch (meta_info & NFP_NET_META_FIELD_MASK) { case NFP_NET_META_VLAN: if (vlan_layer > 0) { - PMD_DRV_LOG(ERR, "At most 1 layers of vlan is supported"); + PMD_DRV_LOG(ERR, "At most 1 layer of vlan is supported."); return -EINVAL; } nfp_net_meta_set_vlan(meta_data, pkt, layer); @@ -206,7 
+206,7 @@ nfp_net_nfd3_set_meta_data(struct nfp_net_meta_raw *meta_data, ipsec_layer++; break; default: - PMD_DRV_LOG(ERR, "The metadata type not supported"); + PMD_DRV_LOG(ERR, "The metadata type not supported."); return -ENOTSUP; } @@ -249,7 +249,7 @@ nfp_net_nfd3_xmit_pkts_common(void *tx_queue, hw = txq->hw; txds = &txq->txds[txq->wr_p]; - PMD_TX_LOG(DEBUG, "working for queue %hu at pos %d and %hu packets", + PMD_TX_LOG(DEBUG, "Working for queue %hu at pos %d and %hu packets.", txq->qidx, txq->wr_p, nb_pkts); if (nfp_net_nfd3_free_tx_desc(txq) < NFD3_TX_DESC_PER_PKT * nb_pkts || @@ -263,7 +263,7 @@ nfp_net_nfd3_xmit_pkts_common(void *tx_queue, pkt = *tx_pkts; issued_descs = 0; - PMD_TX_LOG(DEBUG, "queue: %hu. Sending %hu packets", txq->qidx, nb_pkts); + PMD_TX_LOG(DEBUG, "Queue: %hu. Sending %hu packets.", txq->qidx, nb_pkts); /* Sending packets */ for (i = 0; i < nb_pkts && free_descs > 0; i++) { @@ -288,7 +288,7 @@ nfp_net_nfd3_xmit_pkts_common(void *tx_queue, if (unlikely(pkt->nb_segs > 1 && (hw->super.ctrl & NFP_NET_CFG_CTRL_GATHER) == 0)) { - PMD_TX_LOG(ERR, "Multisegment packet not supported"); + PMD_TX_LOG(ERR, "Multisegment packet not supported."); goto xmit_end; } @@ -396,14 +396,14 @@ nfp_net_nfd3_tx_queue_setup(struct rte_eth_dev *dev, tx_desc_sz = nb_desc * sizeof(struct nfp_net_nfd3_tx_desc); if ((NFD3_TX_DESC_PER_PKT * tx_desc_sz) % NFP_ALIGN_RING_DESC != 0 || nb_desc > max_tx_desc || nb_desc < min_tx_desc) { - PMD_DRV_LOG(ERR, "Wrong nb_desc value"); + PMD_DRV_LOG(ERR, "Wrong nb_desc value."); return -EINVAL; } tx_free_thresh = (tx_conf->tx_free_thresh != 0) ? tx_conf->tx_free_thresh : DEFAULT_TX_FREE_THRESH; if (tx_free_thresh > nb_desc) { - PMD_DRV_LOG(ERR, "tx_free_thresh must be less than the number of TX " + PMD_DRV_LOG(ERR, "The tx_free_thresh must be less than the number of TX " "descriptors. (tx_free_thresh=%u port=%d queue=%d)", tx_free_thresh, dev->data->port_id, queue_idx); return -EINVAL; @@ -414,7 +414,7 @@ nfp_net_nfd3_tx_queue_setup(struct rte_eth_dev *dev, * calling nfp_net_stop(). 
*/ if (dev->data->tx_queues[queue_idx] != NULL) { - PMD_TX_LOG(DEBUG, "Freeing memory prior to re-allocation %d", + PMD_TX_LOG(DEBUG, "Freeing memory prior to re-allocation %d.", queue_idx); nfp_net_tx_queue_release(dev, queue_idx); dev->data->tx_queues[queue_idx] = NULL; @@ -424,7 +424,7 @@ nfp_net_nfd3_tx_queue_setup(struct rte_eth_dev *dev, txq = rte_zmalloc_socket("ethdev TX queue", sizeof(struct nfp_net_txq), RTE_CACHE_LINE_SIZE, socket_id); if (txq == NULL) { - PMD_DRV_LOG(ERR, "Error allocating tx dma"); + PMD_DRV_LOG(ERR, "Error allocating tx dma."); return -ENOMEM; } @@ -439,7 +439,7 @@ nfp_net_nfd3_tx_queue_setup(struct rte_eth_dev *dev, tz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx, size, NFP_MEMZONE_ALIGN, socket_id); if (tz == NULL) { - PMD_DRV_LOG(ERR, "Error allocating tx dma"); + PMD_DRV_LOG(ERR, "Error allocating tx dma."); nfp_net_tx_queue_release(dev, queue_idx); dev->data->tx_queues[queue_idx] = NULL; return -ENOMEM; diff --git a/drivers/net/nfp/nfdk/nfp_nfdk_dp.c b/drivers/net/nfp/nfdk/nfp_nfdk_dp.c index 68fcbe93da..15867ab62f 100644 --- a/drivers/net/nfp/nfdk/nfp_nfdk_dp.c +++ b/drivers/net/nfp/nfdk/nfp_nfdk_dp.c @@ -195,7 +195,7 @@ nfp_net_nfdk_set_meta_data(struct rte_mbuf *pkt, ipsec_layer++; break; default: - PMD_DRV_LOG(ERR, "The metadata type not supported"); + PMD_DRV_LOG(ERR, "The metadata type not supported."); return -ENOTSUP; } @@ -236,7 +236,7 @@ nfp_net_nfdk_xmit_pkts_common(void *tx_queue, txq = tx_queue; hw = txq->hw; - PMD_TX_LOG(DEBUG, "working for queue %hu at pos %d and %hu packets", + PMD_TX_LOG(DEBUG, "Working for queue %hu at pos %d and %hu packets.", txq->qidx, txq->wr_p, nb_pkts); if (nfp_net_nfdk_free_tx_desc(txq) < NFDK_TX_DESC_PER_SIMPLE_PKT * nb_pkts || @@ -247,7 +247,7 @@ nfp_net_nfdk_xmit_pkts_common(void *tx_queue, if (unlikely(free_descs == 0)) return 0; - PMD_TX_LOG(DEBUG, "queue: %hu. Sending %hu packets", txq->qidx, nb_pkts); + PMD_TX_LOG(DEBUG, "Queue: %hu. Sending %hu packets.", txq->qidx, nb_pkts); /* Sending packets */ while (npkts < nb_pkts && free_descs > 0) { @@ -289,7 +289,7 @@ nfp_net_nfdk_xmit_pkts_common(void *tx_queue, if (unlikely(pkt->nb_segs > 1 && (hw->super.ctrl & NFP_NET_CFG_CTRL_GATHER) == 0)) { - PMD_TX_LOG(ERR, "Multisegment packet not supported"); + PMD_TX_LOG(ERR, "Multisegment packet not supported."); goto xmit_end; } @@ -381,7 +381,7 @@ nfp_net_nfdk_xmit_pkts_common(void *tx_queue, if (RTE_ALIGN_FLOOR(txq->wr_p, NFDK_TX_DESC_BLOCK_CNT) != RTE_ALIGN_FLOOR(txq->wr_p + used_descs - 1, NFDK_TX_DESC_BLOCK_CNT)) { - PMD_TX_LOG(INFO, "Used descs cross block boundary"); + PMD_TX_LOG(INFO, "Used descs cross block boundary."); goto xmit_end; } @@ -431,7 +431,7 @@ nfp_net_nfdk_tx_queue_setup(struct rte_eth_dev *dev, if ((NFDK_TX_DESC_PER_SIMPLE_PKT * tx_desc_sz) % NFP_ALIGN_RING_DESC != 0 || (NFDK_TX_DESC_PER_SIMPLE_PKT * nb_desc) % NFDK_TX_DESC_BLOCK_CNT != 0 || nb_desc > max_tx_desc || nb_desc < min_tx_desc) { - PMD_DRV_LOG(ERR, "Wrong nb_desc value"); + PMD_DRV_LOG(ERR, "Wrong nb_desc value."); return -EINVAL; } @@ -439,7 +439,7 @@ nfp_net_nfdk_tx_queue_setup(struct rte_eth_dev *dev, if (tx_free_thresh == 0) tx_free_thresh = DEFAULT_TX_FREE_THRESH; if (tx_free_thresh > nb_desc) { - PMD_DRV_LOG(ERR, "tx_free_thresh must be less than the number of TX " + PMD_DRV_LOG(ERR, "The tx_free_thresh must be less than the number of TX " "descriptors. 
(tx_free_thresh=%u port=%d queue=%d)", tx_free_thresh, dev->data->port_id, queue_idx); return -EINVAL; @@ -450,7 +450,7 @@ nfp_net_nfdk_tx_queue_setup(struct rte_eth_dev *dev, * calling nfp_net_stop(). */ if (dev->data->tx_queues[queue_idx] != NULL) { - PMD_TX_LOG(DEBUG, "Freeing memory prior to re-allocation %d", + PMD_TX_LOG(DEBUG, "Freeing memory prior to re-allocation %d.", queue_idx); nfp_net_tx_queue_release(dev, queue_idx); dev->data->tx_queues[queue_idx] = NULL; @@ -460,7 +460,7 @@ nfp_net_nfdk_tx_queue_setup(struct rte_eth_dev *dev, txq = rte_zmalloc_socket("ethdev TX queue", sizeof(struct nfp_net_txq), RTE_CACHE_LINE_SIZE, socket_id); if (txq == NULL) { - PMD_DRV_LOG(ERR, "Error allocating tx dma"); + PMD_DRV_LOG(ERR, "Error allocating tx dma."); return -ENOMEM; } @@ -474,7 +474,7 @@ nfp_net_nfdk_tx_queue_setup(struct rte_eth_dev *dev, tz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx, size, NFP_MEMZONE_ALIGN, socket_id); if (tz == NULL) { - PMD_DRV_LOG(ERR, "Error allocating tx dma"); + PMD_DRV_LOG(ERR, "Error allocating tx dma."); nfp_net_tx_queue_release(dev, queue_idx); return -ENOMEM; } diff --git a/drivers/net/nfp/nfdk/nfp_nfdk_vec_avx2_dp.c b/drivers/net/nfp/nfdk/nfp_nfdk_vec_avx2_dp.c index 6d1359fdb1..8354b0378b 100644 --- a/drivers/net/nfp/nfdk/nfp_nfdk_vec_avx2_dp.c +++ b/drivers/net/nfp/nfdk/nfp_nfdk_vec_avx2_dp.c @@ -152,7 +152,7 @@ nfp_net_nfdk_vec_avx2_xmit_simple_pkts(struct nfp_net_txq *txq, struct rte_mbuf **lmbuf; struct nfp_net_nfdk_tx_desc *ktxds; - PMD_TX_LOG(DEBUG, "Working for queue %hu at pos %u and %hu packets", + PMD_TX_LOG(DEBUG, "Working for queue %hu at pos %u and %hu packets.", txq->qidx, txq->wr_p, nb_pkts); need_txds = nb_pkts << 1; @@ -167,7 +167,7 @@ nfp_net_nfdk_vec_avx2_xmit_simple_pkts(struct nfp_net_txq *txq, return 0; } - PMD_TX_LOG(DEBUG, "Queue: %hu. Sending %hu packets", txq->qidx, nb_pkts); + PMD_TX_LOG(DEBUG, "Queue: %hu. 
Sending %hu packets.", txq->qidx, nb_pkts); /* Sending packets */ while (npkts < nb_pkts && free_descs >= NFDK_TX_DESC_PER_SIMPLE_PKT) { diff --git a/drivers/net/nfp/nfp_cpp_bridge.c b/drivers/net/nfp/nfp_cpp_bridge.c index fc73fa20ab..da7ea35d62 100644 --- a/drivers/net/nfp/nfp_cpp_bridge.c +++ b/drivers/net/nfp/nfp_cpp_bridge.c @@ -38,7 +38,7 @@ nfp_enable_cpp_service(struct nfp_pf_dev *pf_dev) ret = nfp_service_enable(&cpp_service, &pf_dev->cpp_service_info); if (ret != 0) { - PMD_INIT_LOG(DEBUG, "Could not enable service %s", cpp_service.name); + PMD_INIT_LOG(DEBUG, "Could not enable service %s.", cpp_service.name); return ret; } @@ -71,7 +71,7 @@ nfp_cpp_bridge_serve_write(int sockfd, uint32_t tmpbuf[16]; struct nfp_cpp_area *area; - PMD_CPP_LOG(DEBUG, "%s: offset size %zu, count_size: %zu", __func__, + PMD_CPP_LOG(DEBUG, "%s: offset size %zu, count_size: %zu.", __func__, sizeof(off_t), sizeof(size_t)); /* Reading the count param */ @@ -90,9 +90,9 @@ nfp_cpp_bridge_serve_write(int sockfd, cpp_id = (offset >> 40) << 8; nfp_offset = offset & ((1ull << 40) - 1); - PMD_CPP_LOG(DEBUG, "%s: count %zu and offset %jd", __func__, count, + PMD_CPP_LOG(DEBUG, "%s: count %zu and offset %jd.", __func__, count, offset); - PMD_CPP_LOG(DEBUG, "%s: cpp_id %08x and nfp_offset %jd", __func__, + PMD_CPP_LOG(DEBUG, "%s: cpp_id %08x and nfp_offset %jd.", __func__, cpp_id, nfp_offset); /* Adjust length if not aligned */ @@ -107,14 +107,14 @@ nfp_cpp_bridge_serve_write(int sockfd, area = nfp_cpp_area_alloc_with_name(cpp, cpp_id, "nfp.cdev", nfp_offset, curlen); if (area == NULL) { - PMD_CPP_LOG(ERR, "area alloc fail"); + PMD_CPP_LOG(ERR, "Area alloc fail."); return -EIO; } /* Mapping the target */ err = nfp_cpp_area_acquire(area); if (err < 0) { - PMD_CPP_LOG(ERR, "area acquire failed"); + PMD_CPP_LOG(ERR, "Area acquire failed."); nfp_cpp_area_free(area); return -EIO; } @@ -124,11 +124,11 @@ nfp_cpp_bridge_serve_write(int sockfd, if (len > sizeof(tmpbuf)) len = sizeof(tmpbuf); - PMD_CPP_LOG(DEBUG, "%s: Receive %u of %zu", __func__, + PMD_CPP_LOG(DEBUG, "%s: Receive %u of %zu.", __func__, len, count); err = recv(sockfd, tmpbuf, len, MSG_WAITALL); if (err != (int)len) { - PMD_CPP_LOG(ERR, "error when receiving, %d of %zu", + PMD_CPP_LOG(ERR, "Error when receiving, %d of %zu.", err, count); nfp_cpp_area_release(area); nfp_cpp_area_free(area); @@ -137,7 +137,7 @@ nfp_cpp_bridge_serve_write(int sockfd, err = nfp_cpp_area_write(area, pos, tmpbuf, len); if (err < 0) { - PMD_CPP_LOG(ERR, "nfp_cpp_area_write error"); + PMD_CPP_LOG(ERR, "The nfp_cpp_area_write error."); nfp_cpp_area_release(area); nfp_cpp_area_free(area); return -EIO; @@ -177,7 +177,7 @@ nfp_cpp_bridge_serve_read(int sockfd, uint32_t tmpbuf[16]; struct nfp_cpp_area *area; - PMD_CPP_LOG(DEBUG, "%s: offset size %zu, count_size: %zu", __func__, + PMD_CPP_LOG(DEBUG, "%s: offset size %zu, count_size: %zu.", __func__, sizeof(off_t), sizeof(size_t)); /* Reading the count param */ @@ -196,9 +196,9 @@ nfp_cpp_bridge_serve_read(int sockfd, cpp_id = (offset >> 40) << 8; nfp_offset = offset & ((1ull << 40) - 1); - PMD_CPP_LOG(DEBUG, "%s: count %zu and offset %jd", __func__, count, + PMD_CPP_LOG(DEBUG, "%s: count %zu and offset %jd.", __func__, count, offset); - PMD_CPP_LOG(DEBUG, "%s: cpp_id %08x and nfp_offset %jd", __func__, + PMD_CPP_LOG(DEBUG, "%s: cpp_id %08x and nfp_offset %jd.", __func__, cpp_id, nfp_offset); /* Adjust length if not aligned */ @@ -212,13 +212,13 @@ nfp_cpp_bridge_serve_read(int sockfd, area = nfp_cpp_area_alloc_with_name(cpp, cpp_id, 
"nfp.cdev", nfp_offset, curlen); if (area == NULL) { - PMD_CPP_LOG(ERR, "area alloc failed"); + PMD_CPP_LOG(ERR, "Area alloc failed."); return -EIO; } err = nfp_cpp_area_acquire(area); if (err < 0) { - PMD_CPP_LOG(ERR, "area acquire failed"); + PMD_CPP_LOG(ERR, "Area acquire failed."); nfp_cpp_area_free(area); return -EIO; } @@ -230,17 +230,17 @@ nfp_cpp_bridge_serve_read(int sockfd, err = nfp_cpp_area_read(area, pos, tmpbuf, len); if (err < 0) { - PMD_CPP_LOG(ERR, "nfp_cpp_area_read error"); + PMD_CPP_LOG(ERR, "The nfp_cpp_area_read error."); nfp_cpp_area_release(area); nfp_cpp_area_free(area); return -EIO; } - PMD_CPP_LOG(DEBUG, "%s: sending %u of %zu", __func__, + PMD_CPP_LOG(DEBUG, "%s: sending %u of %zu.", __func__, len, count); err = send(sockfd, tmpbuf, len, 0); if (err != (int)len) { - PMD_CPP_LOG(ERR, "error when sending: %d of %zu", + PMD_CPP_LOG(ERR, "Error when sending: %d of %zu.", err, count); nfp_cpp_area_release(area); nfp_cpp_area_free(area); @@ -278,39 +278,39 @@ nfp_cpp_bridge_serve_ioctl(int sockfd, /* Reading now the IOCTL command */ err = recv(sockfd, &cmd, 4, 0); if (err != 4) { - PMD_CPP_LOG(ERR, "read error from socket"); + PMD_CPP_LOG(ERR, "Read error from socket."); return -EIO; } /* Only supporting NFP_IOCTL_CPP_IDENTIFICATION */ if (cmd != NFP_IOCTL_CPP_IDENTIFICATION) { - PMD_CPP_LOG(ERR, "unknown cmd %d", cmd); + PMD_CPP_LOG(ERR, "Unknown cmd %d.", cmd); return -EINVAL; } err = recv(sockfd, &ident_size, 4, 0); if (err != 4) { - PMD_CPP_LOG(ERR, "read error from socket"); + PMD_CPP_LOG(ERR, "Read error from socket."); return -EIO; } tmp = nfp_cpp_model(cpp); - PMD_CPP_LOG(DEBUG, "%s: sending NFP model %08x", __func__, tmp); + PMD_CPP_LOG(DEBUG, "%s: sending NFP model %08x.", __func__, tmp); err = send(sockfd, &tmp, 4, 0); if (err != 4) { - PMD_CPP_LOG(ERR, "error writing to socket"); + PMD_CPP_LOG(ERR, "Error writing to socket."); return -EIO; } tmp = nfp_cpp_interface(cpp); - PMD_CPP_LOG(DEBUG, "%s: sending NFP interface %08x", __func__, tmp); + PMD_CPP_LOG(DEBUG, "%s: sending NFP interface %08x.", __func__, tmp); err = send(sockfd, &tmp, 4, 0); if (err != 4) { - PMD_CPP_LOG(ERR, "error writing to socket"); + PMD_CPP_LOG(ERR, "Error writing to socket."); return -EIO; } @@ -347,7 +347,7 @@ nfp_cpp_bridge_service_func(void *args) unlink(socket_handle); sockfd = socket(AF_UNIX, SOCK_STREAM, 0); if (sockfd < 0) { - PMD_CPP_LOG(ERR, "socket creation error. Service failed"); + PMD_CPP_LOG(ERR, "Socket creation error. Service failed."); return -EIO; } @@ -361,14 +361,14 @@ nfp_cpp_bridge_service_func(void *args) ret = bind(sockfd, (const struct sockaddr *)&address, sizeof(struct sockaddr)); if (ret < 0) { - PMD_CPP_LOG(ERR, "bind error (%d). Service failed", errno); + PMD_CPP_LOG(ERR, "Bind error (%d). Service failed.", errno); close(sockfd); return ret; } ret = listen(sockfd, 20); if (ret < 0) { - PMD_CPP_LOG(ERR, "listen error(%d). Service failed", errno); + PMD_CPP_LOG(ERR, "Listen error(%d). 
Service failed.", errno); close(sockfd); return ret; } @@ -380,8 +380,8 @@ nfp_cpp_bridge_service_func(void *args) if (errno == EAGAIN || errno == EWOULDBLOCK) continue; - PMD_CPP_LOG(ERR, "accept call error (%d)", errno); - PMD_CPP_LOG(ERR, "service failed"); + PMD_CPP_LOG(ERR, "Accept call error (%d).", errno); + PMD_CPP_LOG(ERR, "Service failed."); close(sockfd); return -EIO; } @@ -389,11 +389,11 @@ nfp_cpp_bridge_service_func(void *args) for (;;) { ret = recv(datafd, &op, 4, 0); if (ret <= 0) { - PMD_CPP_LOG(DEBUG, "%s: socket close", __func__); + PMD_CPP_LOG(DEBUG, "%s: socket close.", __func__); break; } - PMD_CPP_LOG(DEBUG, "%s: getting op %u", __func__, op); + PMD_CPP_LOG(DEBUG, "%s: getting op %u.", __func__, op); if (op == NFP_BRIDGE_OP_READ) nfp_cpp_bridge_serve_read(datafd, cpp); diff --git a/drivers/net/nfp/nfp_ethdev.c b/drivers/net/nfp/nfp_ethdev.c index b16fbe7db7..f54483822f 100644 --- a/drivers/net/nfp/nfp_ethdev.c +++ b/drivers/net/nfp/nfp_ethdev.c @@ -61,7 +61,7 @@ nfp_devarg_handle_int(const char *key, *num = strtoul(value, &end_ptr, 10); if (*num == ULONG_MAX) { - PMD_DRV_LOG(ERR, "%s: '%s' is not a valid param", key, value); + PMD_DRV_LOG(ERR, "%s: '%s' is not a valid param.", key, value); return -ERANGE; } else if (value == end_ptr) { return -EPERM; @@ -84,7 +84,7 @@ nfp_devarg_parse_bool_para(struct rte_kvargs *kvlist, return 0; if (count > 1) { - PMD_DRV_LOG(ERR, "Too much bool arguments: %s", key_match); + PMD_DRV_LOG(ERR, "Too much bool arguments: %s.", key_match); return -EINVAL; } @@ -97,7 +97,7 @@ nfp_devarg_parse_bool_para(struct rte_kvargs *kvlist, } else if (value == 0) { *value_ret = false; } else { - PMD_DRV_LOG(ERR, "The param does not work, the format is %s=0/1", + PMD_DRV_LOG(ERR, "The param does not work, the format is %s=0/1.", key_match); return -EINVAL; } @@ -389,7 +389,7 @@ nfp_net_start(struct rte_eth_dev *dev) if (dev->data->dev_conf.intr_conf.rxq != 0) { if (app_fw_nic->multiport) { PMD_INIT_LOG(ERR, "PMD rx interrupt is not supported " - "with NFP multiport PF"); + "with NFP multiport PF."); return -EINVAL; } @@ -403,7 +403,7 @@ nfp_net_start(struct rte_eth_dev *dev) if (dev->data->nb_rx_queues > 1) { PMD_INIT_LOG(ERR, "PMD rx interrupt only " - "supports 1 queue with UIO"); + "supports 1 queue with UIO."); return -EIO; } } @@ -418,7 +418,7 @@ nfp_net_start(struct rte_eth_dev *dev) /* Checking MTU set */ if (dev->data->mtu > net_hw->flbufsz) { - PMD_INIT_LOG(ERR, "MTU (%u) can't be larger than the current NFP_FRAME_SIZE (%u)", + PMD_INIT_LOG(ERR, "MTU (%u) can not be larger than the current NFP_FRAME_SIZE (%u).", dev->data->mtu, net_hw->flbufsz); return -ERANGE; } @@ -443,13 +443,6 @@ nfp_net_start(struct rte_eth_dev *dev) update |= NFP_NET_CFG_UPDATE_GEN | NFP_NET_CFG_UPDATE_RING; txmode = &dev->data->dev_conf.txmode; - /* Enable vxlan */ - if ((txmode->offloads & RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO) != 0) { - if ((hw->cap & NFP_NET_CFG_CTRL_VXLAN) != 0) { - new_ctrl |= NFP_NET_CFG_CTRL_VXLAN; - update |= NFP_NET_CFG_UPDATE_VXLAN; - } - } if ((hw->cap & NFP_NET_CFG_CTRL_RINGCFG) != 0) new_ctrl |= NFP_NET_CFG_CTRL_RINGCFG; @@ -573,7 +566,7 @@ nfp_net_beat_timer(void *arg) /* Beat once per second. 
*/ if (rte_eal_alarm_set(1000 * 1000, nfp_net_beat_timer, (void *)multi_pf) < 0) { - PMD_DRV_LOG(ERR, "Error setting alarm"); + PMD_DRV_LOG(ERR, "Error setting alarm."); } } @@ -620,7 +613,7 @@ nfp_net_keepalive_start(struct nfp_multi_pf *multi_pf) { if (rte_eal_alarm_set(1000 * 1000, nfp_net_beat_timer, (void *)multi_pf) < 0) { - PMD_DRV_LOG(ERR, "Error setting alarm"); + PMD_DRV_LOG(ERR, "Error setting alarm."); return -EIO; } @@ -854,34 +847,41 @@ nfp_udp_tunnel_port_add(struct rte_eth_dev *dev, { int ret; uint32_t idx; + uint32_t ctrl; + struct nfp_hw *hw; uint16_t vxlan_port; - struct nfp_net_hw *hw; + struct nfp_net_hw *net_hw; enum rte_eth_tunnel_type tnl_type; - hw = dev->data->dev_private; + net_hw = dev->data->dev_private; vxlan_port = tunnel_udp->udp_port; tnl_type = tunnel_udp->prot_type; if (tnl_type != RTE_ETH_TUNNEL_TYPE_VXLAN) { - PMD_DRV_LOG(ERR, "Not VXLAN tunnel"); + PMD_DRV_LOG(ERR, "Not VXLAN tunnel."); return -ENOTSUP; } - ret = nfp_net_find_vxlan_idx(hw, vxlan_port, &idx); + ret = nfp_net_find_vxlan_idx(net_hw, vxlan_port, &idx); if (ret != 0) { - PMD_DRV_LOG(ERR, "Failed find valid vxlan idx"); + PMD_DRV_LOG(ERR, "Failed find valid vxlan idx."); return -EINVAL; } - if (hw->vxlan_usecnt[idx] == 0) { - ret = nfp_net_set_vxlan_port(hw, idx, vxlan_port); + if (net_hw->vxlan_usecnt[idx] == 0) { + hw = &net_hw->super; + ctrl = hw->ctrl | NFP_NET_CFG_CTRL_VXLAN; + + ret = nfp_net_set_vxlan_port(net_hw, idx, vxlan_port, ctrl); if (ret != 0) { - PMD_DRV_LOG(ERR, "Failed set vxlan port"); + PMD_DRV_LOG(ERR, "Failed set vxlan port."); return -EINVAL; } + + hw->ctrl = ctrl; } - hw->vxlan_usecnt[idx]++; + net_hw->vxlan_usecnt[idx]++; return 0; } @@ -892,33 +892,40 @@ nfp_udp_tunnel_port_del(struct rte_eth_dev *dev, { int ret; uint32_t idx; + uint32_t ctrl; + struct nfp_hw *hw; uint16_t vxlan_port; - struct nfp_net_hw *hw; + struct nfp_net_hw *net_hw; enum rte_eth_tunnel_type tnl_type; - hw = dev->data->dev_private; + net_hw = dev->data->dev_private; vxlan_port = tunnel_udp->udp_port; tnl_type = tunnel_udp->prot_type; if (tnl_type != RTE_ETH_TUNNEL_TYPE_VXLAN) { - PMD_DRV_LOG(ERR, "Not VXLAN tunnel"); + PMD_DRV_LOG(ERR, "Not VXLAN tunnel."); return -ENOTSUP; } - ret = nfp_net_find_vxlan_idx(hw, vxlan_port, &idx); - if (ret != 0 || hw->vxlan_usecnt[idx] == 0) { - PMD_DRV_LOG(ERR, "Failed find valid vxlan idx"); + ret = nfp_net_find_vxlan_idx(net_hw, vxlan_port, &idx); + if (ret != 0 || net_hw->vxlan_usecnt[idx] == 0) { + PMD_DRV_LOG(ERR, "Failed find valid vxlan idx."); return -EINVAL; } - hw->vxlan_usecnt[idx]--; + net_hw->vxlan_usecnt[idx]--; + + if (net_hw->vxlan_usecnt[idx] == 0) { + hw = &net_hw->super; + ctrl = hw->ctrl & ~NFP_NET_CFG_CTRL_VXLAN; - if (hw->vxlan_usecnt[idx] == 0) { - ret = nfp_net_set_vxlan_port(hw, idx, 0); + ret = nfp_net_set_vxlan_port(net_hw, idx, 0, ctrl); if (ret != 0) { - PMD_DRV_LOG(ERR, "Failed set vxlan port"); + PMD_DRV_LOG(ERR, "Failed set vxlan port."); return -EINVAL; } + + hw->ctrl = ctrl; } return 0; @@ -971,6 +978,13 @@ static const struct eth_dev_ops nfp_net_eth_dev_ops = { .fec_get_capability = nfp_net_fec_get_capability, .fec_get = nfp_net_fec_get, .fec_set = nfp_net_fec_set, + .get_eeprom_length = nfp_net_get_eeprom_len, + .get_eeprom = nfp_net_get_eeprom, + .set_eeprom = nfp_net_set_eeprom, + .get_module_info = nfp_net_get_module_info, + .get_module_eeprom = nfp_net_get_module_eeprom, + .dev_led_on = nfp_net_led_on, + .dev_led_off = nfp_net_led_off, }; static inline void @@ -1023,14 +1037,14 @@ nfp_net_init(struct rte_eth_dev *eth_dev, 
port = net_hw->idx; if (port > 7) { - PMD_DRV_LOG(ERR, "Port value is wrong"); + PMD_DRV_LOG(ERR, "Port value is wrong."); return -ENODEV; } hw = &net_hw->super; PMD_INIT_LOG(DEBUG, "Working with physical port number: %hu, " - "NFP internal port number: %d", port, net_hw->nfp_idx); + "NFP internal port number: %d.", port, net_hw->nfp_idx); rte_eth_copy_pci_info(eth_dev, pci_dev); @@ -1042,8 +1056,8 @@ nfp_net_init(struct rte_eth_dev *eth_dev, net_hw->mac_stats = pf_dev->mac_stats_bar + (net_hw->nfp_idx * NFP_MAC_STATS_SIZE); - PMD_INIT_LOG(DEBUG, "ctrl bar: %p", hw->ctrl_bar); - PMD_INIT_LOG(DEBUG, "MAC stats: %p", net_hw->mac_stats); + PMD_INIT_LOG(DEBUG, "Ctrl bar: %p.", hw->ctrl_bar); + PMD_INIT_LOG(DEBUG, "MAC stats: %p.", net_hw->mac_stats); err = nfp_net_common_init(pf_dev, net_hw); if (err != 0) @@ -1051,13 +1065,13 @@ nfp_net_init(struct rte_eth_dev *eth_dev, err = nfp_net_tlv_caps_parse(eth_dev); if (err != 0) { - PMD_INIT_LOG(ERR, "Failed to parser TLV caps"); + PMD_INIT_LOG(ERR, "Failed to parse TLV caps."); return err; } err = nfp_ipsec_init(eth_dev); if (err != 0) { - PMD_INIT_LOG(ERR, "Failed to init IPsec module"); + PMD_INIT_LOG(ERR, "Failed to init IPsec module."); return err; } @@ -1066,7 +1080,7 @@ nfp_net_init(struct rte_eth_dev *eth_dev, net_hw->eth_xstats_base = rte_malloc("rte_eth_xstat", sizeof(struct rte_eth_xstat) * nfp_net_xstats_size(eth_dev), 0); if (net_hw->eth_xstats_base == NULL) { - PMD_INIT_LOG(ERR, "no memory for xstats base values on device %s!", + PMD_INIT_LOG(ERR, "No memory for xstats base values on device %s!", pci_dev->device.name); err = -ENOMEM; goto ipsec_exit; @@ -1079,7 +1093,7 @@ nfp_net_init(struct rte_eth_dev *eth_dev, net_hw->tx_bar = pf_dev->qc_bar + tx_base * NFP_QCP_QUEUE_ADDR_SZ; net_hw->rx_bar = pf_dev->qc_bar + rx_base * NFP_QCP_QUEUE_ADDR_SZ; - PMD_INIT_LOG(DEBUG, "ctrl_bar: %p, tx_bar: %p, rx_bar: %p", + PMD_INIT_LOG(DEBUG, "The ctrl_bar: %p, tx_bar: %p, rx_bar: %p.", hw->ctrl_bar, net_hw->tx_bar, net_hw->rx_bar); nfp_net_cfg_queue_setup(net_hw); @@ -1097,7 +1111,7 @@ nfp_net_init(struct rte_eth_dev *eth_dev, if ((port == 0 || pf_dev->multi_pf.enabled)) { err = nfp_net_vf_config_app_init(net_hw, pf_dev); if (err != 0) { - PMD_INIT_LOG(ERR, "Failed to init sriov module"); + PMD_INIT_LOG(ERR, "Failed to init sriov module."); goto xstats_free; } } @@ -1105,7 +1119,7 @@ nfp_net_init(struct rte_eth_dev *eth_dev, /* Allocating memory for mac addr */ eth_dev->data->mac_addrs = rte_zmalloc("mac_addr", RTE_ETHER_ADDR_LEN, 0); if (eth_dev->data->mac_addrs == NULL) { - PMD_INIT_LOG(ERR, "Failed to space for MAC address"); + PMD_INIT_LOG(ERR, "Failed to allocate space for MAC address."); err = -ENOMEM; goto xstats_free; } @@ -1120,7 +1134,7 @@ nfp_net_init(struct rte_eth_dev *eth_dev, nfp_write_mac(hw, &hw->mac_addr.addr_bytes[0]); if (rte_is_valid_assigned_ether_addr(&hw->mac_addr) == 0) { - PMD_INIT_LOG(INFO, "Using random mac address for port %d", port); + PMD_INIT_LOG(INFO, "Using random mac address for port %d.", port); /* Using random mac addresses for VFs */ rte_eth_random_addr(&hw->mac_addr.addr_bytes[0]); nfp_write_mac(hw, &hw->mac_addr.addr_bytes[0]); @@ -1134,7 +1148,7 @@ nfp_net_init(struct rte_eth_dev *eth_dev, eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS; - PMD_INIT_LOG(INFO, "port %d VendorID=%#x DeviceID=%#x " + PMD_INIT_LOG(INFO, "Port %d VendorID=%#x DeviceID=%#x " "mac=" RTE_ETHER_ADDR_PRT_FMT, eth_dev->data->port_id, pci_dev->id.vendor_id, pci_dev->id.device_id, @@ -1153,7 +1167,7 @@ nfp_net_init(struct rte_eth_dev 
*eth_dev, if ((hw->cap_ext & NFP_NET_CFG_CTRL_FLOW_STEER) != 0) { err = nfp_net_flow_priv_init(pf_dev, port); if (err != 0) { - PMD_INIT_LOG(ERR, "Init net flow priv failed"); + PMD_INIT_LOG(ERR, "Init net flow priv failed."); goto txrwb_free; } } @@ -1182,7 +1196,7 @@ nfp_net_device_activate(struct nfp_pf_dev *pf_dev) if (multi_pf->enabled && multi_pf->function_id != 0) { nsp = nfp_nsp_open(pf_dev->cpp); if (nsp == NULL) { - PMD_DRV_LOG(ERR, "NFP error when obtaining NSP handle"); + PMD_DRV_LOG(ERR, "NFP error when obtaining NSP handle."); return -EIO; } @@ -1224,7 +1238,7 @@ nfp_fw_get_name(struct nfp_pf_dev *pf_dev, cpp_serial[4], cpp_serial[5], interface >> 8, interface & 0xff); snprintf(fw_name, fw_size, "%s/%s.nffw", DEFAULT_FW_PATH, serial); - PMD_DRV_LOG(DEBUG, "Trying with fw file: %s", fw_name); + PMD_DRV_LOG(DEBUG, "Trying with fw file: %s.", fw_name); if (access(fw_name, F_OK) == 0) return 0; @@ -1232,7 +1246,7 @@ nfp_fw_get_name(struct nfp_pf_dev *pf_dev, snprintf(fw_name, fw_size, "%s/pci-%s.nffw", DEFAULT_FW_PATH, pf_dev->pci_dev->name); - PMD_DRV_LOG(DEBUG, "Trying with fw file: %s", fw_name); + PMD_DRV_LOG(DEBUG, "Trying with fw file: %s.", fw_name); if (access(fw_name, F_OK) == 0) return 0; @@ -1240,7 +1254,7 @@ nfp_fw_get_name(struct nfp_pf_dev *pf_dev, if (nfp_fw_model == NULL) { nfp_fw_model = nfp_hwinfo_lookup(pf_dev->hwinfo, "assembly.partno"); if (nfp_fw_model == NULL) { - PMD_DRV_LOG(ERR, "firmware model NOT found"); + PMD_DRV_LOG(ERR, "Firmware model NOT found."); return -EIO; } } @@ -1248,7 +1262,7 @@ nfp_fw_get_name(struct nfp_pf_dev *pf_dev, /* And then try the model name */ snprintf(card_desc, sizeof(card_desc), "%s.nffw", nfp_fw_model); snprintf(fw_name, fw_size, "%s/%s", DEFAULT_FW_PATH, card_desc); - PMD_DRV_LOG(DEBUG, "Trying with fw file: %s", fw_name); + PMD_DRV_LOG(DEBUG, "Trying with fw file: %s.", fw_name); if (access(fw_name, F_OK) == 0) return 0; @@ -1257,7 +1271,7 @@ nfp_fw_get_name(struct nfp_pf_dev *pf_dev, nfp_fw_model, pf_dev->nfp_eth_table->count, pf_dev->nfp_eth_table->ports[0].speed / 1000); snprintf(fw_name, fw_size, "%s/%s", DEFAULT_FW_PATH, card_desc); - PMD_DRV_LOG(DEBUG, "Trying with fw file: %s", fw_name); + PMD_DRV_LOG(DEBUG, "Trying with fw file: %s.", fw_name); if (access(fw_name, F_OK) == 0) return 0; @@ -1274,11 +1288,11 @@ nfp_fw_upload(struct nfp_nsp *nsp, err = rte_firmware_read(fw_name, &fw_buf, &fsize); if (err != 0) { - PMD_DRV_LOG(ERR, "firmware %s not found!", fw_name); + PMD_DRV_LOG(ERR, "Firmware %s not found!", fw_name); return -ENOENT; } - PMD_DRV_LOG(INFO, "Firmware file found at %s with size: %zu", + PMD_DRV_LOG(INFO, "Firmware file found at %s with size: %zu.", fw_name, fsize); PMD_DRV_LOG(INFO, "Uploading the firmware ..."); if (nfp_nsp_load_fw(nsp, fw_buf, fsize) < 0) { @@ -1287,7 +1301,7 @@ nfp_fw_upload(struct nfp_nsp *nsp, return -EIO; } - PMD_DRV_LOG(INFO, "Done"); + PMD_DRV_LOG(INFO, "Done."); free(fw_buf); @@ -1327,11 +1341,11 @@ nfp_fw_check_change(struct nfp_cpp *cpp, nfp_net_get_fw_version(cpp, &old_version); if (new_version != old_version) { - PMD_DRV_LOG(INFO, "FW version is changed, new %u, old %u", + PMD_DRV_LOG(INFO, "FW version is changed, new %u, old %u.", new_version, old_version); *fw_changed = true; } else { - PMD_DRV_LOG(INFO, "FW version is not changed and is %u", new_version); + PMD_DRV_LOG(INFO, "FW version is not changed and is %u.", new_version); *fw_changed = false; } @@ -1380,7 +1394,7 @@ nfp_fw_reload(struct nfp_nsp *nsp, if (reset_flag) { err = nfp_nsp_device_soft_reset(nsp); if (err 
!= 0) { - PMD_DRV_LOG(ERR, "NFP firmware soft reset failed"); + PMD_DRV_LOG(ERR, "NFP firmware soft reset failed."); return err; } } @@ -1395,7 +1409,7 @@ nfp_fw_reload(struct nfp_nsp *nsp, err = nfp_fw_upload(nsp, fw_name); if (err != 0) { - PMD_DRV_LOG(ERR, "NFP firmware load failed"); + PMD_DRV_LOG(ERR, "NFP firmware load failed."); return err; } @@ -1447,7 +1461,7 @@ nfp_fw_skip_load(const struct nfp_dev_info *dev_info, beat[port_num] = 0; if (*reload_fw) { *reload_fw = false; - PMD_DRV_LOG(ERR, "The param %s does not work", + PMD_DRV_LOG(ERR, "The param %s does not work.", NFP_PF_FORCE_RELOAD_FW); } } @@ -1581,13 +1595,13 @@ nfp_fw_reload_for_multi_pf(struct nfp_nsp *nsp, err = nfp_net_keepalive_init(pf_dev->cpp, multi_pf); if (err != 0) { - PMD_DRV_LOG(ERR, "NFP init beat failed"); + PMD_DRV_LOG(ERR, "NFP init beat failed."); return err; } err = nfp_net_keepalive_start(multi_pf); if (err != 0) { - PMD_DRV_LOG(ERR, "NFP write beat failed"); + PMD_DRV_LOG(ERR, "NFP write beat failed."); goto keepalive_uninit; } @@ -1660,7 +1674,7 @@ nfp_fw_policy_value_get(struct nfp_nsp *nsp, ret = nfp_strtol(buf, 0, &val); if (ret != 0 || val < 0 || val > max_val) { - PMD_DRV_LOG(WARNING, "Invalid value '%s' from '%s', ignoring", + PMD_DRV_LOG(WARNING, "Invalid value '%s' from '%s', ignoring.", buf, key); /* Fall back to the default value */ ret = nfp_strtol(default_val, 0, &val); @@ -1685,7 +1699,7 @@ nfp_fw_setup(struct nfp_pf_dev *pf_dev, nsp = nfp_nsp_open(pf_dev->cpp); if (nsp == NULL) { - PMD_DRV_LOG(ERR, "NFP error when obtaining NSP handle"); + PMD_DRV_LOG(ERR, "NFP error when obtaining NSP handle."); return -EIO; } @@ -1709,7 +1723,7 @@ nfp_fw_setup(struct nfp_pf_dev *pf_dev, if (policy != NFP_NSP_APP_FW_LOAD_FLASH) { err = nfp_fw_get_name(pf_dev, fw_name, sizeof(fw_name)); if (err != 0) { - PMD_DRV_LOG(ERR, "Can't find suitable firmware."); + PMD_DRV_LOG(ERR, "Can not find suitable firmware."); goto close_nsp; } } @@ -1744,7 +1758,7 @@ nfp_check_multi_pf_from_nsp(struct rte_pci_device *pci_dev, nsp = nfp_nsp_open(cpp); if (nsp == NULL) { - PMD_DRV_LOG(ERR, "NFP error when obtaining NSP handle"); + PMD_DRV_LOG(ERR, "NFP error when obtaining NSP handle."); return false; } @@ -1776,7 +1790,7 @@ nfp_enable_multi_pf(struct nfp_pf_dev *pf_dev) ctrl_bar = nfp_rtsym_map(pf_dev->sym_tbl, name, pf_dev->ctrl_bar_size, &area); if (ctrl_bar == NULL) { - PMD_INIT_LOG(ERR, "Failed to find data vNIC memory symbol"); + PMD_INIT_LOG(ERR, "Failed to find data vNIC memory symbol."); return -ENODEV; } @@ -1798,7 +1812,7 @@ nfp_enable_multi_pf(struct nfp_pf_dev *pf_dev) cap_extend = nn_cfg_readl(hw, NFP_NET_CFG_CAP_WORD1); if ((cap_extend & NFP_NET_CFG_CTRL_MULTI_PF) == 0) { - PMD_INIT_LOG(ERR, "Loaded firmware doesn't support multiple PF"); + PMD_INIT_LOG(ERR, "Loaded firmware does not support multiple PF."); err = -EINVAL; goto end; } @@ -1807,7 +1821,13 @@ nfp_enable_multi_pf(struct nfp_pf_dev *pf_dev) net_hw.tx_bar = pf_dev->qc_bar + tx_base * NFP_QCP_QUEUE_ADDR_SZ; nfp_net_cfg_queue_setup(&net_hw); rte_spinlock_init(&hw->reconfig_lock); - nfp_ext_reconfig(&net_hw.super, NFP_NET_CFG_CTRL_MULTI_PF, NFP_NET_CFG_UPDATE_GEN); + err = nfp_ext_reconfig(&net_hw.super, NFP_NET_CFG_CTRL_MULTI_PF, + NFP_NET_CFG_UPDATE_GEN); + if (err != 0) { + PMD_INIT_LOG(ERR, "Configure multiple PF failed."); + goto end; + } + end: nfp_cpp_area_release_free(area); return err; @@ -1822,7 +1842,7 @@ nfp_app_fw_nic_total_phyports_check(struct nfp_pf_dev *pf_dev) if (pf_dev->multi_pf.enabled) { if 
(!nfp_check_multi_pf_from_fw(total_phyports)) { - PMD_INIT_LOG(ERR, "NSP report multipf, but FW report not multipf"); + PMD_INIT_LOG(ERR, "NSP report multipf, but FW report not multipf."); return false; } } else { @@ -1831,7 +1851,7 @@ nfp_app_fw_nic_total_phyports_check(struct nfp_pf_dev *pf_dev) * number of physical ports. */ if (total_phyports != pf_dev->nfp_eth_table->count) { - PMD_INIT_LOG(ERR, "Total physical ports do not match number of vNICs"); + PMD_INIT_LOG(ERR, "Total physical ports do not match number of vNICs."); return false; } } @@ -1869,7 +1889,7 @@ nfp_init_app_fw_nic(struct nfp_net_hw_priv *hw_priv) }; nfp_eth_table = pf_dev->nfp_eth_table; - PMD_INIT_LOG(INFO, "Total physical ports: %d", nfp_eth_table->count); + PMD_INIT_LOG(INFO, "Total physical ports: %d.", nfp_eth_table->count); id = nfp_function_id_get(pf_dev, 0); /* Allocate memory for the CoreNIC app */ @@ -1896,12 +1916,12 @@ nfp_init_app_fw_nic(struct nfp_net_hw_priv *hw_priv) pf_dev->total_phyports * pf_dev->ctrl_bar_size, &pf_dev->ctrl_area); if (pf_dev->ctrl_bar == NULL) { - PMD_INIT_LOG(ERR, "nfp_rtsym_map fails for %s", bar_name); + PMD_INIT_LOG(ERR, "The nfp_rtsym_map fails for %s.", bar_name); ret = -EIO; goto app_cleanup; } - PMD_INIT_LOG(DEBUG, "ctrl bar: %p", pf_dev->ctrl_bar); + PMD_INIT_LOG(DEBUG, "Ctrl bar: %p.", pf_dev->ctrl_bar); /* Loop through all physical ports on PF */ for (i = 0; i < pf_dev->total_phyports; i++) { @@ -2070,7 +2090,7 @@ nfp_net_speed_cap_get_one(struct nfp_pf_dev *pf_dev, nsp = nfp_nsp_open(pf_dev->cpp); if (nsp == NULL) { - PMD_DRV_LOG(ERR, "Couldn't get NSP."); + PMD_DRV_LOG(ERR, "Could not get NSP."); return -EIO; } @@ -2141,7 +2161,7 @@ nfp_fw_app_primary_init(struct nfp_net_hw_priv *hw_priv) switch (pf_dev->app_fw_id) { case NFP_APP_FW_CORE_NIC: - PMD_INIT_LOG(INFO, "Initializing coreNIC"); + PMD_INIT_LOG(INFO, "Initializing coreNIC."); ret = nfp_init_app_fw_nic(hw_priv); if (ret != 0) { PMD_INIT_LOG(ERR, "Could not initialize coreNIC!"); @@ -2149,7 +2169,7 @@ nfp_fw_app_primary_init(struct nfp_net_hw_priv *hw_priv) } break; case NFP_APP_FW_FLOWER_NIC: - PMD_INIT_LOG(INFO, "Initializing Flower"); + PMD_INIT_LOG(INFO, "Initializing Flower."); ret = nfp_init_app_fw_flower(hw_priv); if (ret != 0) { PMD_INIT_LOG(ERR, "Could not initialize Flower!"); @@ -2157,7 +2177,7 @@ nfp_fw_app_primary_init(struct nfp_net_hw_priv *hw_priv) } break; default: - PMD_INIT_LOG(ERR, "Unsupported Firmware loaded"); + PMD_INIT_LOG(ERR, "Unsupported Firmware loaded."); ret = -EINVAL; return ret; } @@ -2197,7 +2217,7 @@ nfp_pf_get_sriov_vf(struct nfp_pf_dev *pf_dev, pos = rte_pci_find_ext_capability(pf_dev->pci_dev, RTE_PCI_EXT_CAP_ID_SRIOV); if (pos == 0) { - PMD_INIT_LOG(ERR, "Can not get the pci sriov cap"); + PMD_INIT_LOG(ERR, "Can not get the pci sriov cap."); return -EIO; } @@ -2208,7 +2228,7 @@ nfp_pf_get_sriov_vf(struct nfp_pf_dev *pf_dev, ret = rte_pci_read_config(pf_dev->pci_dev, &sriov_vf, sizeof(sriov_vf), pos + RTE_PCI_SRIOV_TOTAL_VF); if (ret < 0) { - PMD_INIT_LOG(ERR, "Can not read the sriov toatl VF"); + PMD_INIT_LOG(ERR, "Can not read the sriov toatl VF."); return -EIO; } @@ -2216,7 +2236,7 @@ nfp_pf_get_sriov_vf(struct nfp_pf_dev *pf_dev, ret = rte_pci_read_config(pf_dev->pci_dev, &offset, sizeof(offset), pos + RTE_PCI_SRIOV_VF_OFFSET); if (ret < 0) { - PMD_INIT_LOG(ERR, "Can not get the VF offset"); + PMD_INIT_LOG(ERR, "Can not get the VF offset."); return -EIO; } @@ -2226,7 +2246,7 @@ nfp_pf_get_sriov_vf(struct nfp_pf_dev *pf_dev, offset -= dev_info->pf_num_per_unit; if (offset 
>= pf_dev->max_vfs || offset + sriov_vf > pf_dev->max_vfs) { - PMD_INIT_LOG(ERR, "The pci allocate VF is more than the MAX VF"); + PMD_INIT_LOG(ERR, "The pci allocate VF is more than the MAX VF."); return -ERANGE; } @@ -2245,11 +2265,11 @@ nfp_net_get_vf_info(struct nfp_pf_dev *pf_dev, ret = nfp_pf_get_max_vf(pf_dev); if (ret != 0) { if (ret != -ENOENT) { - PMD_INIT_LOG(ERR, "Read max VFs failed"); + PMD_INIT_LOG(ERR, "Read max VFs failed."); return ret; } - PMD_INIT_LOG(WARNING, "The firmware can not support read max VFs"); + PMD_INIT_LOG(WARNING, "The firmware can not support read max VFs."); return 0; } @@ -2335,13 +2355,13 @@ nfp_pf_init(struct rte_pci_device *pci_dev) dev_info = nfp_dev_info_get(pci_dev->id.device_id); if (dev_info == NULL) { - PMD_INIT_LOG(ERR, "Not supported device ID"); + PMD_INIT_LOG(ERR, "Not supported device ID."); return -ENODEV; } hw_priv = rte_zmalloc(NULL, sizeof(*hw_priv), 0); if (hw_priv == NULL) { - PMD_INIT_LOG(ERR, "Can not alloc memory for hw priv data"); + PMD_INIT_LOG(ERR, "Can not alloc memory for hw priv data."); return -ENOMEM; } @@ -2350,7 +2370,7 @@ nfp_pf_init(struct rte_pci_device *pci_dev) snprintf(name, sizeof(name), "nfp_pf%u", function_id); pf_dev = rte_zmalloc(name, sizeof(*pf_dev), 0); if (pf_dev == NULL) { - PMD_INIT_LOG(ERR, "Can't allocate memory for the PF device"); + PMD_INIT_LOG(ERR, "Can not allocate memory for the PF device."); ret = -ENOMEM; goto hw_priv_free; } @@ -2380,7 +2400,7 @@ nfp_pf_init(struct rte_pci_device *pci_dev) cpp = nfp_cpp_from_nfp6000_pcie(pci_dev, dev_info, true); if (cpp == NULL) { - PMD_INIT_LOG(ERR, "A CPP handle can not be obtained"); + PMD_INIT_LOG(ERR, "A CPP handle can not be obtained."); ret = -EIO; goto sync_free; } @@ -2390,7 +2410,7 @@ nfp_pf_init(struct rte_pci_device *pci_dev) hwinfo = nfp_hwinfo_read(cpp); if (hwinfo == NULL) { - PMD_INIT_LOG(ERR, "Error reading hwinfo table"); + PMD_INIT_LOG(ERR, "Error reading hwinfo table."); ret = -EIO; goto cpp_cleanup; } @@ -2400,13 +2420,13 @@ nfp_pf_init(struct rte_pci_device *pci_dev) /* Read the number of physical ports from hardware */ nfp_eth_table = nfp_eth_read_ports(cpp); if (nfp_eth_table == NULL) { - PMD_INIT_LOG(ERR, "Error reading NFP ethernet table"); + PMD_INIT_LOG(ERR, "Error reading NFP ethernet table."); ret = -EIO; goto hwinfo_cleanup; } if (nfp_eth_table->count == 0 || nfp_eth_table->count > 8) { - PMD_INIT_LOG(ERR, "NFP ethernet table reports wrong ports: %u", + PMD_INIT_LOG(ERR, "NFP ethernet table reports wrong ports: %u.", nfp_eth_table->count); ret = -EIO; goto eth_table_cleanup; @@ -2419,28 +2439,28 @@ nfp_pf_init(struct rte_pci_device *pci_dev) ret = nfp_net_force_port_down(pf_dev); if (ret != 0) { - PMD_INIT_LOG(ERR, "Failed to force port down"); + PMD_INIT_LOG(ERR, "Failed to force port down."); ret = -EIO; goto eth_table_cleanup; } ret = nfp_devargs_parse(&pf_dev->devargs, pci_dev->device.devargs); if (ret != 0) { - PMD_INIT_LOG(ERR, "Error when parsing device args"); + PMD_INIT_LOG(ERR, "Error when parsing device args."); ret = -EINVAL; goto eth_table_cleanup; } ret = nfp_net_device_activate(pf_dev); if (ret != 0) { - PMD_INIT_LOG(ERR, "Failed to activate the NFP device"); + PMD_INIT_LOG(ERR, "Failed to activate the NFP device."); ret = -EIO; goto eth_table_cleanup; } ret = nfp_fw_setup(pf_dev, dev_info); if (ret != 0) { - PMD_INIT_LOG(ERR, "Error when uploading firmware"); + PMD_INIT_LOG(ERR, "Error when uploading firmware."); ret = -EIO; goto eth_table_cleanup; } @@ -2448,7 +2468,7 @@ nfp_pf_init(struct rte_pci_device 
*pci_dev) /* Now the symbol table should be there */ sym_tbl = nfp_rtsym_table_read(cpp); if (sym_tbl == NULL) { - PMD_INIT_LOG(ERR, "Something is wrong with the firmware symbol table"); + PMD_INIT_LOG(ERR, "Something is wrong with the firmware symbol table."); ret = -EIO; goto fw_cleanup; } @@ -2459,7 +2479,7 @@ nfp_pf_init(struct rte_pci_device *pci_dev) snprintf(app_name, sizeof(app_name), "_pf%u_net_app_id", function_id); app_fw_id = nfp_rtsym_read_le(sym_tbl, app_name, &ret); if (ret != 0) { - PMD_INIT_LOG(ERR, "Couldn't read %s from firmware", app_name); + PMD_INIT_LOG(ERR, "Could not read %s from firmware.", app_name); ret = -EIO; goto sym_tbl_cleanup; } @@ -2496,18 +2516,18 @@ nfp_pf_init(struct rte_pci_device *pci_dev) pf_dev->qc_bar = nfp_cpp_map_area(pf_dev->cpp, cpp_id, addr, dev_info->qc_area_sz, &pf_dev->qc_area); if (pf_dev->qc_bar == NULL) { - PMD_INIT_LOG(ERR, "nfp_rtsym_map fails for net.qc"); + PMD_INIT_LOG(ERR, "The nfp_rtsym_map fails for net.qc."); ret = -EIO; goto sym_tbl_cleanup; } - PMD_INIT_LOG(DEBUG, "qc_bar address: %p", pf_dev->qc_bar); + PMD_INIT_LOG(DEBUG, "The qc_bar address: %p.", pf_dev->qc_bar); pf_dev->mac_stats_bar = nfp_rtsym_map(sym_tbl, "_mac_stats", NFP_MAC_STATS_SIZE * nfp_eth_table->max_index, &pf_dev->mac_stats_area); if (pf_dev->mac_stats_bar == NULL) { - PMD_INIT_LOG(ERR, "nfp_rtsym_map fails for _mac_stats"); + PMD_INIT_LOG(ERR, "The nfp_rtsym_map fails for _mac_stats."); goto hwqueues_cleanup; } @@ -2523,6 +2543,12 @@ nfp_pf_init(struct rte_pci_device *pci_dev) hw_priv->is_pf = true; + if (!nfp_net_recv_pkt_meta_check_register(hw_priv)) { + PMD_INIT_LOG(ERR, "PF register meta check function failed."); + ret = -EIO; + goto hw_priv_free; + } + /* * PF initialization has been done at this point. Call app specific * init code now. 
@@ -2603,11 +2629,11 @@ nfp_secondary_init_app_fw_nic(struct nfp_net_hw_priv *hw_priv) for (i = 0; i < total_vnics; i++) { nfp_port_name_generate(port_name, sizeof(port_name), i, pf_dev); - PMD_INIT_LOG(DEBUG, "Secondary attaching to port %s", port_name); + PMD_INIT_LOG(DEBUG, "Secondary attaching to port %s.", port_name); ret = rte_eth_dev_create(&pf_dev->pci_dev->device, port_name, 0, NULL, NULL, nfp_secondary_net_init, hw_priv); if (ret != 0) { - PMD_INIT_LOG(ERR, "Secondary process attach to port %s failed", port_name); + PMD_INIT_LOG(ERR, "Secondary process attach to port %s failed.", port_name); goto port_cleanup; } } @@ -2635,7 +2661,7 @@ nfp_fw_app_secondary_init(struct nfp_net_hw_priv *hw_priv) switch (pf_dev->app_fw_id) { case NFP_APP_FW_CORE_NIC: - PMD_INIT_LOG(INFO, "Initializing coreNIC"); + PMD_INIT_LOG(INFO, "Initializing coreNIC."); ret = nfp_secondary_init_app_fw_nic(hw_priv); if (ret != 0) { PMD_INIT_LOG(ERR, "Could not initialize coreNIC!"); @@ -2643,7 +2669,7 @@ nfp_fw_app_secondary_init(struct nfp_net_hw_priv *hw_priv) } break; case NFP_APP_FW_FLOWER_NIC: - PMD_INIT_LOG(INFO, "Initializing Flower"); + PMD_INIT_LOG(INFO, "Initializing Flower."); ret = nfp_secondary_init_app_fw_flower(hw_priv); if (ret != 0) { PMD_INIT_LOG(ERR, "Could not initialize Flower!"); @@ -2651,7 +2677,7 @@ nfp_fw_app_secondary_init(struct nfp_net_hw_priv *hw_priv) } break; default: - PMD_INIT_LOG(ERR, "Unsupported Firmware loaded"); + PMD_INIT_LOG(ERR, "Unsupported Firmware loaded."); ret = -EINVAL; return ret; } @@ -2689,13 +2715,13 @@ nfp_pf_secondary_init(struct rte_pci_device *pci_dev) dev_info = nfp_dev_info_get(pci_dev->id.device_id); if (dev_info == NULL) { - PMD_INIT_LOG(ERR, "Not supported device ID"); + PMD_INIT_LOG(ERR, "Not supported device ID."); return -ENODEV; } hw_priv = rte_zmalloc(NULL, sizeof(*hw_priv), 0); if (hw_priv == NULL) { - PMD_INIT_LOG(ERR, "Can not alloc memory for hw priv data"); + PMD_INIT_LOG(ERR, "Can not alloc memory for hw priv data."); return -ENOMEM; } @@ -2704,7 +2730,7 @@ nfp_pf_secondary_init(struct rte_pci_device *pci_dev) snprintf(name, sizeof(name), "nfp_pf%d", 0); pf_dev = rte_zmalloc(name, sizeof(*pf_dev), 0); if (pf_dev == NULL) { - PMD_INIT_LOG(ERR, "Can't allocate memory for the PF device"); + PMD_INIT_LOG(ERR, "Can not allocate memory for the PF device."); ret = -ENOMEM; goto hw_priv_free; } @@ -2734,7 +2760,7 @@ nfp_pf_secondary_init(struct rte_pci_device *pci_dev) cpp = nfp_cpp_from_nfp6000_pcie(pci_dev, dev_info, true); if (cpp == NULL) { - PMD_INIT_LOG(ERR, "A CPP handle can not be obtained"); + PMD_INIT_LOG(ERR, "A CPP handle can not be obtained."); ret = -EIO; goto sync_free; } @@ -2748,7 +2774,7 @@ nfp_pf_secondary_init(struct rte_pci_device *pci_dev) */ sym_tbl = nfp_rtsym_table_read(cpp); if (sym_tbl == NULL) { - PMD_INIT_LOG(ERR, "Something is wrong with the firmware symbol table"); + PMD_INIT_LOG(ERR, "Something is wrong with the firmware symbol table."); ret = -EIO; goto cpp_cleanup; } @@ -2764,7 +2790,7 @@ nfp_pf_secondary_init(struct rte_pci_device *pci_dev) snprintf(app_name, sizeof(app_name), "_pf%u_net_app_id", function_id); app_fw_id = nfp_rtsym_read_le(sym_tbl, app_name, &ret); if (ret != 0) { - PMD_INIT_LOG(ERR, "Couldn't read %s from fw", app_name); + PMD_INIT_LOG(ERR, "Could not read %s from fw.", app_name); ret = -EIO; goto sym_tbl_cleanup; } diff --git a/drivers/net/nfp/nfp_ethdev_vf.c b/drivers/net/nfp/nfp_ethdev_vf.c index 0aadca9010..36b98dc0c2 100644 --- a/drivers/net/nfp/nfp_ethdev_vf.c +++ 
b/drivers/net/nfp/nfp_ethdev_vf.c @@ -51,7 +51,7 @@ nfp_netvf_start(struct rte_eth_dev *dev) if (dev->data->nb_rx_queues > 1) { PMD_INIT_LOG(ERR, "PMD rx interrupt only " - "supports 1 queue with UIO"); + "supports 1 queue with UIO."); return -EIO; } } @@ -268,7 +268,7 @@ nfp_netvf_init(struct rte_eth_dev *eth_dev) dev_info = nfp_dev_info_get(pci_dev->id.device_id); if (dev_info == NULL) { - PMD_INIT_LOG(ERR, "Not supported device ID"); + PMD_INIT_LOG(ERR, "Not supported device ID."); return -ENODEV; } @@ -277,7 +277,7 @@ nfp_netvf_init(struct rte_eth_dev *eth_dev) hw->ctrl_bar = pci_dev->mem_resource[0].addr; if (hw->ctrl_bar == NULL) { - PMD_DRV_LOG(ERR, "hw->super.ctrl_bar is NULL. BAR0 not configured"); + PMD_DRV_LOG(ERR, "The hw->super.ctrl_bar is NULL. BAR0 not configured."); return -ENODEV; } @@ -298,7 +298,7 @@ nfp_netvf_init(struct rte_eth_dev *eth_dev) /* Set the ctrl bar size */ nfp_net_ctrl_bar_size_set(pf_dev); - PMD_INIT_LOG(DEBUG, "ctrl bar: %p", hw->ctrl_bar); + PMD_INIT_LOG(DEBUG, "Ctrl bar: %p.", hw->ctrl_bar); err = nfp_net_common_init(pf_dev, net_hw); if (err != 0) @@ -308,7 +308,7 @@ nfp_netvf_init(struct rte_eth_dev *eth_dev) hw_priv = rte_zmalloc(NULL, sizeof(*hw_priv), 0); if (hw_priv == NULL) { - PMD_INIT_LOG(ERR, "Can not alloc memory for hw priv data"); + PMD_INIT_LOG(ERR, "Can not alloc memory for hw priv data."); err = -ENOMEM; goto hw_priv_free; } @@ -316,6 +316,12 @@ nfp_netvf_init(struct rte_eth_dev *eth_dev) hw_priv->dev_info = dev_info; hw_priv->pf_dev = pf_dev; + if (!nfp_net_recv_pkt_meta_check_register(hw_priv)) { + PMD_INIT_LOG(ERR, "VF register meta check function failed."); + err = -EINVAL; + goto hw_priv_free; + } + eth_dev->process_private = hw_priv; /* For secondary processes, the primary has done all the work */ @@ -340,7 +346,7 @@ nfp_netvf_init(struct rte_eth_dev *eth_dev) net_hw->tx_bar = (uint8_t *)pci_dev->mem_resource[2].addr + tx_bar_off; net_hw->rx_bar = (uint8_t *)pci_dev->mem_resource[2].addr + rx_bar_off; - PMD_INIT_LOG(DEBUG, "ctrl_bar: %p, tx_bar: %p, rx_bar: %p", + PMD_INIT_LOG(DEBUG, "The ctrl_bar: %p, tx_bar: %p, rx_bar: %p.", hw->ctrl_bar, net_hw->tx_bar, net_hw->rx_bar); nfp_net_cfg_queue_setup(net_hw); @@ -358,14 +364,14 @@ nfp_netvf_init(struct rte_eth_dev *eth_dev) /* Allocating memory for mac addr */ eth_dev->data->mac_addrs = rte_zmalloc("mac_addr", RTE_ETHER_ADDR_LEN, 0); if (eth_dev->data->mac_addrs == NULL) { - PMD_INIT_LOG(ERR, "Failed to space for MAC address"); + PMD_INIT_LOG(ERR, "Failed to space for MAC address."); err = -ENOMEM; goto free_xstats; } nfp_read_mac(hw); if (rte_is_valid_assigned_ether_addr(&hw->mac_addr) == 0) { - PMD_INIT_LOG(INFO, "Using random mac address for port %hu", port); + PMD_INIT_LOG(INFO, "Using random mac address for port %hu.", port); /* Using random mac addresses for VFs */ rte_eth_random_addr(&hw->mac_addr.addr_bytes[0]); nfp_write_mac(hw, &hw->mac_addr.addr_bytes[0]); @@ -379,7 +385,7 @@ nfp_netvf_init(struct rte_eth_dev *eth_dev) eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS; - PMD_INIT_LOG(INFO, "port %hu VendorID=%#x DeviceID=%#x " + PMD_INIT_LOG(INFO, "Port %hu VendorID=%#x DeviceID=%#x " "mac=" RTE_ETHER_ADDR_PRT_FMT, port, pci_dev->id.vendor_id, pci_dev->id.device_id, diff --git a/drivers/net/nfp/nfp_ipsec.c b/drivers/net/nfp/nfp_ipsec.c index 13f2b850e5..776e2ec12c 100644 --- a/drivers/net/nfp/nfp_ipsec.c +++ b/drivers/net/nfp/nfp_ipsec.c @@ -453,7 +453,7 @@ nfp_ipsec_cfg_cmd_issue(struct nfp_net_hw *net_hw, ret = nfp_net_mbox_reconfig(net_hw, 
NFP_NET_CFG_MBOX_CMD_IPSEC); if (ret < 0) { - PMD_DRV_LOG(ERR, "Failed to IPsec reconfig mbox"); + PMD_DRV_LOG(ERR, "Failed to IPsec reconfig mbox."); return ret; } @@ -530,7 +530,7 @@ nfp_aesgcm_iv_update(struct ipsec_add_sa *cfg, iv_str = strdup(iv_string); if (iv_str == NULL) { - PMD_DRV_LOG(ERR, "Failed to strdup iv_string"); + PMD_DRV_LOG(ERR, "Failed to strdup iv_string."); return; } @@ -616,13 +616,13 @@ nfp_aead_map(struct rte_eth_dev *eth_dev, } if (aead->digest_length != 16) { - PMD_DRV_LOG(ERR, "ICV must be 128bit with RTE_CRYPTO_AEAD_CHACHA20_POLY1305"); + PMD_DRV_LOG(ERR, "ICV must be 128bit with RTE_CRYPTO_AEAD_CHACHA20_POLY1305."); return -EINVAL; } /* Aead->alg_key_len includes 32-bit salt */ if (key_length != 32) { - PMD_DRV_LOG(ERR, "Unsupported CHACHA20 key length"); + PMD_DRV_LOG(ERR, "Unsupported CHACHA20 key length."); return -EINVAL; } @@ -659,7 +659,7 @@ nfp_aead_map(struct rte_eth_dev *eth_dev, if (iv_str != NULL) { iv_len = aead->iv.length; if (iv_len > NFP_ESP_IV_LENGTH) { - PMD_DRV_LOG(ERR, "Unsupported length of iv data"); + PMD_DRV_LOG(ERR, "Unsupported length of iv data."); return -EINVAL; } @@ -715,7 +715,7 @@ nfp_cipher_map(struct rte_eth_dev *eth_dev, key = (const rte_be32_t *)(cipher->key.data); if (key_length > sizeof(cfg->cipher_key)) { - PMD_DRV_LOG(ERR, "Insufficient space for offloaded key"); + PMD_DRV_LOG(ERR, "Insufficient space for offloaded key."); return -EINVAL; } @@ -858,7 +858,7 @@ nfp_auth_map(struct rte_eth_dev *eth_dev, } if (digest_length == 0) { - PMD_DRV_LOG(ERR, "Unsupported authentication algorithm digest length"); + PMD_DRV_LOG(ERR, "Unsupported authentication algorithm digest length."); return -EINVAL; } @@ -1013,7 +1013,7 @@ nfp_ipsec_msg_build(struct rte_eth_dev *eth_dev, cfg->ctrl_word.encap_dsbl = 0; break; default: - PMD_DRV_LOG(ERR, "Unsupported IPsec action for offload, action: %d", + PMD_DRV_LOG(ERR, "Unsupported IPsec action for offload, action: %d.", conf->action_type); return -EINVAL; } @@ -1026,7 +1026,7 @@ nfp_ipsec_msg_build(struct rte_eth_dev *eth_dev, cfg->ctrl_word.proto = NFP_IPSEC_PROTOCOL_AH; break; default: - PMD_DRV_LOG(ERR, "Unsupported IPsec protocol for offload, protocol: %d", + PMD_DRV_LOG(ERR, "Unsupported IPsec protocol for offload, protocol: %d.", conf->ipsec.proto); return -EINVAL; } @@ -1062,7 +1062,7 @@ nfp_ipsec_msg_build(struct rte_eth_dev *eth_dev, break; default: - PMD_DRV_LOG(ERR, "Unsupported IPsec mode for offload, mode: %d", + PMD_DRV_LOG(ERR, "Unsupported IPsec mode for offload, mode: %d.", conf->ipsec.mode); return -EINVAL; } @@ -1100,7 +1100,7 @@ nfp_crypto_create_session(void *device, net_hw = eth_dev->data->dev_private; if (net_hw->ipsec_data->sa_free_cnt == 0) { - PMD_DRV_LOG(ERR, "No space in SA table, spi: %d", conf->ipsec.spi); + PMD_DRV_LOG(ERR, "No space in SA table, spi: %d.", conf->ipsec.spi); return -EINVAL; } @@ -1122,7 +1122,7 @@ nfp_crypto_create_session(void *device, msg.sa_idx = sa_idx; ret = nfp_ipsec_cfg_cmd_issue(net_hw, &msg); if (ret < 0) { - PMD_DRV_LOG(ERR, "Failed to add SA to nic"); + PMD_DRV_LOG(ERR, "Failed to add SA to nic."); return -EINVAL; } @@ -1255,7 +1255,7 @@ nfp_security_session_get_stats(void *device, ret = nfp_ipsec_cfg_cmd_issue(net_hw, &msg); if (ret < 0) { - PMD_DRV_LOG(ERR, "Failed to get SA stats"); + PMD_DRV_LOG(ERR, "Failed to get SA stats."); return ret; } @@ -1330,13 +1330,13 @@ nfp_crypto_remove_session(void *device, eth_dev = device; priv_session = SECURITY_GET_SESS_PRIV(session); if (eth_dev != priv_session->dev) { - PMD_DRV_LOG(ERR, 
"Session not bound to this device"); + PMD_DRV_LOG(ERR, "Session not bound to this device."); return -ENODEV; } ret = nfp_crypto_remove_sa(eth_dev, priv_session); if (ret < 0) { - PMD_DRV_LOG(ERR, "Failed to remove session"); + PMD_DRV_LOG(ERR, "Failed to remove session."); return -EFAULT; } @@ -1369,7 +1369,7 @@ nfp_ipsec_ctx_create(struct rte_eth_dev *dev, ctx = rte_zmalloc("security_ctx", sizeof(struct rte_security_ctx), 0); if (ctx == NULL) { - PMD_INIT_LOG(ERR, "Failed to malloc security_ctx"); + PMD_INIT_LOG(ERR, "Failed to malloc security_ctx."); return -ENOMEM; } @@ -1380,7 +1380,7 @@ nfp_ipsec_ctx_create(struct rte_eth_dev *dev, data->pkt_dynfield_offset = rte_mbuf_dynfield_register(&pkt_md_dynfield); if (data->pkt_dynfield_offset < 0) { - PMD_INIT_LOG(ERR, "Failed to register mbuf esn_dynfield"); + PMD_INIT_LOG(ERR, "Failed to register mbuf esn_dynfield."); return -ENOMEM; } @@ -1399,13 +1399,13 @@ nfp_ipsec_init(struct rte_eth_dev *dev) cap_extend = net_hw->super.cap_ext; if ((cap_extend & NFP_NET_CFG_CTRL_IPSEC) == 0) { - PMD_INIT_LOG(INFO, "Unsupported IPsec extend capability"); + PMD_INIT_LOG(INFO, "Unsupported IPsec extend capability."); return 0; } data = rte_zmalloc("ipsec_data", sizeof(struct nfp_net_ipsec_data), 0); if (data == NULL) { - PMD_INIT_LOG(ERR, "Failed to malloc ipsec_data"); + PMD_INIT_LOG(ERR, "Failed to malloc ipsec_data."); return -ENOMEM; } @@ -1415,7 +1415,7 @@ nfp_ipsec_init(struct rte_eth_dev *dev) ret = nfp_ipsec_ctx_create(dev, data); if (ret != 0) { - PMD_INIT_LOG(ERR, "Failed to create IPsec ctx"); + PMD_INIT_LOG(ERR, "Failed to create IPsec ctx."); goto ipsec_cleanup; } @@ -1445,7 +1445,7 @@ nfp_ipsec_uninit(struct rte_eth_dev *dev) cap_extend = net_hw->super.cap_ext; if ((cap_extend & NFP_NET_CFG_CTRL_IPSEC) == 0) { - PMD_INIT_LOG(INFO, "Unsupported IPsec extend capability"); + PMD_INIT_LOG(INFO, "Unsupported IPsec extend capability."); return; } diff --git a/drivers/net/nfp/nfp_mtr.c b/drivers/net/nfp/nfp_mtr.c index 6abc6dc9bc..d4f2c4f2f0 100644 --- a/drivers/net/nfp/nfp_mtr.c +++ b/drivers/net/nfp/nfp_mtr.c @@ -43,7 +43,7 @@ nfp_mtr_cap_get(struct rte_eth_dev *dev __rte_unused, if (cap == NULL) { return -rte_mtr_error_set(error, EINVAL, RTE_MTR_ERROR_TYPE_UNSPECIFIED, - NULL, "NULL pointer for capabilitie argument"); + NULL, "NULL pointer for capabilitie argument."); } memset(cap, 0, sizeof(struct rte_mtr_capabilities)); @@ -78,14 +78,14 @@ nfp_mtr_profile_validate(uint32_t mtr_profile_id, if (profile == NULL) { return -rte_mtr_error_set(error, EINVAL, RTE_MTR_ERROR_TYPE_METER_PROFILE, - NULL, "Meter profile is null"); + NULL, "Meter profile is null."); } /* Meter profile ID must be valid. 
*/ if (mtr_profile_id >= NFP_MAX_PROFILE_CNT) { return -rte_mtr_error_set(error, EINVAL, RTE_MTR_ERROR_TYPE_METER_PROFILE_ID, - NULL, "Meter profile id not valid"); + NULL, "Meter profile id not valid."); } switch (profile->alg) { @@ -95,11 +95,11 @@ nfp_mtr_profile_validate(uint32_t mtr_profile_id, case RTE_MTR_TRTCM_RFC4115: return -rte_mtr_error_set(error, ENOTSUP, RTE_MTR_ERROR_TYPE_METER_PROFILE, - NULL, "Unsupported metering algorithm"); + NULL, "Unsupported metering algorithm."); default: return -rte_mtr_error_set(error, ENOTSUP, RTE_MTR_ERROR_TYPE_METER_PROFILE, - NULL, "Unknown metering algorithm"); + NULL, "Unknown metering algorithm."); } } @@ -202,7 +202,7 @@ nfp_mtr_profile_insert(struct nfp_app_fw_flower *app_fw_flower, if (mtr_profile == NULL) { return -rte_mtr_error_set(error, ENOMEM, RTE_MTR_ERROR_TYPE_UNSPECIFIED, - NULL, "Meter profile alloc failed"); + NULL, "Meter profile alloc failed."); } ret = nfp_mtr_profile_conf_insert(mtr_profile_id, @@ -210,7 +210,7 @@ nfp_mtr_profile_insert(struct nfp_app_fw_flower *app_fw_flower, if (ret != 0) { rte_mtr_error_set(error, EINVAL, RTE_MTR_ERROR_TYPE_UNSPECIFIED, - NULL, "Insert profile config failed"); + NULL, "Insert profile config failed."); goto free_profile; } @@ -218,7 +218,7 @@ nfp_mtr_profile_insert(struct nfp_app_fw_flower *app_fw_flower, if (ret != 0) { rte_mtr_error_set(error, EINVAL, RTE_MTR_ERROR_TYPE_UNSPECIFIED, - NULL, "Add meter to firmware failed"); + NULL, "Add meter to firmware failed."); goto free_profile; } @@ -252,7 +252,7 @@ nfp_mtr_profile_mod(struct nfp_app_fw_flower *app_fw_flower, if (ret != 0) { rte_mtr_error_set(error, EINVAL, RTE_MTR_ERROR_TYPE_UNSPECIFIED, - NULL, "Mod profile config failed"); + NULL, "Mod profile config failed."); goto rollback; } @@ -260,7 +260,7 @@ nfp_mtr_profile_mod(struct nfp_app_fw_flower *app_fw_flower, if (ret != 0) { rte_mtr_error_set(error, EINVAL, RTE_MTR_ERROR_TYPE_UNSPECIFIED, - NULL, "Mod meter to firmware failed"); + NULL, "Mod meter to firmware failed."); goto rollback; } @@ -354,20 +354,20 @@ nfp_mtr_profile_delete(struct rte_eth_dev *dev, if (mtr_profile == NULL) { return -rte_mtr_error_set(error, EINVAL, RTE_MTR_ERROR_TYPE_METER_PROFILE_ID, - NULL, "Request meter profile not exist"); + NULL, "Request meter profile not exist."); } if (mtr_profile->in_use) { return -rte_mtr_error_set(error, EINVAL, RTE_MTR_ERROR_TYPE_METER_PROFILE, - NULL, "Request meter profile is been used"); + NULL, "Request meter profile is been used."); } ret = nfp_flower_cmsg_qos_delete(app_fw_flower, &mtr_profile->conf); if (ret != 0) { return -rte_mtr_error_set(error, EINVAL, RTE_MTR_ERROR_TYPE_UNSPECIFIED, - NULL, "Delete meter from firmware failed"); + NULL, "Delete meter from firmware failed."); } /* Remove profile from profile list */ @@ -417,7 +417,7 @@ nfp_mtr_policy_validate(uint32_t mtr_policy_id, if (action != NULL && action->type != RTE_FLOW_ACTION_TYPE_VOID) { return -rte_mtr_error_set(error, EINVAL, RTE_MTR_ERROR_TYPE_METER_POLICY, - NULL, "Green action must be void or end"); + NULL, "Green action must be void or end."); } /* Check yellow action @@ -427,7 +427,7 @@ nfp_mtr_policy_validate(uint32_t mtr_policy_id, if (action != NULL && action->type != RTE_FLOW_ACTION_TYPE_VOID) { return -rte_mtr_error_set(error, EINVAL, RTE_MTR_ERROR_TYPE_METER_POLICY, - NULL, "Yellow action must be void or end"); + NULL, "Yellow action must be void or end."); } /* Check red action */ @@ -435,7 +435,7 @@ nfp_mtr_policy_validate(uint32_t mtr_policy_id, if (action == NULL || action->type != 
RTE_FLOW_ACTION_TYPE_DROP) { return -rte_mtr_error_set(error, EINVAL, RTE_MTR_ERROR_TYPE_METER_POLICY, - NULL, "Red action must be drop"); + NULL, "Red action must be drop."); } return 0; @@ -475,7 +475,7 @@ nfp_mtr_policy_add(struct rte_eth_dev *dev, if (mtr_policy != NULL) { return -rte_mtr_error_set(error, EEXIST, RTE_MTR_ERROR_TYPE_METER_POLICY_ID, - NULL, "Meter policy already exist"); + NULL, "Meter policy already exist."); } /* Check input params */ @@ -488,7 +488,7 @@ nfp_mtr_policy_add(struct rte_eth_dev *dev, if (mtr_policy == NULL) { return -rte_mtr_error_set(error, ENOMEM, RTE_MTR_ERROR_TYPE_UNSPECIFIED, - NULL, "Meter policy alloc failed"); + NULL, "Meter policy alloc failed."); } mtr_policy->policy_id = mtr_policy_id; @@ -531,13 +531,13 @@ nfp_mtr_policy_delete(struct rte_eth_dev *dev, if (mtr_policy == NULL) { return -rte_mtr_error_set(error, EINVAL, RTE_MTR_ERROR_TYPE_METER_POLICY_ID, - NULL, "Request meter policy not exist"); + NULL, "Request meter policy not exist."); } if (mtr_policy->ref_cnt > 0) { return -rte_mtr_error_set(error, EBUSY, RTE_MTR_ERROR_TYPE_METER_POLICY, - NULL, "Request mtr policy is been used"); + NULL, "Request mtr policy is been used."); } /* Remove profile from profile list */ @@ -577,25 +577,25 @@ nfp_mtr_stats_mask_validate(uint64_t stats_mask, struct rte_mtr_error *error) if ((stats_mask & RTE_MTR_STATS_N_PKTS_YELLOW) != 0) { return -rte_mtr_error_set(error, EINVAL, RTE_MTR_ERROR_TYPE_MTR_PARAMS, - NULL, "RTE_MTR_STATS_N_PKTS_YELLOW not support"); + NULL, "RTE_MTR_STATS_N_PKTS_YELLOW not support."); } if ((stats_mask & RTE_MTR_STATS_N_PKTS_RED) != 0) { return -rte_mtr_error_set(error, EINVAL, RTE_MTR_ERROR_TYPE_MTR_PARAMS, - NULL, "RTE_MTR_STATS_N_PKTS_RED not support"); + NULL, "RTE_MTR_STATS_N_PKTS_RED not support."); } if ((stats_mask & RTE_MTR_STATS_N_BYTES_YELLOW) != 0) { return -rte_mtr_error_set(error, EINVAL, RTE_MTR_ERROR_TYPE_MTR_PARAMS, - NULL, "RTE_MTR_STATS_N_BYTES_YELLOW not support"); + NULL, "RTE_MTR_STATS_N_BYTES_YELLOW not support."); } if ((stats_mask & RTE_MTR_STATS_N_BYTES_RED) != 0) { return -rte_mtr_error_set(error, EINVAL, RTE_MTR_ERROR_TYPE_MTR_PARAMS, - NULL, "RTE_MTR_STATS_N_BYTES_RED not support"); + NULL, "RTE_MTR_STATS_N_BYTES_RED not support."); } return 0; @@ -623,7 +623,7 @@ nfp_mtr_validate(uint32_t meter_id, if (params->use_prev_mtr_color != 0) { return -rte_mtr_error_set(error, EINVAL, RTE_MTR_ERROR_TYPE_MTR_PARAMS, - NULL, "Feature use_prev_mtr_color not support"); + NULL, "Feature use_prev_mtr_color not support."); } return nfp_mtr_stats_mask_validate(params->stats_mask, error); @@ -689,7 +689,7 @@ nfp_mtr_create(struct rte_eth_dev *dev, if (mtr != NULL) { return -rte_mtr_error_set(error, EEXIST, RTE_MTR_ERROR_TYPE_MTR_ID, - NULL, "Meter already exist"); + NULL, "Meter already exist."); } /* Check input meter params */ @@ -701,20 +701,20 @@ nfp_mtr_create(struct rte_eth_dev *dev, if (mtr_profile == NULL) { return -rte_mtr_error_set(error, EINVAL, RTE_MTR_ERROR_TYPE_METER_PROFILE_ID, - NULL, "Request meter profile not exist"); + NULL, "Request meter profile not exist."); } if (mtr_profile->in_use) { return -rte_mtr_error_set(error, EINVAL, RTE_MTR_ERROR_TYPE_METER_PROFILE_ID, - NULL, "Request meter profile is been used"); + NULL, "Request meter profile is been used."); } mtr_policy = nfp_mtr_policy_search(priv, params->meter_policy_id); if (mtr_policy == NULL) { return -rte_mtr_error_set(error, EINVAL, RTE_MTR_ERROR_TYPE_METER_POLICY_ID, - NULL, "Request meter policy not exist"); + NULL, "Request meter policy 
not exist."); } /* Meter param memory alloc */ @@ -722,7 +722,7 @@ nfp_mtr_create(struct rte_eth_dev *dev, if (mtr == NULL) { return -rte_mtr_error_set(error, ENOMEM, RTE_MTR_ERROR_TYPE_UNSPECIFIED, - NULL, "Meter param alloc failed"); + NULL, "Meter param alloc failed."); } nfp_mtr_config(mtr_id, shared, params, mtr_profile, mtr_policy, mtr); @@ -767,13 +767,13 @@ nfp_mtr_destroy(struct rte_eth_dev *dev, if (mtr == NULL) { return -rte_mtr_error_set(error, EINVAL, RTE_MTR_ERROR_TYPE_MTR_ID, - NULL, "Request meter not exist"); + NULL, "Request meter not exist."); } if (mtr->ref_cnt > 0) { return -rte_mtr_error_set(error, EINVAL, RTE_MTR_ERROR_TYPE_MTR_ID, - NULL, "Meter object is being used"); + NULL, "Meter object is being used."); } /* Update profile/policy status */ @@ -817,7 +817,7 @@ nfp_mtr_enable(struct rte_eth_dev *dev, if (mtr == NULL) { return -rte_mtr_error_set(error, EINVAL, RTE_MTR_ERROR_TYPE_MTR_ID, - NULL, "Request meter not exist"); + NULL, "Request meter not exist."); } mtr->enable = true; @@ -855,13 +855,13 @@ nfp_mtr_disable(struct rte_eth_dev *dev, if (mtr == NULL) { return -rte_mtr_error_set(error, EINVAL, RTE_MTR_ERROR_TYPE_MTR_ID, - NULL, "Request meter not exist"); + NULL, "Request meter not exist."); } if (mtr->ref_cnt > 0) { return -rte_mtr_error_set(error, EINVAL, RTE_MTR_ERROR_TYPE_MTR_ID, - NULL, "Can't disable a used meter"); + NULL, "Can not disable a used meter."); } mtr->enable = false; @@ -903,13 +903,13 @@ nfp_mtr_profile_update(struct rte_eth_dev *dev, if (mtr == NULL) { return -rte_mtr_error_set(error, EINVAL, RTE_MTR_ERROR_TYPE_MTR_ID, - NULL, "Request meter not exist"); + NULL, "Request meter not exist."); } if (mtr->ref_cnt > 0) { return -rte_mtr_error_set(error, EINVAL, RTE_MTR_ERROR_TYPE_MTR_ID, - NULL, "Request meter is been used"); + NULL, "Request meter is been used."); } if (mtr->mtr_profile->profile_id == mtr_profile_id) @@ -919,13 +919,13 @@ nfp_mtr_profile_update(struct rte_eth_dev *dev, if (mtr_profile == NULL) { return -rte_mtr_error_set(error, EINVAL, RTE_MTR_ERROR_TYPE_METER_PROFILE_ID, - NULL, "Request meter profile not exist"); + NULL, "Request meter profile not exist."); } if (mtr_profile->in_use) { return -rte_mtr_error_set(error, EINVAL, RTE_MTR_ERROR_TYPE_METER_PROFILE_ID, - NULL, "Request meter profile is been used"); + NULL, "Request meter profile is been used."); } mtr_profile->in_use = true; @@ -969,7 +969,7 @@ nfp_mtr_stats_update(struct rte_eth_dev *dev, if (mtr == NULL) { return -rte_mtr_error_set(error, EEXIST, RTE_MTR_ERROR_TYPE_MTR_ID, - NULL, "Request meter id not exist"); + NULL, "Request meter id not exist."); } ret = nfp_mtr_stats_mask_validate(stats_mask, error); @@ -1022,7 +1022,7 @@ nfp_mtr_stats_read(struct rte_eth_dev *dev, if (mtr == NULL) { return -rte_mtr_error_set(error, EINVAL, RTE_MTR_ERROR_TYPE_MTR_ID, - NULL, "Request meter not exist"); + NULL, "Request meter not exist."); } *stats_mask = mtr->stats_mask; @@ -1067,7 +1067,7 @@ int nfp_net_mtr_ops_get(struct rte_eth_dev *dev, void *arg) { if (!rte_eth_dev_is_repr(dev)) { - PMD_DRV_LOG(ERR, "Port is not a representor"); + PMD_DRV_LOG(ERR, "Port is not a representor."); return -EINVAL; } @@ -1097,7 +1097,7 @@ nfp_mtr_priv_init(struct nfp_pf_dev *pf_dev) priv = rte_zmalloc("nfp_app_mtr_priv", sizeof(struct nfp_mtr_priv), 0); if (priv == NULL) { - PMD_INIT_LOG(ERR, "nfp app mtr priv creation failed"); + PMD_INIT_LOG(ERR, "NFP app mtr priv creation failed."); return -ENOMEM; } @@ -1107,7 +1107,7 @@ nfp_mtr_priv_init(struct nfp_pf_dev *pf_dev) ret = 
rte_eal_alarm_set(NFP_METER_STATS_INTERVAL, nfp_mtr_stats_request, (void *)app_fw_flower); if (ret < 0) { - PMD_INIT_LOG(ERR, "nfp mtr timer init failed."); + PMD_INIT_LOG(ERR, "NFP mtr timer init failed."); rte_free(priv); return ret; } diff --git a/drivers/net/nfp/nfp_net_cmsg.c b/drivers/net/nfp/nfp_net_cmsg.c index f2f694be0b..8f77c5588a 100644 --- a/drivers/net/nfp/nfp_net_cmsg.c +++ b/drivers/net/nfp/nfp_net_cmsg.c @@ -45,19 +45,19 @@ nfp_net_cmsg_xmit(struct nfp_net_hw *hw, case NFP_NET_CFG_MBOX_RET_FS_OK: break; case NFP_NET_CFG_MBOX_RET_FS_ERR_NO_SPACE: - PMD_DRV_LOG(ERR, "Not enough space for cmd %u", cmsg->cmd); + PMD_DRV_LOG(ERR, "Not enough space for cmd %u.", cmsg->cmd); ret = -ENOSPC; break; case NFP_NET_CFG_MBOX_RET_FS_ERR_MASK_FULL: - PMD_DRV_LOG(ERR, "The mask table is full for cmd %u", cmsg->cmd); + PMD_DRV_LOG(ERR, "The mask table is full for cmd %u.", cmsg->cmd); ret = -EXFULL; break; case NFP_NET_CFG_MBOX_RET_FS_ERR_CMD_INVALID: - PMD_DRV_LOG(ERR, "The mbox cmd %u invalid", cmsg->cmd); + PMD_DRV_LOG(ERR, "The mbox cmd %u invalid.", cmsg->cmd); ret = -EINVAL; break; default: - PMD_DRV_LOG(ERR, "Unrecognized mbox cmd %u", cmsg->cmd); + PMD_DRV_LOG(ERR, "Unrecognized mbox cmd %u.", cmsg->cmd); ret = -EINVAL; break; } diff --git a/drivers/net/nfp/nfp_net_common.c b/drivers/net/nfp/nfp_net_common.c index f76d5a6895..e68ce68229 100644 --- a/drivers/net/nfp/nfp_net_common.c +++ b/drivers/net/nfp/nfp_net_common.c @@ -9,6 +9,7 @@ #include +#include "flower/nfp_flower_cmsg.h" #include "flower/nfp_flower_representor.h" #include "nfd3/nfp_nfd3.h" #include "nfdk/nfp_nfdk.h" @@ -288,7 +289,7 @@ nfp_net_mbox_reconfig(struct nfp_net_hw *net_hw, rte_spinlock_unlock(&net_hw->super.reconfig_lock); if (ret != 0) { - PMD_DRV_LOG(ERR, "Error nft net mailbox reconfig: mbox=%#08x update=%#08x", + PMD_DRV_LOG(ERR, "Error nft net mailbox reconfig: mbox=%#08x update=%#08x.", mbox_cmd, NFP_NET_CFG_UPDATE_MBOX); return -EIO; } @@ -359,20 +360,20 @@ nfp_net_configure(struct rte_eth_dev *dev) /* Checking TX mode */ if (txmode->mq_mode != RTE_ETH_MQ_TX_NONE) { - PMD_DRV_LOG(ERR, "TX mq_mode DCB and VMDq not supported"); + PMD_DRV_LOG(ERR, "TX mq_mode DCB and VMDq not supported."); return -EINVAL; } /* Checking RX mode */ if ((rxmode->mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) != 0 && (hw->super.cap & NFP_NET_CFG_CTRL_RSS_ANY) == 0) { - PMD_DRV_LOG(ERR, "RSS not supported"); + PMD_DRV_LOG(ERR, "RSS not supported."); return -EINVAL; } /* Checking MTU set */ if (rxmode->mtu > hw->max_mtu + NFP_ETH_OVERHEAD) { - PMD_DRV_LOG(ERR, "MTU (%u) larger than the maximum possible frame size (%u)", + PMD_DRV_LOG(ERR, "MTU (%u) larger than the maximum possible frame size (%u).", rxmode->mtu, hw->max_mtu + NFP_ETH_OVERHEAD); return -ERANGE; } @@ -387,10 +388,10 @@ nfp_net_log_device_information(const struct nfp_net_hw *hw, uint32_t cap = hw->super.cap; uint32_t cap_ext = hw->super.cap_ext; - PMD_INIT_LOG(INFO, "VER: %u.%u, Maximum supported MTU: %d", + PMD_INIT_LOG(INFO, "VER: %u.%u, Maximum supported MTU: %d.", pf_dev->ver.major, pf_dev->ver.minor, hw->max_mtu); - PMD_INIT_LOG(INFO, "CAP: %#x", cap); + PMD_INIT_LOG(INFO, "CAP: %#x.", cap); PMD_INIT_LOG(INFO, "%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s", cap & NFP_NET_CFG_CTRL_ENABLE ? "ENABLE " : "", cap & NFP_NET_CFG_CTRL_PROMISC ? "PROMISC " : "", @@ -422,7 +423,7 @@ nfp_net_log_device_information(const struct nfp_net_hw *hw, cap & NFP_NET_CFG_CTRL_LIVE_ADDR ? "LIVE_ADDR " : "", cap & NFP_NET_CFG_CTRL_USO ? 
"USO" : ""); - PMD_INIT_LOG(INFO, "CAP_WORD1: %#x", cap_ext); + PMD_INIT_LOG(INFO, "CAP_WORD1: %#x.", cap_ext); PMD_INIT_LOG(INFO, "%s%s%s%s%s%s%s", cap_ext & NFP_NET_CFG_CTRL_PKT_TYPE ? "PKT_TYPE " : "", cap_ext & NFP_NET_CFG_CTRL_IPSEC ? "IPSEC " : "", @@ -432,7 +433,7 @@ nfp_net_log_device_information(const struct nfp_net_hw *hw, cap_ext & NFP_NET_CFG_CTRL_FLOW_STEER ? "FLOW_STEER " : "", cap_ext & NFP_NET_CFG_CTRL_IN_ORDER ? "VIRTIO_IN_ORDER " : ""); - PMD_INIT_LOG(INFO, "max_rx_queues: %u, max_tx_queues: %u", + PMD_INIT_LOG(INFO, "The max_rx_queues: %u, max_tx_queues: %u.", hw->max_rx_queues, hw->max_tx_queues); } @@ -493,12 +494,12 @@ nfp_net_set_mac_addr(struct rte_eth_dev *dev, hw = &net_hw->super; if ((hw->ctrl & NFP_NET_CFG_CTRL_ENABLE) != 0 && (hw->cap & NFP_NET_CFG_CTRL_LIVE_ADDR) == 0) { - PMD_DRV_LOG(ERR, "MAC address unable to change when port enabled"); + PMD_DRV_LOG(ERR, "MAC address unable to change when port enabled."); return -EBUSY; } if (rte_is_valid_assigned_ether_addr(mac_addr) == 0) { - PMD_DRV_LOG(ERR, "Invalid MAC address"); + PMD_DRV_LOG(ERR, "Invalid MAC address."); return -EINVAL; } @@ -513,7 +514,7 @@ nfp_net_set_mac_addr(struct rte_eth_dev *dev, /* Signal the NIC about the change */ if (nfp_reconfig(hw, new_ctrl, update) != 0) { - PMD_DRV_LOG(ERR, "MAC address update failed"); + PMD_DRV_LOG(ERR, "MAC address update failed."); return -EIO; } @@ -531,7 +532,7 @@ nfp_configure_rx_interrupt(struct rte_eth_dev *dev, if (rte_intr_vec_list_alloc(intr_handle, "intr_vec", dev->data->nb_rx_queues) != 0) { - PMD_DRV_LOG(ERR, "Failed to allocate %d rx_queues intr_vec", + PMD_DRV_LOG(ERR, "Failed to allocate %d rx_queues intr_vec.", dev->data->nb_rx_queues); return -ENOMEM; } @@ -539,13 +540,13 @@ nfp_configure_rx_interrupt(struct rte_eth_dev *dev, hw = nfp_net_get_hw(dev); if (rte_intr_type_get(intr_handle) == RTE_INTR_HANDLE_UIO) { - PMD_DRV_LOG(INFO, "VF: enabling RX interrupt with UIO"); + PMD_DRV_LOG(INFO, "VF: enabling RX interrupt with UIO."); /* UIO just supports one queue and no LSC */ nn_cfg_writeb(&hw->super, NFP_NET_CFG_RXR_VEC(0), 0); if (rte_intr_vec_list_index_set(intr_handle, 0, 0) != 0) return -1; } else { - PMD_DRV_LOG(INFO, "VF: enabling RX interrupt with VFIO"); + PMD_DRV_LOG(INFO, "VF: enabling RX interrupt with VFIO."); for (i = 0; i < dev->data->nb_rx_queues; i++) { /* * The first msix vector is reserved for non @@ -645,12 +646,12 @@ nfp_net_promisc_enable(struct rte_eth_dev *dev) hw = &net_hw->super; if ((hw->cap & NFP_NET_CFG_CTRL_PROMISC) == 0) { - PMD_DRV_LOG(ERR, "Promiscuous mode not supported"); + PMD_DRV_LOG(ERR, "Promiscuous mode not supported."); return -ENOTSUP; } if ((hw->ctrl & NFP_NET_CFG_CTRL_PROMISC) != 0) { - PMD_DRV_LOG(INFO, "Promiscuous mode already enabled"); + PMD_DRV_LOG(INFO, "Promiscuous mode already enabled."); return 0; } @@ -679,12 +680,12 @@ nfp_net_promisc_disable(struct rte_eth_dev *dev) hw = &net_hw->super; if ((hw->cap & NFP_NET_CFG_CTRL_PROMISC) == 0) { - PMD_DRV_LOG(ERR, "Promiscuous mode not supported"); + PMD_DRV_LOG(ERR, "Promiscuous mode not supported."); return -ENOTSUP; } if ((hw->ctrl & NFP_NET_CFG_CTRL_PROMISC) == 0) { - PMD_DRV_LOG(INFO, "Promiscuous mode already disabled"); + PMD_DRV_LOG(INFO, "Promiscuous mode already disabled."); return 0; } @@ -717,7 +718,7 @@ nfp_net_set_allmulticast_mode(struct rte_eth_dev *dev, cap_extend = hw->cap_ext; if ((cap_extend & NFP_NET_CFG_CTRL_MCAST_FILTER) == 0) { - PMD_DRV_LOG(ERR, "Allmulticast mode not supported"); + PMD_DRV_LOG(DEBUG, "Allmulticast mode not 
supported."); return -ENOTSUP; } @@ -779,7 +780,7 @@ nfp_net_pf_speed_update(struct rte_eth_dev *dev, if (pf_dev->speed_updated || aneg == NFP_ANEG_AUTO) { nfp_eth_table = nfp_eth_read_ports(pf_dev->cpp); if (nfp_eth_table == NULL) { - PMD_DRV_LOG(WARNING, "Failed to update port speed."); + PMD_DRV_LOG(DEBUG, "Failed to get nfp_eth_table."); } else { pf_dev->nfp_eth_table->ports[idx] = nfp_eth_table->ports[idx]; free(nfp_eth_table); @@ -834,9 +835,9 @@ nfp_net_link_update_common(struct rte_eth_dev *dev, ret = rte_eth_linkstatus_set(dev, link); if (ret == 0) { if (link->link_status == RTE_ETH_LINK_UP) - PMD_DRV_LOG(INFO, "NIC Link is Up"); + PMD_DRV_LOG(INFO, "NIC Link is Up."); else - PMD_DRV_LOG(INFO, "NIC Link is Down"); + PMD_DRV_LOG(INFO, "NIC Link is Down."); } return ret; @@ -1039,7 +1040,7 @@ nfp_net_xstats_size(const struct rte_eth_dev *dev) if (rte_eth_dev_is_repr(dev)) { repr = dev->data->dev_private; - if (repr->mac_stats == NULL) + if (nfp_flower_repr_is_vf(repr)) vf_flag = true; } else { hw = dev->data->dev_private; @@ -1065,7 +1066,7 @@ nfp_net_xstats_info(const struct rte_eth_dev *dev, uint32_t index) { if (index >= nfp_net_xstats_size(dev)) { - PMD_DRV_LOG(ERR, "xstat index out of bounds"); + PMD_DRV_LOG(ERR, "The xstat index out of bounds."); return NULL; } @@ -1422,7 +1423,7 @@ nfp_net_common_init(struct nfp_pf_dev *pf_dev, hw->max_tx_queues = nn_cfg_readl(&hw->super, NFP_NET_CFG_MAX_TXRINGS); if (hw->max_rx_queues == 0 || hw->max_tx_queues == 0) { PMD_INIT_LOG(ERR, "Device %s can not be used, there are no valid queue " - "pairs for use", pci_dev->name); + "pairs for use.", pci_dev->name); return -ENODEV; } @@ -1587,12 +1588,12 @@ nfp_net_dev_link_status_print(struct rte_eth_dev *dev) rte_eth_linkstatus_get(dev, &link); if (link.link_status != 0) - PMD_DRV_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s", + PMD_DRV_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s.", dev->data->port_id, link.link_speed, link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX ? 
"full-duplex" : "half-duplex"); else - PMD_DRV_LOG(INFO, " Port %d: Link Down", dev->data->port_id); + PMD_DRV_LOG(INFO, " Port %d: Link Down.", dev->data->port_id); PMD_DRV_LOG(INFO, "PCI Address: " PCI_PRI_FMT, pci_dev->addr.domain, pci_dev->addr.bus, @@ -1674,7 +1675,7 @@ nfp_net_dev_interrupt_handler(void *param) if (rte_eal_alarm_set(timeout * 1000, nfp_net_dev_interrupt_delayed_handler, (void *)dev) != 0) { - PMD_INIT_LOG(ERR, "Error setting alarm"); + PMD_INIT_LOG(ERR, "Error setting alarm."); /* Unmasking */ nfp_net_irq_unmask(dev); } @@ -1690,14 +1691,14 @@ nfp_net_dev_mtu_set(struct rte_eth_dev *dev, /* MTU setting is forbidden if port is started */ if (dev->data->dev_started) { - PMD_DRV_LOG(ERR, "port %d must be stopped before configuration", + PMD_DRV_LOG(ERR, "Port %d must be stopped before configuration.", dev->data->port_id); return -EBUSY; } /* MTU larger than current mbufsize not supported */ if (mtu > hw->flbufsz) { - PMD_DRV_LOG(ERR, "MTU (%u) larger than current mbufsize (%u) not supported", + PMD_DRV_LOG(ERR, "MTU (%u) larger than current mbufsize (%u) not supported.", mtu, hw->flbufsz); return -ERANGE; } @@ -1777,7 +1778,7 @@ nfp_net_rss_reta_write(struct rte_eth_dev *dev, if (reta_size != NFP_NET_CFG_RSS_ITBL_SZ) { PMD_DRV_LOG(ERR, "The size of hash lookup table configured (%hu)" - " doesn't match hardware can supported (%d)", + " does not match hardware can supported (%d).", reta_size, NFP_NET_CFG_RSS_ITBL_SZ); return -EINVAL; } @@ -1869,7 +1870,7 @@ nfp_net_reta_query(struct rte_eth_dev *dev, if (reta_size != NFP_NET_CFG_RSS_ITBL_SZ) { PMD_DRV_LOG(ERR, "The size of hash lookup table configured (%d)" - " doesn't match hardware can supported (%d)", + " does not match hardware can supported (%d).", reta_size, NFP_NET_CFG_RSS_ITBL_SZ); return -EINVAL; } @@ -1979,7 +1980,7 @@ nfp_net_rss_hash_update(struct rte_eth_dev *dev, /* Checking if RSS is enabled */ if ((hw->ctrl & NFP_NET_CFG_CTRL_RSS_ANY) == 0) { if (rss_hf != 0) { - PMD_DRV_LOG(ERR, "RSS unsupported"); + PMD_DRV_LOG(ERR, "RSS unsupported."); return -EINVAL; } @@ -1987,7 +1988,7 @@ nfp_net_rss_hash_update(struct rte_eth_dev *dev, } if (rss_conf->rss_key_len > NFP_NET_CFG_RSS_KEY_SZ) { - PMD_DRV_LOG(ERR, "RSS hash key too long"); + PMD_DRV_LOG(ERR, "RSS hash key too long."); return -EINVAL; } @@ -2089,7 +2090,7 @@ nfp_net_rss_config_default(struct rte_eth_dev *dev) dev_conf = &dev->data->dev_conf; if (dev_conf == NULL) { - PMD_DRV_LOG(ERR, "Wrong rss conf"); + PMD_DRV_LOG(ERR, "Wrong rss conf."); return -EINVAL; } @@ -2154,9 +2155,9 @@ nfp_net_close_tx_queue(struct rte_eth_dev *dev) int nfp_net_set_vxlan_port(struct nfp_net_hw *net_hw, size_t idx, - uint16_t port) + uint16_t port, + uint32_t ctrl) { - int ret; uint32_t i; struct nfp_hw *hw = &net_hw->super; @@ -2172,16 +2173,7 @@ nfp_net_set_vxlan_port(struct nfp_net_hw *net_hw, (net_hw->vxlan_ports[i + 1] << 16) | net_hw->vxlan_ports[i]); } - rte_spinlock_lock(&hw->reconfig_lock); - - nn_cfg_writel(hw, NFP_NET_CFG_UPDATE, NFP_NET_CFG_UPDATE_VXLAN); - rte_wmb(); - - ret = nfp_reconfig_real(hw, NFP_NET_CFG_UPDATE_VXLAN); - - rte_spinlock_unlock(&hw->reconfig_lock); - - return ret; + return nfp_reconfig(hw, ctrl, NFP_NET_CFG_UPDATE_VXLAN); } /* @@ -2194,7 +2186,7 @@ nfp_net_check_dma_mask(struct nfp_pf_dev *pf_dev, { if (pf_dev->ver.extend == NFP_NET_CFG_VERSION_DP_NFD3 && rte_mem_check_dma_mask(40) != 0) { - PMD_DRV_LOG(ERR, "Device %s can't be used: restricted dma mask to 40 bits!", + PMD_DRV_LOG(ERR, "Device %s can not be used: restricted dma mask to 40 
bits!", name); return -ENODEV; } @@ -2215,7 +2207,7 @@ nfp_net_txrwb_alloc(struct rte_eth_dev *eth_dev) rte_socket_id(), RTE_MEMZONE_IOVA_CONTIG, RTE_CACHE_LINE_SIZE); if (net_hw->txrwb_mz == NULL) { - PMD_INIT_LOG(ERR, "Failed to alloc %s for TX ring write back", + PMD_INIT_LOG(ERR, "Failed to alloc %s for TX ring write back.", mz_name); return -ENOMEM; } @@ -2390,7 +2382,7 @@ nfp_net_is_valid_nfd_version(struct nfp_net_fw_ver version) if (nfd_version == NFP_NET_CFG_VERSION_DP_NFDK) { if (version.major < 5) { - PMD_INIT_LOG(ERR, "NFDK must use ABI 5 or newer, found: %d", + PMD_INIT_LOG(ERR, "NFDK must use ABI 5 or newer, found: %d.", version.major); return false; } @@ -2796,7 +2788,7 @@ nfp_net_sriov_update(struct nfp_net_hw *net_hw, ret = nfp_net_vf_reconfig(net_hw, pf_dev, update, pf_dev->vf_base_id, NFP_NET_VF_CFG_MB_VF_NUM); if (ret != 0) { - PMD_INIT_LOG(ERR, "Error nfp VF reconfig"); + PMD_INIT_LOG(ERR, "Error nfp VF reconfig."); return ret; } @@ -2814,11 +2806,11 @@ nfp_net_vf_queues_config(struct nfp_net_hw *net_hw, ret = nfp_net_sriov_check(pf_dev, NFP_NET_VF_CFG_MB_CAP_QUEUE_CONFIG); if (ret != 0) { if (ret == -ENOTSUP) { - PMD_INIT_LOG(WARNING, "Set VF max queue not supported"); + PMD_INIT_LOG(DEBUG, "Set VF max queue not supported."); return 0; } - PMD_INIT_LOG(ERR, "Set VF max queue failed"); + PMD_INIT_LOG(ERR, "Set VF max queue failed."); return ret; } @@ -2827,7 +2819,7 @@ nfp_net_vf_queues_config(struct nfp_net_hw *net_hw, ret = nfp_net_vf_reconfig(net_hw, pf_dev, NFP_NET_VF_CFG_MB_UPD_QUEUE_CONFIG, pf_dev->queue_per_vf, pf_dev->vf_base_id + offset + i); if (ret != 0) { - PMD_INIT_LOG(ERR, "Set VF max_queue failed"); + PMD_INIT_LOG(ERR, "Set VF max_queue failed."); return ret; } } @@ -2844,11 +2836,11 @@ nfp_net_sriov_init(struct nfp_net_hw *net_hw, ret = nfp_net_sriov_check(pf_dev, NFP_NET_VF_CFG_MB_CAP_SPLIT); if (ret != 0) { if (ret == -ENOTSUP) { - PMD_INIT_LOG(WARNING, "Set VF split not supported"); + PMD_INIT_LOG(DEBUG, "Set VF split not supported."); return 0; } - PMD_INIT_LOG(ERR, "Set VF split failed"); + PMD_INIT_LOG(ERR, "Set VF split failed."); return ret; } @@ -2856,7 +2848,7 @@ nfp_net_sriov_init(struct nfp_net_hw *net_hw, ret = nfp_net_sriov_update(net_hw, pf_dev, NFP_NET_VF_CFG_MB_UPD_SPLIT); if (ret != 0) { - PMD_INIT_LOG(ERR, "The nfp sriov update spilt failed"); + PMD_INIT_LOG(ERR, "The nfp sriov update spilt failed."); return ret; } @@ -2874,15 +2866,350 @@ nfp_net_vf_config_app_init(struct nfp_net_hw *net_hw, ret = nfp_net_sriov_init(net_hw, pf_dev); if (ret != 0) { - PMD_INIT_LOG(ERR, "Failed to init sriov module"); + PMD_INIT_LOG(ERR, "Failed to init sriov module."); return ret; } ret = nfp_net_vf_queues_config(net_hw, pf_dev); if (ret != 0) { - PMD_INIT_LOG(ERR, "Failed to config vf queue"); + PMD_INIT_LOG(ERR, "Failed to config vf queue."); + return ret; + } + + return 0; +} + +static inline bool +nfp_net_meta_has_no_port_type(__rte_unused struct nfp_net_meta_parsed *meta) +{ + return true; +} + +static inline bool +nfp_net_meta_is_not_pf_port(__rte_unused struct nfp_net_meta_parsed *meta) +{ + return false; +} + +static inline bool +nfp_net_meta_is_pf_port(struct nfp_net_meta_parsed *meta) +{ + return nfp_flower_port_is_phy_port(meta->port_id); +} + +bool +nfp_net_recv_pkt_meta_check_register(struct nfp_net_hw_priv *hw_priv) +{ + struct nfp_pf_dev *pf_dev; + + pf_dev = hw_priv->pf_dev; + if (!hw_priv->is_pf) { + pf_dev->recv_pkt_meta_check_t = nfp_net_meta_has_no_port_type; + return true; + } + + switch (pf_dev->app_fw_id) { + case 
NFP_APP_FW_CORE_NIC: + pf_dev->recv_pkt_meta_check_t = nfp_net_meta_has_no_port_type; + break; + case NFP_APP_FW_FLOWER_NIC: + if (pf_dev->multi_pf.enabled) + pf_dev->recv_pkt_meta_check_t = nfp_net_meta_is_pf_port; + else + pf_dev->recv_pkt_meta_check_t = nfp_net_meta_is_not_pf_port; + break; + default: + PMD_INIT_LOG(ERR, "Unsupported Firmware loaded."); + return false; + } + + return true; +} + +static int +nfp_net_get_nfp_index(struct rte_eth_dev *dev) +{ + int nfp_idx; + + if (rte_eth_dev_is_repr(dev)) { + struct nfp_flower_representor *repr; + repr = dev->data->dev_private; + nfp_idx = repr->nfp_idx; + } else { + struct nfp_net_hw *net_hw; + net_hw = dev->data->dev_private; + nfp_idx = net_hw->nfp_idx; + } + + return nfp_idx; +} + +int +nfp_net_get_eeprom_len(__rte_unused struct rte_eth_dev *dev) +{ + return RTE_ETHER_ADDR_LEN; +} + +static int +nfp_net_get_port_mac_hwinfo(struct nfp_net_hw_priv *hw_priv, + uint32_t index, + struct rte_ether_addr *mac_addr) +{ + int ret; + char hwinfo[32]; + struct nfp_nsp *nsp; + + snprintf(hwinfo, sizeof(hwinfo), "eth%u.mac", index); + + nsp = nfp_nsp_open(hw_priv->pf_dev->cpp); + if (nsp == NULL) + return -EOPNOTSUPP; + + ret = nfp_nsp_hwinfo_lookup(nsp, hwinfo, sizeof(hwinfo)); + nfp_nsp_close(nsp); + + if (ret != 0) { + PMD_DRV_LOG(ERR, "Read persistent MAC address failed for eth_index %u.", index); + return ret; + } + + ret = rte_ether_unformat_addr(hwinfo, mac_addr); + if (ret != 0) { + PMD_DRV_LOG(ERR, "Can not parse persistent MAC address."); + return -EOPNOTSUPP; + } + + return 0; +} + +static int +nfp_net_set_port_mac_hwinfo(struct nfp_net_hw_priv *hw_priv, + uint32_t index, + struct rte_ether_addr *mac_addr) +{ + int ret; + char hwinfo_mac[32]; + struct nfp_nsp *nsp; + char buf[RTE_ETHER_ADDR_FMT_SIZE]; + + rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, mac_addr); + snprintf(hwinfo_mac, sizeof(hwinfo_mac), "eth%u.mac=%s", index, buf); + + nsp = nfp_nsp_open(hw_priv->pf_dev->cpp); + if (nsp == NULL) + return -EOPNOTSUPP; + + ret = nfp_nsp_hwinfo_set(nsp, hwinfo_mac, sizeof(hwinfo_mac)); + nfp_nsp_close(nsp); + + if (ret != 0) { + PMD_DRV_LOG(ERR, "HWinfo set failed: %d.", ret); + return ret; + } + + return 0; +} + +int +nfp_net_get_eeprom(struct rte_eth_dev *dev, + struct rte_dev_eeprom_info *eeprom) +{ + int ret; + uint32_t nfp_idx; + struct nfp_net_hw *net_hw; + struct rte_ether_addr mac_addr; + struct nfp_net_hw_priv *hw_priv; + + if (eeprom->length == 0) + return -EINVAL; + + hw_priv = dev->process_private; + nfp_idx = nfp_net_get_nfp_index(dev); + + ret = nfp_net_get_port_mac_hwinfo(hw_priv, nfp_idx, &mac_addr); + if (ret != 0) + return -EOPNOTSUPP; + + net_hw = nfp_net_get_hw(dev); + eeprom->magic = net_hw->vendor_id | (net_hw->device_id << 16); + memcpy(eeprom->data, mac_addr.addr_bytes + eeprom->offset, eeprom->length); + + return 0; +} + +int +nfp_net_set_eeprom(struct rte_eth_dev *dev, + struct rte_dev_eeprom_info *eeprom) +{ + int ret; + uint32_t nfp_idx; + struct nfp_net_hw *net_hw; + struct rte_ether_addr mac_addr; + struct nfp_net_hw_priv *hw_priv; + + if (eeprom->length == 0) + return -EINVAL; + + net_hw = nfp_net_get_hw(dev); + if (eeprom->magic != (uint32_t)(net_hw->vendor_id | (net_hw->device_id << 16))) + return -EINVAL; + + hw_priv = dev->process_private; + nfp_idx = nfp_net_get_nfp_index(dev); + ret = nfp_net_get_port_mac_hwinfo(hw_priv, nfp_idx, &mac_addr); + if (ret != 0) + return -EOPNOTSUPP; + + memcpy(mac_addr.addr_bytes + eeprom->offset, eeprom->data, eeprom->length); + ret = 
nfp_net_set_port_mac_hwinfo(hw_priv, nfp_idx, &mac_addr); + if (ret != 0) + return -EOPNOTSUPP; + + return 0; +} + +int +nfp_net_get_module_info(struct rte_eth_dev *dev, + struct rte_eth_dev_module_info *info) +{ + int ret = 0; + uint8_t data; + uint32_t idx; + uint32_t read_len; + struct nfp_nsp *nsp; + struct nfp_net_hw_priv *hw_priv; + struct nfp_eth_table_port *eth_port; + + hw_priv = dev->process_private; + nsp = nfp_nsp_open(hw_priv->pf_dev->cpp); + if (nsp == NULL) { + PMD_DRV_LOG(ERR, "Unable to open NSP."); + return -EIO; + } + + if (!nfp_nsp_has_read_module_eeprom(nsp)) { + PMD_DRV_LOG(ERR, "Read module eeprom not supported. Please update flash."); + ret = -EOPNOTSUPP; + goto exit_close_nsp; + } + + idx = nfp_net_get_idx(dev); + eth_port = &hw_priv->pf_dev->nfp_eth_table->ports[idx]; + switch (eth_port->interface) { + case NFP_INTERFACE_SFP: + /* FALLTHROUGH */ + case NFP_INTERFACE_SFP28: + /* Read which revision the transceiver compiles with */ + ret = nfp_nsp_read_module_eeprom(nsp, eth_port->eth_index, + SFP_SFF8472_COMPLIANCE, &data, 1, &read_len); + if (ret != 0) + goto exit_close_nsp; + + if (data == 0) { + info->type = RTE_ETH_MODULE_SFF_8079; + info->eeprom_len = RTE_ETH_MODULE_SFF_8079_LEN; + } else { + info->type = RTE_ETH_MODULE_SFF_8472; + info->eeprom_len = RTE_ETH_MODULE_SFF_8472_LEN; + } + break; + case NFP_INTERFACE_QSFP: + /* Read which revision the transceiver compiles with */ + ret = nfp_nsp_read_module_eeprom(nsp, eth_port->eth_index, + SFP_SFF_REV_COMPLIANCE, &data, 1, &read_len); + if (ret != 0) + goto exit_close_nsp; + + if (data == 0) { + info->type = RTE_ETH_MODULE_SFF_8436; + info->eeprom_len = RTE_ETH_MODULE_SFF_8436_MAX_LEN; + } else { + info->type = RTE_ETH_MODULE_SFF_8636; + info->eeprom_len = RTE_ETH_MODULE_SFF_8636_MAX_LEN; + } + break; + case NFP_INTERFACE_QSFP28: + info->type = RTE_ETH_MODULE_SFF_8636; + info->eeprom_len = RTE_ETH_MODULE_SFF_8636_MAX_LEN; + break; + default: + PMD_DRV_LOG(ERR, "Unsupported module %#x detected.", + eth_port->interface); + ret = -EINVAL; + } + +exit_close_nsp: + nfp_nsp_close(nsp); + return ret; +} + +int +nfp_net_get_module_eeprom(struct rte_eth_dev *dev, + struct rte_dev_eeprom_info *info) +{ + int ret = 0; + uint32_t idx; + struct nfp_nsp *nsp; + struct nfp_net_hw_priv *hw_priv; + struct nfp_eth_table_port *eth_port; + + hw_priv = dev->process_private; + nsp = nfp_nsp_open(hw_priv->pf_dev->cpp); + if (nsp == NULL) { + PMD_DRV_LOG(ERR, "Unable to open NSP."); + return -EIO; + } + + if (!nfp_nsp_has_read_module_eeprom(nsp)) { + PMD_DRV_LOG(ERR, "Read module eeprom not supported. 
Please update flash."); + ret = -EOPNOTSUPP; + goto exit_close_nsp; + } + + idx = nfp_net_get_idx(dev); + eth_port = &hw_priv->pf_dev->nfp_eth_table->ports[idx]; + ret = nfp_nsp_read_module_eeprom(nsp, eth_port->eth_index, info->offset, + info->data, info->length, &info->length); + if (ret != 0) { + if (info->length) + PMD_DRV_LOG(ERR, "Incomplete read from module EEPROM: %d.", ret); + else + PMD_DRV_LOG(ERR, "Read from module EEPROM failed: %d.", ret); + } + +exit_close_nsp: + nfp_nsp_close(nsp); + return ret; +} + +static int +nfp_net_led_control(struct rte_eth_dev *dev, + bool is_on) +{ + int ret; + uint32_t nfp_idx; + struct nfp_net_hw_priv *hw_priv; + + hw_priv = dev->process_private; + nfp_idx = nfp_net_get_nfp_index(dev); + + ret = nfp_eth_set_idmode(hw_priv->pf_dev->cpp, nfp_idx, is_on); + if (ret < 0) { + PMD_DRV_LOG(ERR, "Set nfp idmode failed."); return ret; } return 0; } + +int +nfp_net_led_on(struct rte_eth_dev *dev) +{ + return nfp_net_led_control(dev, true); +} + +int +nfp_net_led_off(struct rte_eth_dev *dev) +{ + return nfp_net_led_control(dev, false); +} diff --git a/drivers/net/nfp/nfp_net_common.h b/drivers/net/nfp/nfp_net_common.h index 6291a794b2..d85a00a75e 100644 --- a/drivers/net/nfp/nfp_net_common.h +++ b/drivers/net/nfp/nfp_net_common.h @@ -169,6 +169,9 @@ struct nfp_pf_dev { /** Record the speed uptade */ bool speed_updated; + + /** Function pointer used to check the metadata of recv pkts. */ + bool (*recv_pkt_meta_check_t)(struct nfp_net_meta_parsed *meta); }; #define NFP_NET_ETH_FLOW_LIMIT 8 @@ -345,7 +348,10 @@ void nfp_net_stop_rx_queue(struct rte_eth_dev *dev); void nfp_net_close_rx_queue(struct rte_eth_dev *dev); void nfp_net_stop_tx_queue(struct rte_eth_dev *dev); void nfp_net_close_tx_queue(struct rte_eth_dev *dev); -int nfp_net_set_vxlan_port(struct nfp_net_hw *hw, size_t idx, uint16_t port); +int nfp_net_set_vxlan_port(struct nfp_net_hw *hw, + size_t idx, + uint16_t port, + uint32_t ctrl); void nfp_net_rx_desc_limits(struct nfp_net_hw_priv *hw_priv, uint16_t *min_rx_desc, uint16_t *max_rx_desc); @@ -386,6 +392,15 @@ bool nfp_net_version_check(struct nfp_hw *hw, void nfp_net_ctrl_bar_size_set(struct nfp_pf_dev *pf_dev); void nfp_net_notify_port_speed(struct nfp_net_hw *hw, struct rte_eth_link *link); +bool nfp_net_recv_pkt_meta_check_register(struct nfp_net_hw_priv *hw_priv); + +int nfp_net_get_eeprom_len(struct rte_eth_dev *dev); +int nfp_net_get_eeprom(struct rte_eth_dev *dev, struct rte_dev_eeprom_info *eeprom); +int nfp_net_set_eeprom(struct rte_eth_dev *dev, struct rte_dev_eeprom_info *eeprom); +int nfp_net_get_module_info(struct rte_eth_dev *dev, struct rte_eth_dev_module_info *info); +int nfp_net_get_module_eeprom(struct rte_eth_dev *dev, struct rte_dev_eeprom_info *info); +int nfp_net_led_on(struct rte_eth_dev *dev); +int nfp_net_led_off(struct rte_eth_dev *dev); #define NFP_PRIV_TO_APP_FW_NIC(app_fw_priv)\ ((struct nfp_app_fw_nic *)app_fw_priv) diff --git a/drivers/net/nfp/nfp_net_ctrl.c b/drivers/net/nfp/nfp_net_ctrl.c index b34d8f140f..cc56ff69e9 100644 --- a/drivers/net/nfp/nfp_net_ctrl.c +++ b/drivers/net/nfp/nfp_net_ctrl.c @@ -50,7 +50,7 @@ nfp_net_tlv_caps_parse(struct rte_eth_dev *dev) offset = data - net_hw->super.ctrl_bar; if (data + NFP_NET_CFG_TLV_VALUE > end) { - PMD_DRV_LOG(ERR, "Reached end of BAR without END TLV"); + PMD_DRV_LOG(ERR, "Reached end of BAR without END TLV."); return -EINVAL; } @@ -58,14 +58,14 @@ nfp_net_tlv_caps_parse(struct rte_eth_dev *dev) length = FIELD_GET(NFP_NET_CFG_TLV_HEADER_LENGTH, hdr); if ((length & 
(NFP_NET_CFG_TLV_LENGTH_INC - 1)) != 0) { - PMD_DRV_LOG(ERR, "TLV size not multiple of 4B len: %u", length); + PMD_DRV_LOG(ERR, "TLV size not multiple of 4B len: %u.", length); return -EINVAL; } /* Advance past the header */ data += NFP_NET_CFG_TLV_VALUE; if (data + length > end) { - PMD_DRV_LOG(ERR, "Oversized TLV offset: %u len: %u", + PMD_DRV_LOG(ERR, "Oversized TLV offset: %u len: %u.", offset, length); return -EINVAL; } @@ -74,7 +74,7 @@ nfp_net_tlv_caps_parse(struct rte_eth_dev *dev) switch (tlv_type) { case NFP_NET_CFG_TLV_TYPE_UNKNOWN: - PMD_DRV_LOG(ERR, "Unknown TLV at offset: %u", offset); + PMD_DRV_LOG(ERR, "Unknown TLV at offset: %u.", offset); return -EINVAL; case NFP_NET_CFG_TLV_TYPE_RESERVED: break; @@ -82,7 +82,7 @@ nfp_net_tlv_caps_parse(struct rte_eth_dev *dev) if (length == 0) return 0; - PMD_DRV_LOG(ERR, "END TLV should be empty, has len: %u", length); + PMD_DRV_LOG(ERR, "END TLV should be empty, has len: %u.", length); return -EINVAL; case NFP_NET_CFG_TLV_TYPE_MBOX: caps->mbox_len = length; @@ -100,12 +100,12 @@ nfp_net_tlv_caps_parse(struct rte_eth_dev *dev) if (FIELD_GET(NFP_NET_CFG_TLV_HEADER_REQUIRED, hdr) == 0) break; - PMD_DRV_LOG(ERR, "Unknown TLV type: %u offset: %u len: %u", + PMD_DRV_LOG(ERR, "Unknown TLV type: %u offset: %u len: %u.", tlv_type, offset, length); return -EINVAL; } } - PMD_DRV_LOG(ERR, "Reached end of BAR without END TLV"); + PMD_DRV_LOG(ERR, "Reached end of BAR without END TLV."); return -EINVAL; } diff --git a/drivers/net/nfp/nfp_net_flow.c b/drivers/net/nfp/nfp_net_flow.c index d72f6ce84c..7f3fff2186 100644 --- a/drivers/net/nfp/nfp_net_flow.c +++ b/drivers/net/nfp/nfp_net_flow.c @@ -194,23 +194,23 @@ nfp_net_flow_calculate_items(const struct rte_flow_item items[], for (item = items; item->type != RTE_FLOW_ITEM_TYPE_END; ++item) { switch (item->type) { case RTE_FLOW_ITEM_TYPE_ETH: - PMD_DRV_LOG(DEBUG, "RTE_FLOW_ITEM_TYPE_ETH detected"); + PMD_DRV_LOG(DEBUG, "RTE_FLOW_ITEM_TYPE_ETH detected."); *match_len = sizeof(struct nfp_net_cmsg_match_eth); *item_type = RTE_FLOW_ITEM_TYPE_ETH; ret = 0; break; case RTE_FLOW_ITEM_TYPE_IPV4: - PMD_DRV_LOG(DEBUG, "RTE_FLOW_ITEM_TYPE_IPV4 detected"); + PMD_DRV_LOG(DEBUG, "RTE_FLOW_ITEM_TYPE_IPV4 detected."); *match_len = sizeof(struct nfp_net_cmsg_match_v4); *item_type = RTE_FLOW_ITEM_TYPE_IPV4; return 0; case RTE_FLOW_ITEM_TYPE_IPV6: - PMD_DRV_LOG(DEBUG, "RTE_FLOW_ITEM_TYPE_IPV6 detected"); + PMD_DRV_LOG(DEBUG, "RTE_FLOW_ITEM_TYPE_IPV6 detected."); *match_len = sizeof(struct nfp_net_cmsg_match_v6); *item_type = RTE_FLOW_ITEM_TYPE_IPV6; return 0; default: - PMD_DRV_LOG(ERR, "Can't calculate match length"); + PMD_DRV_LOG(ERR, "Can not calculate match length."); *match_len = 0; return -ENOTSUP; } @@ -460,7 +460,7 @@ nfp_net_flow_item_check(const struct rte_flow_item *item, /* item->last and item->mask cannot exist without item->spec. 
*/ if (item->spec == NULL) { if (item->mask || item->last) { - PMD_DRV_LOG(ERR, "'mask' or 'last' field provided" + PMD_DRV_LOG(ERR, "The 'mask' or 'last' field provided" " without a corresponding 'spec'."); return -EINVAL; } @@ -523,7 +523,7 @@ nfp_net_flow_compile_items(const struct rte_flow_item items[], } if (proc == NULL) { - PMD_DRV_LOG(ERR, "No next item provided for %d", item->type); + PMD_DRV_LOG(ERR, "No next item provided for %d.", item->type); ret = -ENOTSUP; break; } @@ -531,20 +531,20 @@ nfp_net_flow_compile_items(const struct rte_flow_item items[], /* Perform basic sanity checks */ ret = nfp_net_flow_item_check(item, proc); if (ret != 0) { - PMD_DRV_LOG(ERR, "NFP flow item %d check failed", item->type); + PMD_DRV_LOG(ERR, "NFP flow item %d check failed.", item->type); ret = -EINVAL; break; } if (proc->merge == NULL) { - PMD_DRV_LOG(ERR, "NFP flow item %d no proc function", item->type); + PMD_DRV_LOG(ERR, "NFP flow item %d no proc function.", item->type); ret = -ENOTSUP; break; } ret = proc->merge(nfp_flow, item, proc); if (ret != 0) { - PMD_DRV_LOG(ERR, "NFP flow item %d exact merge failed", item->type); + PMD_DRV_LOG(ERR, "NFP flow item %d exact merge failed.", item->type); break; } @@ -590,7 +590,7 @@ nfp_net_flow_action_queue(struct rte_eth_dev *dev, queue = action->conf; if (queue->index >= dev->data->nb_rx_queues || dev->data->rx_queues[queue->index] == NULL) { - PMD_DRV_LOG(ERR, "Queue index is illegal"); + PMD_DRV_LOG(ERR, "Queue index is illegal."); return -EINVAL; } @@ -611,19 +611,19 @@ nfp_net_flow_compile_actions(struct rte_eth_dev *dev, for (action = actions; action->type != RTE_FLOW_ACTION_TYPE_END; ++action) { switch (action->type) { case RTE_FLOW_ACTION_TYPE_DROP: - PMD_DRV_LOG(DEBUG, "Process RTE_FLOW_ACTION_TYPE_DROP"); + PMD_DRV_LOG(DEBUG, "Process RTE_FLOW_ACTION_TYPE_DROP."); nfp_net_flow_action_drop(nfp_flow); return 0; case RTE_FLOW_ACTION_TYPE_MARK: - PMD_DRV_LOG(DEBUG, "Process RTE_FLOW_ACTION_TYPE_MARK"); + PMD_DRV_LOG(DEBUG, "Process RTE_FLOW_ACTION_TYPE_MARK."); nfp_net_flow_action_mark(nfp_flow, action); break; case RTE_FLOW_ACTION_TYPE_QUEUE: - PMD_DRV_LOG(DEBUG, "Process RTE_FLOW_ACTION_TYPE_QUEUE"); + PMD_DRV_LOG(DEBUG, "Process RTE_FLOW_ACTION_TYPE_QUEUE."); ret = nfp_net_flow_action_queue(dev, nfp_flow, action); break; default: - PMD_DRV_LOG(ERR, "Unsupported action type: %d", action->type); + PMD_DRV_LOG(ERR, "Unsupported action type: %d.", action->type); return -ENOTSUP; } } @@ -1089,7 +1089,7 @@ nfp_net_flow_priv_init(struct nfp_pf_dev *pf_dev, priv = rte_zmalloc("nfp_app_nic_priv", sizeof(struct nfp_net_priv), 0); if (priv == NULL) { - PMD_INIT_LOG(ERR, "NFP app nic priv creation failed"); + PMD_INIT_LOG(ERR, "NFP app nic priv creation failed."); ret = -ENOMEM; goto exit; } @@ -1120,7 +1120,7 @@ nfp_net_flow_priv_init(struct nfp_pf_dev *pf_dev, flow_hash_params.entries = priv->flow_limit * NFP_NET_HASH_REDUNDANCE; priv->flow_table = rte_hash_create(&flow_hash_params); if (priv->flow_table == NULL) { - PMD_INIT_LOG(ERR, "flow hash table creation failed"); + PMD_INIT_LOG(ERR, "Flow hash table creation failed."); ret = -ENOMEM; goto free_flow_position; } diff --git a/drivers/net/nfp/nfp_net_meta.c b/drivers/net/nfp/nfp_net_meta.c index 5a67f87bee..70169eba6b 100644 --- a/drivers/net/nfp/nfp_net_meta.c +++ b/drivers/net/nfp/nfp_net_meta.c @@ -177,7 +177,7 @@ nfp_net_meta_parse_qinq(const struct nfp_net_meta_parsed *meta, mb->vlan_tci = rte_cpu_to_le_16(meta->vlan[0].tci); mb->vlan_tci_outer = rte_cpu_to_le_16(meta->vlan[1].tci); - 
PMD_RX_LOG(DEBUG, "Received outer vlan TCI is %u inner vlan TCI is %u", + PMD_RX_LOG(DEBUG, "Received outer vlan TCI is %u inner vlan TCI is %u.", mb->vlan_tci_outer, mb->vlan_tci); mb->ol_flags |= RTE_MBUF_F_RX_QINQ | RTE_MBUF_F_RX_QINQ_STRIPPED; } diff --git a/drivers/net/nfp/nfp_rxtx.c b/drivers/net/nfp/nfp_rxtx.c index 740cc6eac7..35fb637b21 100644 --- a/drivers/net/nfp/nfp_rxtx.c +++ b/drivers/net/nfp/nfp_rxtx.c @@ -151,7 +151,7 @@ nfp_net_rx_fill_freelist(struct nfp_net_rxq *rxq) uint64_t dma_addr; struct nfp_net_dp_buf *rxe = rxq->rxbufs; - PMD_RX_LOG(DEBUG, "Fill Rx Freelist for %hu descriptors", + PMD_RX_LOG(DEBUG, "Fill Rx Freelist for %hu descriptors.", rxq->rx_count); for (i = 0; i < rxq->rx_count; i++) { @@ -159,7 +159,7 @@ nfp_net_rx_fill_freelist(struct nfp_net_rxq *rxq) struct rte_mbuf *mbuf = rte_pktmbuf_alloc(rxq->mem_pool); if (mbuf == NULL) { - PMD_DRV_LOG(ERR, "RX mbuf alloc failed queue_id=%hu", + PMD_DRV_LOG(ERR, "RX mbuf alloc failed queue_id=%hu.", rxq->qidx); return -ENOMEM; } @@ -178,7 +178,7 @@ nfp_net_rx_fill_freelist(struct nfp_net_rxq *rxq) rte_wmb(); /* Not advertising the whole ring as the firmware gets confused if so */ - PMD_RX_LOG(DEBUG, "Increment FL write pointer in %hu", rxq->rx_count - 1); + PMD_RX_LOG(DEBUG, "Increment FL write pointer in %hu.", rxq->rx_count - 1); nfp_qcp_ptr_add(rxq->qcp_fl, NFP_QCP_WRITE_PTR, rxq->rx_count - 1); @@ -260,7 +260,7 @@ nfp_net_set_ptype(const struct nfp_ptype_parsed *nfp_ptype, mbuf_ptype |= RTE_PTYPE_L3_IPV6; break; default: - PMD_RX_LOG(DEBUG, "Unrecognized nfp outer layer 3 packet type: %u", + PMD_RX_LOG(DEBUG, "Unrecognized nfp outer layer 3 packet type: %u.", nfp_ptype->outer_l3_ptype); break; } @@ -278,7 +278,7 @@ nfp_net_set_ptype(const struct nfp_ptype_parsed *nfp_ptype, mbuf_ptype |= RTE_PTYPE_TUNNEL_GENEVE | RTE_PTYPE_L4_UDP; break; default: - PMD_RX_LOG(DEBUG, "Unrecognized nfp tunnel packet type: %u", + PMD_RX_LOG(DEBUG, "Unrecognized nfp tunnel packet type: %u.", nfp_tunnel_ptype); break; } @@ -305,7 +305,7 @@ nfp_net_set_ptype(const struct nfp_ptype_parsed *nfp_ptype, mbuf_ptype |= NFP_PTYPE2RTE(nfp_tunnel_ptype, L4_SCTP); break; default: - PMD_RX_LOG(DEBUG, "Unrecognized nfp layer 4 packet type: %u", + PMD_RX_LOG(DEBUG, "Unrecognized nfp layer 4 packet type: %u.", nfp_ptype->l4_ptype); break; } @@ -332,7 +332,7 @@ nfp_net_set_ptype(const struct nfp_ptype_parsed *nfp_ptype, mbuf_ptype |= NFP_PTYPE2RTE(nfp_tunnel_ptype, L3_IPV6_EXT_UNKNOWN); break; default: - PMD_RX_LOG(DEBUG, "Unrecognized nfp layer 3 packet type: %u", + PMD_RX_LOG(DEBUG, "Unrecognized nfp layer 3 packet type: %u.", nfp_ptype->l3_ptype); break; } @@ -416,6 +416,7 @@ nfp_net_recv_pkts(void *rx_queue, struct nfp_net_hw *hw; struct rte_mbuf *new_mb; struct nfp_net_rxq *rxq; + struct nfp_pf_dev *pf_dev; struct nfp_net_dp_buf *rxb; struct nfp_net_rx_desc *rxds; uint16_t avail_multiplexed = 0; @@ -426,16 +427,17 @@ nfp_net_recv_pkts(void *rx_queue, * DPDK just checks the queue is lower than max queues * enabled. But the queue needs to be configured. 
*/ - PMD_RX_LOG(ERR, "RX Bad queue"); + PMD_RX_LOG(ERR, "RX Bad queue."); return 0; } hw = rxq->hw; + pf_dev = rxq->hw_priv->pf_dev; while (avail + avail_multiplexed < nb_pkts) { rxb = &rxq->rxbufs[rxq->rd_p]; if (unlikely(rxb == NULL)) { - PMD_RX_LOG(ERR, "rxb does not exist!"); + PMD_RX_LOG(ERR, "The rxb does not exist!"); break; } @@ -455,7 +457,7 @@ nfp_net_recv_pkts(void *rx_queue, */ new_mb = rte_pktmbuf_alloc(rxq->mem_pool); if (unlikely(new_mb == NULL)) { - PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u queue_id=%hu", + PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u queue_id=%hu.", rxq->port_id, rxq->qidx); nfp_net_mbuf_alloc_failed(rxq); break; @@ -468,7 +470,7 @@ nfp_net_recv_pkts(void *rx_queue, mb = rxb->mbuf; rxb->mbuf = new_mb; - PMD_RX_LOG(DEBUG, "Packet len: %u, mbuf_size: %u", + PMD_RX_LOG(DEBUG, "Packet len: %u, mbuf_size: %u.", rxds->rxd.data_len, rxq->mbuf_size); /* Size of this segment */ @@ -482,7 +484,7 @@ nfp_net_recv_pkts(void *rx_queue, * responsibility of avoiding it. But we have * to give some info about the error. */ - PMD_RX_LOG(ERR, "mbuf overflow likely due to the RX offset."); + PMD_RX_LOG(ERR, "The mbuf overflow likely due to the RX offset."); rte_pktmbuf_free(mb); break; } @@ -519,20 +521,22 @@ nfp_net_recv_pkts(void *rx_queue, if (unlikely(rxq->rd_p == rxq->rx_count)) /* Wrapping */ rxq->rd_p = 0; - if (((meta.flags >> NFP_NET_META_PORTID) & 0x1) == 0) { + if (pf_dev->recv_pkt_meta_check_t(&meta)) { rx_pkts[avail++] = mb; - } else if (nfp_flower_pf_dispatch_pkts(rxq, mb, meta.port_id)) { - avail_multiplexed++; } else { - rte_pktmbuf_free(mb); - break; + if (nfp_flower_pf_dispatch_pkts(rxq, mb, meta.port_id)) { + avail_multiplexed++; + } else { + rte_pktmbuf_free(mb); + break; + } } } if (nb_hold == 0) return nb_hold; - PMD_RX_LOG(DEBUG, "RX port_id=%hu queue_id=%hu, %hu packets received", + PMD_RX_LOG(DEBUG, "RX port_id=%hu queue_id=%hu, %hu packets received.", rxq->port_id, rxq->qidx, avail); nb_hold += rxq->nb_rx_hold; @@ -543,7 +547,7 @@ nfp_net_recv_pkts(void *rx_queue, */ rte_wmb(); if (nb_hold > rxq->rx_free_thresh) { - PMD_RX_LOG(DEBUG, "port=%hu queue=%hu nb_hold=%hu avail=%hu", + PMD_RX_LOG(DEBUG, "The port=%hu queue=%hu nb_hold=%hu avail=%hu.", rxq->port_id, rxq->qidx, nb_hold, avail); nfp_qcp_ptr_add(rxq->qcp_fl, NFP_QCP_WRITE_PTR, nb_hold); nb_hold = 0; @@ -630,7 +634,7 @@ nfp_net_rx_queue_setup(struct rte_eth_dev *dev, rx_desc_sz = nb_desc * sizeof(struct nfp_net_rx_desc); if (rx_desc_sz % NFP_ALIGN_RING_DESC != 0 || nb_desc > max_rx_desc || nb_desc < min_rx_desc) { - PMD_DRV_LOG(ERR, "Wrong nb_desc value"); + PMD_DRV_LOG(ERR, "Wrong nb_desc value."); return -EINVAL; } @@ -678,7 +682,7 @@ nfp_net_rx_queue_setup(struct rte_eth_dev *dev, sizeof(struct nfp_net_rx_desc) * max_rx_desc, NFP_MEMZONE_ALIGN, socket_id); if (tz == NULL) { - PMD_DRV_LOG(ERR, "Error allocating rx dma"); + PMD_DRV_LOG(ERR, "Error allocating rx dma."); nfp_net_rx_queue_release(dev, queue_idx); dev->data->rx_queues[queue_idx] = NULL; return -ENOMEM; @@ -748,15 +752,15 @@ nfp_net_tx_free_bufs(struct nfp_net_txq *txq) uint32_t todo; uint32_t qcp_rd_p; - PMD_TX_LOG(DEBUG, "queue %hu. Check for descriptor with a complete" - " status", txq->qidx); + PMD_TX_LOG(DEBUG, "Queue %hu. 
Check for descriptor with a complete" + " status.", txq->qidx); /* Work out how many packets have been sent */ qcp_rd_p = nfp_net_read_tx_free_qcp(txq); if (qcp_rd_p == txq->rd_p) { - PMD_TX_LOG(DEBUG, "queue %hu: It seems harrier is not sending " - "packets (%u, %u)", txq->qidx, + PMD_TX_LOG(DEBUG, "Queue %hu: It seems harrier is not sending " + "packets (%u, %u).", txq->qidx, qcp_rd_p, txq->rd_p); return 0; } @@ -766,7 +770,7 @@ nfp_net_tx_free_bufs(struct nfp_net_txq *txq) else todo = qcp_rd_p + txq->tx_count - txq->rd_p; - PMD_TX_LOG(DEBUG, "qcp_rd_p %u, txq->rd_p: %u, qcp->rd_p: %u", + PMD_TX_LOG(DEBUG, "The qcp_rd_p %u, txq->rd_p: %u, qcp->rd_p: %u.", qcp_rd_p, txq->rd_p, txq->rd_p); if (todo == 0) diff --git a/drivers/net/nfp/nfp_rxtx_vec_avx2.c b/drivers/net/nfp/nfp_rxtx_vec_avx2.c index 2a033133a1..66d003f64d 100644 --- a/drivers/net/nfp/nfp_rxtx_vec_avx2.c +++ b/drivers/net/nfp/nfp_rxtx_vec_avx2.c @@ -125,7 +125,7 @@ nfp_vec_avx2_recv1(struct nfp_net_rxq *rxq, { /* Allocate a new mbuf into the software ring. */ if (rte_pktmbuf_alloc_bulk(rxq->mem_pool, rxb, 1) < 0) { - PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u queue_id=%hu", + PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u queue_id=%hu.", rxq->port_id, rxq->qidx); nfp_net_mbuf_alloc_failed(rxq); return -ENOMEM; @@ -146,7 +146,7 @@ nfp_vec_avx2_recv4(struct nfp_net_rxq *rxq, { /* Allocate 4 new mbufs into the software ring. */ if (rte_pktmbuf_alloc_bulk(rxq->mem_pool, rxb, 4) < 0) { - PMD_RX_LOG(DEBUG, "RX mbuf bulk alloc failed port_id=%u queue_id=%hu", + PMD_RX_LOG(DEBUG, "RX mbuf bulk alloc failed port_id=%u queue_id=%hu.", rxq->port_id, rxq->qidx); return -ENOMEM; } @@ -188,7 +188,7 @@ nfp_net_vec_avx2_recv_pkts(void *rx_queue, struct nfp_net_rxq *rxq = rx_queue; if (unlikely(rxq == NULL)) { - PMD_RX_LOG(ERR, "RX Bad queue"); + PMD_RX_LOG(ERR, "RX Bad queue."); return 0; } @@ -262,7 +262,7 @@ nfp_net_vec_avx2_recv_pkts(void *rx_queue, if (nb_hold == 0) return nb_hold; - PMD_RX_LOG(DEBUG, "RX port_id=%u queue_id=%u, %d packets received", + PMD_RX_LOG(DEBUG, "RX port_id=%u queue_id=%u, %d packets received.", rxq->port_id, (unsigned int)rxq->qidx, nb_hold); nb_hold += rxq->nb_rx_hold; @@ -273,7 +273,7 @@ nfp_net_vec_avx2_recv_pkts(void *rx_queue, */ rte_wmb(); if (nb_hold > rxq->rx_free_thresh) { - PMD_RX_LOG(DEBUG, "port=%hu queue=%hu nb_hold=%hu avail=%hu", + PMD_RX_LOG(DEBUG, "The port=%hu queue=%hu nb_hold=%hu avail=%hu.", rxq->port_id, rxq->qidx, nb_hold, avail); nfp_qcp_ptr_add(rxq->qcp_fl, NFP_QCP_WRITE_PTR, nb_hold); nb_hold = 0; diff --git a/drivers/net/nfp/nfp_service.c b/drivers/net/nfp/nfp_service.c index 37e2187a3f..38ab7a39f5 100644 --- a/drivers/net/nfp/nfp_service.c +++ b/drivers/net/nfp/nfp_service.c @@ -27,14 +27,14 @@ nfp_service_enable(const struct rte_service_spec *service_spec, /* Register the service */ ret = rte_service_component_register(service_spec, &info->id); if (ret != 0) { - PMD_DRV_LOG(DEBUG, "Could not register %s", service_spec->name); + PMD_DRV_LOG(DEBUG, "Could not register %s.", service_spec->name); return -EINVAL; } /* Set the NFP service runstate of a component. 
*/ rte_service_component_runstate_set(info->id, 1); - PMD_DRV_LOG(DEBUG, "Enable service %s successfully", service_spec->name); + PMD_DRV_LOG(DEBUG, "Enable service %s successfully.", service_spec->name); return 0; } @@ -47,7 +47,7 @@ nfp_service_disable(struct nfp_service_info *info) service_name = rte_service_get_name(info->id); if (service_name == NULL) { - PMD_DRV_LOG(ERR, "Could not find service %u", info->id); + PMD_DRV_LOG(ERR, "Could not find service %u.", info->id); return -EINVAL; } @@ -60,7 +60,7 @@ nfp_service_disable(struct nfp_service_info *info) } if (i == NFP_SERVICE_DISABLE_WAIT_COUNT) - PMD_DRV_LOG(ERR, "Could not stop service %s", service_name); + PMD_DRV_LOG(ERR, "Could not stop service %s.", service_name); rte_service_component_unregister(info->id); diff --git a/drivers/net/nfp/nfpcore/nfp6000_pcie.c b/drivers/net/nfp/nfpcore/nfp6000_pcie.c index 2c989ee70c..4693577f4e 100644 --- a/drivers/net/nfp/nfpcore/nfp6000_pcie.c +++ b/drivers/net/nfp/nfpcore/nfp6000_pcie.c @@ -622,7 +622,7 @@ nfp6000_area_acquire(struct nfp_cpp_area *area) bar_num = nfp_alloc_bar(nfp, priv); if (bar_num < 0) { - PMD_DRV_LOG(ERR, "Failed to allocate bar %d:%d:%d:%#lx: %d", + PMD_DRV_LOG(ERR, "Failed to allocate bar %d:%d:%d:%#lx: %d.", priv->target, priv->action, priv->token, priv->offset, bar_num); return bar_num; @@ -704,7 +704,7 @@ nfp6000_area_read(struct nfp_cpp_area *area, /* Unaligned? Translate to an explicit access */ if (((priv->offset + offset) & (width - 1)) != 0) { - PMD_DRV_LOG(ERR, "aread_read unaligned!!!"); + PMD_DRV_LOG(ERR, "The aread_read unaligned!!!"); return -EINVAL; } @@ -860,7 +860,7 @@ nfp6000_get_dsn(struct rte_pci_device *pci_dev, pos = rte_pci_find_ext_capability(pci_dev, RTE_PCI_EXT_CAP_ID_DSN); if (pos <= 0) { - PMD_DRV_LOG(ERR, "PCI_EXT_CAP_ID_DSN not found"); + PMD_DRV_LOG(ERR, "PCI_EXT_CAP_ID_DSN not found."); return -ENODEV; } @@ -868,7 +868,7 @@ nfp6000_get_dsn(struct rte_pci_device *pci_dev, len = sizeof(tmp); if (rte_pci_read_config(pci_dev, &tmp, len, pos) < 0) { - PMD_DRV_LOG(ERR, "nfp get device serial number failed"); + PMD_DRV_LOG(ERR, "NFP get device serial number failed."); return -ENOENT; } @@ -933,7 +933,7 @@ nfp6000_init(struct nfp_cpp *cpp) ret = nfp_enable_bars(desc); if (ret != 0) { - PMD_DRV_LOG(ERR, "Enable bars failed"); + PMD_DRV_LOG(ERR, "Enable bars failed."); return -1; } @@ -1018,7 +1018,7 @@ nfp_cpp_from_nfp6000_pcie(struct rte_pci_device *pci_dev, if (NFP_CPP_INTERFACE_CHANNEL_of(interface) != NFP_CPP_INTERFACE_CHANNEL_PEROPENER) { - PMD_DRV_LOG(ERR, "Interface channel is not right"); + PMD_DRV_LOG(ERR, "Interface channel is not right."); free(nfp); return NULL; } @@ -1026,7 +1026,7 @@ nfp_cpp_from_nfp6000_pcie(struct rte_pci_device *pci_dev, /* Probe for all the common NFP devices */ cpp = nfp_cpp_from_device_name(pci_dev, nfp, driver_lock_needed); if (cpp == NULL) { - PMD_DRV_LOG(ERR, "Get cpp from operation failed"); + PMD_DRV_LOG(ERR, "Get cpp from operation failed."); free(nfp); return NULL; } diff --git a/drivers/net/nfp/nfpcore/nfp_cppcore.c b/drivers/net/nfp/nfpcore/nfp_cppcore.c index 0e6045f2f0..dfc6d4613a 100644 --- a/drivers/net/nfp/nfpcore/nfp_cppcore.c +++ b/drivers/net/nfp/nfpcore/nfp_cppcore.c @@ -344,7 +344,7 @@ nfp_cpp_area_alloc_with_name(struct nfp_cpp *cpp, err = cpp->op->area_init(area, target_id, target_addr, size); if (err < 0) { - PMD_DRV_LOG(ERR, "Area init op failed"); + PMD_DRV_LOG(ERR, "Area init op failed."); free(area); return NULL; } @@ -413,12 +413,12 @@ nfp_cpp_area_alloc_acquire(struct nfp_cpp *cpp, 
area = nfp_cpp_area_alloc(cpp, destination, address, size); if (area == NULL) { - PMD_DRV_LOG(ERR, "Failed to allocate CPP area"); + PMD_DRV_LOG(ERR, "Failed to allocate CPP area."); return NULL; } if (nfp_cpp_area_acquire(area) != 0) { - PMD_DRV_LOG(ERR, "Failed to acquire CPP area"); + PMD_DRV_LOG(ERR, "Failed to acquire CPP area."); nfp_cpp_area_free(area); return NULL; } @@ -469,7 +469,7 @@ nfp_cpp_area_acquire(struct nfp_cpp_area *area) if (area->cpp->op->area_acquire != NULL) { int err = area->cpp->op->area_acquire(area); if (err < 0) { - PMD_DRV_LOG(ERR, "Area acquire op failed"); + PMD_DRV_LOG(ERR, "Area acquire op failed."); return -1; } } @@ -950,14 +950,14 @@ nfp_cpp_alloc(struct rte_pci_device *pci_dev, */ err = cpp->op->init(cpp); if (err < 0) { - PMD_DRV_LOG(ERR, "NFP interface initialization failed"); + PMD_DRV_LOG(ERR, "NFP interface initialization failed."); free(cpp); return NULL; } err = nfp_cpp_model_autodetect(cpp, &cpp->model); if (err < 0) { - PMD_DRV_LOG(ERR, "NFP model detection failed"); + PMD_DRV_LOG(ERR, "NFP model detection failed."); free(cpp); return NULL; } @@ -967,7 +967,7 @@ nfp_cpp_alloc(struct rte_pci_device *pci_dev, xpb_addr = 0x000a0000 + (target * 4); err = nfp_xpb_readl(cpp, xpb_addr, &cpp->imb_cat_table[target]); if (err < 0) { - PMD_DRV_LOG(ERR, "Can't read CPP mapping from device"); + PMD_DRV_LOG(ERR, "Can not read CPP mapping from device."); free(cpp); return NULL; } @@ -975,7 +975,7 @@ nfp_cpp_alloc(struct rte_pci_device *pci_dev, err = nfp_cpp_set_mu_locality_lsb(cpp); if (err < 0) { - PMD_DRV_LOG(ERR, "Can't calculate MU locality bit offset"); + PMD_DRV_LOG(ERR, "Can not calculate MU locality bit offset."); free(cpp); return NULL; } @@ -1050,7 +1050,7 @@ nfp_cpp_read(struct nfp_cpp *cpp, area = nfp_cpp_area_alloc_acquire(cpp, destination, offset, length); if (area == NULL) { - PMD_DRV_LOG(ERR, "Area allocation/acquire failed for read"); + PMD_DRV_LOG(ERR, "Area allocation/acquire failed for read."); return -EACCES; } @@ -1089,7 +1089,7 @@ nfp_cpp_write(struct nfp_cpp *cpp, area = nfp_cpp_area_alloc_acquire(cpp, destination, offset, length); if (area == NULL) { - PMD_DRV_LOG(ERR, "Area allocation/acquire failed for write"); + PMD_DRV_LOG(ERR, "Area allocation/acquire failed for write."); return -EACCES; } @@ -1155,7 +1155,7 @@ nfp_cpp_map_area(struct nfp_cpp *cpp, *area = nfp_cpp_area_alloc_acquire(cpp, cpp_id, addr, size); if (*area == NULL) { - PMD_DRV_LOG(ERR, "Area allocation/acquire failed for map"); + PMD_DRV_LOG(ERR, "Area allocation/acquire failed for map."); goto err_eio; } diff --git a/drivers/net/nfp/nfpcore/nfp_elf.c b/drivers/net/nfp/nfpcore/nfp_elf.c index cfa59ed16d..12a9da0fa0 100644 --- a/drivers/net/nfp/nfpcore/nfp_elf.c +++ b/drivers/net/nfp/nfpcore/nfp_elf.c @@ -627,7 +627,7 @@ nfp_elf_populate_fw_mip(struct nfp_elf *ectx, first_entry = rte_le_to_cpu_32(mip->first_entry); if (mip->signature != NFP_MIP_SIGNATURE) { - PMD_DRV_LOG(ERR, "Incorrect MIP signature %#08x", + PMD_DRV_LOG(ERR, "Incorrect MIP signature %#08x.", rte_le_to_cpu_32(mip->signature)); return -EINVAL; } @@ -1066,7 +1066,7 @@ nfp_elf_get_fw_version(uint32_t *fw_version, struct nfp_elf *elf; if (rte_firmware_read(fw_name, &fw_buf, &fsize) != 0) { - PMD_DRV_LOG(ERR, "firmware %s not found!", fw_name); + PMD_DRV_LOG(ERR, "Firmware %s not found!", fw_name); return -ENOENT; } diff --git a/drivers/net/nfp/nfpcore/nfp_hwinfo.c b/drivers/net/nfp/nfpcore/nfp_hwinfo.c index c334202bd7..5240de44fb 100644 --- a/drivers/net/nfp/nfpcore/nfp_hwinfo.c +++ 
b/drivers/net/nfp/nfpcore/nfp_hwinfo.c @@ -110,12 +110,12 @@ nfp_hwinfo_db_walk(struct nfp_hwinfo *hwinfo, key = val + strlen(val) + 1) { val = key + strlen(key) + 1; if (val >= end) { - PMD_DRV_LOG(ERR, "Bad HWINFO - overflowing value"); + PMD_DRV_LOG(ERR, "Bad HWINFO - overflowing value."); return -EINVAL; } if (val + strlen(val) + 1 > end) { - PMD_DRV_LOG(ERR, "Bad HWINFO - overflowing value"); + PMD_DRV_LOG(ERR, "Bad HWINFO - overflowing value."); return -EINVAL; } } @@ -133,7 +133,7 @@ nfp_hwinfo_db_validate(struct nfp_hwinfo *db, size = db->size; if (size > len) { - PMD_DRV_LOG(ERR, "Unsupported hwinfo size %u > %u", size, len); + PMD_DRV_LOG(ERR, "Unsupported hwinfo size %u > %u.", size, len); return -EINVAL; } @@ -141,7 +141,7 @@ nfp_hwinfo_db_validate(struct nfp_hwinfo *db, new_crc = nfp_crc32_posix((char *)db, size); crc = (uint32_t *)(db->start + size); if (new_crc != *crc) { - PMD_DRV_LOG(ERR, "CRC mismatch, calculated %#x, expected %#x", + PMD_DRV_LOG(ERR, "CRC mismatch, calculated %#x, expected %#x.", new_crc, *crc); return -EINVAL; } @@ -162,7 +162,7 @@ nfp_hwinfo_try_fetch(struct nfp_cpp *cpp, res = nfp_resource_acquire(cpp, NFP_RESOURCE_NFP_HWINFO); if (res == NULL) { - PMD_DRV_LOG(ERR, "HWInfo - acquire resource failed"); + PMD_DRV_LOG(ERR, "HWInfo - acquire resource failed."); return NULL; } @@ -181,7 +181,7 @@ nfp_hwinfo_try_fetch(struct nfp_cpp *cpp, err = nfp_cpp_read(cpp, cpp_id, cpp_addr, db, *cpp_size); if (err != (int)*cpp_size) { - PMD_DRV_LOG(ERR, "HWInfo - CPP read error %d", err); + PMD_DRV_LOG(ERR, "HWInfo - CPP read error %d.", err); goto exit_free; } @@ -190,7 +190,7 @@ nfp_hwinfo_try_fetch(struct nfp_cpp *cpp, goto exit_free; if (header->version != NFP_HWINFO_VERSION_2) { - PMD_DRV_LOG(ERR, "Unknown HWInfo version: %#08x", + PMD_DRV_LOG(ERR, "Unknown HWInfo version: %#08x.", header->version); goto exit_free; } @@ -223,7 +223,7 @@ nfp_hwinfo_fetch(struct nfp_cpp *cpp, nanosleep(&wait, NULL); if (count++ > 200) { /* 10ms * 200 = 2s */ - PMD_DRV_LOG(ERR, "NFP access error"); + PMD_DRV_LOG(ERR, "NFP access error."); return NULL; } } diff --git a/drivers/net/nfp/nfpcore/nfp_mip.c b/drivers/net/nfp/nfpcore/nfp_mip.c index 98d1d19047..16b94e6c10 100644 --- a/drivers/net/nfp/nfpcore/nfp_mip.c +++ b/drivers/net/nfp/nfpcore/nfp_mip.c @@ -21,18 +21,18 @@ nfp_mip_try_read(struct nfp_cpp *cpp, ret = nfp_cpp_read(cpp, cpp_id, addr, mip, sizeof(*mip)); if (ret != sizeof(*mip)) { - PMD_DRV_LOG(ERR, "Failed to read MIP data"); + PMD_DRV_LOG(ERR, "Failed to read MIP data."); return -EIO; } if (mip->signature != NFP_MIP_SIGNATURE) { - PMD_DRV_LOG(ERR, "Incorrect MIP signature %#08x", + PMD_DRV_LOG(ERR, "Incorrect MIP signature %#08x.", rte_le_to_cpu_32(mip->signature)); return -EINVAL; } if (mip->mip_version != NFP_MIP_VERSION) { - PMD_DRV_LOG(ERR, "Unsupported MIP version %d", + PMD_DRV_LOG(ERR, "Unsupported MIP version %d.", rte_le_to_cpu_32(mip->mip_version)); return -EINVAL; } @@ -88,7 +88,7 @@ nfp_mip_open(struct nfp_cpp *cpp) err = nfp_mip_read_resource(cpp, mip); if (err != 0) { - PMD_DRV_LOG(ERR, "Failed to read MIP resource"); + PMD_DRV_LOG(ERR, "Failed to read MIP resource."); free(mip); return NULL; } diff --git a/drivers/net/nfp/nfpcore/nfp_nffw.c b/drivers/net/nfp/nfpcore/nfp_nffw.c index 2f07fcd6c1..c808af2dab 100644 --- a/drivers/net/nfp/nfpcore/nfp_nffw.c +++ b/drivers/net/nfp/nfpcore/nfp_nffw.c @@ -175,7 +175,7 @@ nfp_nffw_info_open(struct nfp_cpp *cpp) state->res = nfp_resource_acquire(cpp, NFP_RESOURCE_NFP_NFFW); if (state->res == NULL) { - 
PMD_DRV_LOG(ERR, "NFFW - acquire resource failed"); + PMD_DRV_LOG(ERR, "NFFW - acquire resource failed."); goto err_free; } @@ -188,7 +188,7 @@ nfp_nffw_info_open(struct nfp_cpp *cpp) nfp_resource_address(state->res), fwinf, sizeof(*fwinf)); if (err < (int)sizeof(*fwinf)) { - PMD_DRV_LOG(ERR, "NFFW - CPP read error %d", err); + PMD_DRV_LOG(ERR, "NFFW - CPP read error %d.", err); goto err_release; } diff --git a/drivers/net/nfp/nfpcore/nfp_nsp.c b/drivers/net/nfp/nfpcore/nfp_nsp.c index 32f092eda1..9837b3354b 100644 --- a/drivers/net/nfp/nfpcore/nfp_nsp.c +++ b/drivers/net/nfp/nfpcore/nfp_nsp.c @@ -188,7 +188,7 @@ nfp_nsp_print_extended_error(uint32_t ret_val) for (i = 0; i < RTE_DIM(nsp_errors); i++) if (ret_val == nsp_errors[i].code) - PMD_DRV_LOG(ERR, "err msg: %s", nsp_errors[i].msg); + PMD_DRV_LOG(ERR, "Err msg: %s.", nsp_errors[i].msg); } static int @@ -205,12 +205,12 @@ nfp_nsp_check(struct nfp_nsp *state) err = nfp_cpp_readq(cpp, nsp_cpp, nsp_status, ®); if (err < 0) { - PMD_DRV_LOG(ERR, "NSP - CPP readq failed %d", err); + PMD_DRV_LOG(ERR, "NSP - CPP readq failed %d.", err); return err; } if (FIELD_GET(NSP_STATUS_MAGIC, reg) != NSP_MAGIC) { - PMD_DRV_LOG(ERR, "Cannot detect NFP Service Processor"); + PMD_DRV_LOG(ERR, "Can not detect NFP Service Processor."); return -ENODEV; } @@ -218,7 +218,7 @@ nfp_nsp_check(struct nfp_nsp *state) state->ver.minor = FIELD_GET(NSP_STATUS_MINOR, reg); if (state->ver.major > NSP_MAJOR || state->ver.minor < NSP_MINOR) { - PMD_DRV_LOG(ERR, "Unsupported ABI %hu.%hu", state->ver.major, + PMD_DRV_LOG(ERR, "Unsupported ABI %hu.%hu.", state->ver.major, state->ver.minor); return -EINVAL; } @@ -246,7 +246,7 @@ nfp_nsp_open(struct nfp_cpp *cpp) res = nfp_resource_acquire(cpp, NFP_RESOURCE_NSP); if (res == NULL) { - PMD_DRV_LOG(ERR, "NSP - resource acquire failed"); + PMD_DRV_LOG(ERR, "NSP - resource acquire failed."); return NULL; } @@ -262,7 +262,7 @@ nfp_nsp_open(struct nfp_cpp *cpp) err = nfp_nsp_check(state); if (err != 0) { - PMD_DRV_LOG(DEBUG, "NSP - check failed"); + PMD_DRV_LOG(DEBUG, "NSP - check failed."); nfp_nsp_close(state); return NULL; } @@ -313,7 +313,7 @@ nfp_nsp_wait_reg(struct nfp_cpp *cpp, for (;;) { err = nfp_cpp_readq(cpp, nsp_cpp, addr, reg); if (err < 0) { - PMD_DRV_LOG(ERR, "NSP - CPP readq failed"); + PMD_DRV_LOG(ERR, "NSP - CPP readq failed."); return err; } @@ -365,7 +365,7 @@ nfp_nsp_command_real(struct nfp_nsp *state, err = nfp_nsp_check(state); if (err != 0) { - PMD_DRV_LOG(ERR, "Check NSP command failed"); + PMD_DRV_LOG(ERR, "Check NSP command failed."); return err; } @@ -390,7 +390,7 @@ nfp_nsp_command_real(struct nfp_nsp *state, err = nfp_nsp_wait_reg(cpp, ®, nsp_cpp, nsp_command, NSP_COMMAND_START, 0); if (err != 0) { - PMD_DRV_LOG(ERR, "Error %d waiting for code %#04x to start", + PMD_DRV_LOG(ERR, "Error %d waiting for code %#04x to start.", err, arg->code); return err; } @@ -399,7 +399,7 @@ nfp_nsp_command_real(struct nfp_nsp *state, err = nfp_nsp_wait_reg(cpp, ®, nsp_cpp, nsp_status, NSP_STATUS_BUSY, 0); if (err != 0) { - PMD_DRV_LOG(ERR, "Error %d waiting for code %#04x to complete", + PMD_DRV_LOG(ERR, "Error %d waiting for code %#04x to complete.", err, arg->code); return err; } @@ -415,7 +415,7 @@ nfp_nsp_command_real(struct nfp_nsp *state, err = FIELD_GET(NSP_STATUS_RESULT, reg); if (err != 0) { if (!arg->error_quiet) - PMD_DRV_LOG(ERR, "Result (error) code set: %d (%d) command: %d", + PMD_DRV_LOG(ERR, "Result (error) code set: %d (%d) command: %d.", -err, (int)ret_val, arg->code); if (arg->error_cb != 0) @@ -477,7 
+477,7 @@ nfp_nsp_command_buf_def(struct nfp_nsp *nsp, if (!FIELD_FIT(NSP_BUFFER_CPP, cpp_id >> 8) || !FIELD_FIT(NSP_BUFFER_ADDRESS, cpp_buf)) { - PMD_DRV_LOG(ERR, "Buffer out of reach %#08x %#016lx", + PMD_DRV_LOG(ERR, "Buffer out of reach %#08x %#016lx.", cpp_id, cpp_buf); return -EINVAL; } @@ -487,7 +487,7 @@ nfp_nsp_command_buf_def(struct nfp_nsp *nsp, ret = nfp_nsp_command_real(nsp, &arg->arg); if (ret < 0) { if (!arg->arg.error_quiet) - PMD_DRV_LOG(ERR, "NSP command failed"); + PMD_DRV_LOG(ERR, "NSP command failed."); return ret; } @@ -516,7 +516,7 @@ nfp_nsp_command_buf(struct nfp_nsp *nsp, struct nfp_cpp *cpp = nsp->cpp; if (nsp->ver.minor < 13) { - PMD_DRV_LOG(ERR, "NSP: Code %#04x with buffer not supported ABI %hu.%hu)", + PMD_DRV_LOG(ERR, "NSP: Code %#04x with buffer not supported ABI %hu.%hu).", arg->arg.code, nsp->ver.major, nsp->ver.minor); return -EOPNOTSUPP; } @@ -531,7 +531,7 @@ nfp_nsp_command_buf(struct nfp_nsp *nsp, size = FIELD_GET(NSP_DFLT_BUFFER_SIZE_MB, reg) * SZ_1M + FIELD_GET(NSP_DFLT_BUFFER_SIZE_4KB, reg) * SZ_4K; if (size < max_size) { - PMD_DRV_LOG(ERR, "NSP: default buffer too small for command %#04x (%zu < %zu)", + PMD_DRV_LOG(ERR, "NSP: default buffer too small for command %#04x (%zu < %zu).", arg->arg.code, size, max_size); return -EINVAL; } @@ -563,7 +563,7 @@ nfp_nsp_wait(struct nfp_nsp *state) } if (err != 0) - PMD_DRV_LOG(ERR, "NSP failed to respond %d", err); + PMD_DRV_LOG(ERR, "NSP failed to respond %d.", err); return err; } @@ -616,9 +616,9 @@ nfp_nsp_load_fw_extended_msg(struct nfp_nsp *state, return; if (major >= RTE_DIM(major_msg)) - PMD_DRV_LOG(INFO, "FW loading status: %x", ret_val); + PMD_DRV_LOG(INFO, "FW loading status: %x.", ret_val); else if (minor >= RTE_DIM(minor_msg)) - PMD_DRV_LOG(INFO, "%s, reason code: %d", major_msg[major], minor); + PMD_DRV_LOG(INFO, "%s, reason code: %d.", major_msg[major], minor); else PMD_DRV_LOG(INFO, "%s%c %s", major_msg[major], minor != 0 ? ',' : '.', minor_msg[minor]); @@ -808,6 +808,102 @@ nfp_nsp_hwinfo_lookup_real(struct nfp_nsp *state, return nfp_nsp_command_buf(state, &hwinfo_lookup); } +static int +nfp_nsp_read_module_eeprom_real(struct nfp_nsp *state, + void *buf, + uint32_t size) +{ + struct nfp_nsp_command_buf_arg module_eeprom = { + { + .code = SPCODE_READ_SFF_EEPROM, + .option = size, + }, + .in_buf = buf, + .in_size = size, + .out_buf = buf, + .out_size = size, + }; + + return nfp_nsp_command_buf(state, &module_eeprom); +} + +int +nfp_nsp_read_module_eeprom(struct nfp_nsp *state, + int eth_index, + uint32_t offset, + void *data, + uint32_t len, + uint32_t *read_len) +{ + int ret; + int bufsz; + struct eeprom_buf { + uint8_t metalen; + rte_le16_t length; + rte_le16_t offset; + rte_le16_t readlen; + uint8_t eth_index; + uint8_t data[]; + } __rte_packed * buf; + + /* Buffer must be large enough and rounded to the next block size. 
*/ + bufsz = sizeof(*(buf)) + sizeof((buf)->data[0]) * + (RTE_ALIGN_CEIL(len, NSP_SFF_EEPROM_BLOCK_LEN)); + buf = calloc(1, bufsz); + if (buf == NULL) + return -ENOMEM; + + buf->metalen = offsetof(struct eeprom_buf, data) / NSP_SFF_EEPROM_BLOCK_LEN; + buf->length = rte_cpu_to_le_16(len); + buf->offset = rte_cpu_to_le_16(offset); + buf->eth_index = eth_index; + + ret = nfp_nsp_read_module_eeprom_real(state, buf, bufsz); + if (ret != 0) + goto free_exit; + + if (rte_le_to_cpu_16(buf->readlen) < len) { + ret = -EIO; + goto free_exit; + } + + if (len != 0) + memcpy(data, buf->data, len); + + *read_len = len; + +free_exit: + free(buf); + return ret; +} + +int +nfp_nsp_hwinfo_lookup(struct nfp_nsp *state, + void *buf, + uint32_t size) +{ + int ret; + uint32_t size_tmp; + + if (!nfp_nsp_has_hwinfo_lookup(state)) { + PMD_DRV_LOG(ERR, "NSP HWinfo lookup not supported. Please update flash."); + return -EOPNOTSUPP; + } + + size_tmp = RTE_MIN(size, NFP_HWINFO_LOOKUP_SIZE); + + ret = nfp_nsp_hwinfo_lookup_real(state, buf, size, false); + if (ret != 0) + return ret; + + if (strnlen(buf, size_tmp) == size_tmp) { + PMD_DRV_LOG(ERR, "NSP HWinfo value not NULL terminated."); + return -EINVAL; + } + + return 0; +} + int nfp_nsp_hwinfo_lookup_optional(struct nfp_nsp *state, void *buf, @@ -818,7 +914,7 @@ nfp_nsp_hwinfo_lookup_optional(struct nfp_nsp *state, size_t min_size; if (strnlen(default_val, size) == size) { - PMD_DRV_LOG(ERR, "NSP HWinfo default value not NULL terminated"); + PMD_DRV_LOG(ERR, "NSP HWinfo default value not NULL terminated."); return -EINVAL; } @@ -831,12 +927,12 @@ nfp_nsp_hwinfo_lookup_optional(struct nfp_nsp *state, if (ret == -ENOENT) goto default_return; - PMD_DRV_LOG(ERR, "NSP HWinfo lookup failed: %d", ret); + PMD_DRV_LOG(ERR, "NSP HWinfo lookup failed: %d.", ret); return ret; } if (strnlen(buf, min_size) == min_size) { - PMD_DRV_LOG(ERR, "NSP HWinfo value not NULL terminated"); + PMD_DRV_LOG(ERR, "NSP HWinfo value not NULL terminated."); return -EINVAL; } diff --git a/drivers/net/nfp/nfpcore/nfp_nsp.h b/drivers/net/nfp/nfpcore/nfp_nsp.h index cfb5066fc9..6230a84e34 100644 --- a/drivers/net/nfp/nfpcore/nfp_nsp.h +++ b/drivers/net/nfp/nfpcore/nfp_nsp.h @@ -8,6 +8,11 @@ #include "nfp_cpp.h" +/* EEPROM byte offsets */ +#define SFP_SFF8472_COMPLIANCE 0x5e +#define SFP_SFF_REV_COMPLIANCE 1 +#define NSP_SFF_EEPROM_BLOCK_LEN 8 + /* Defines the valid values of the 'abi_drv_reset' hwinfo key */ #define NFP_NSP_DRV_RESET_DISK 0 #define NFP_NSP_DRV_RESET_ALWAYS 1 @@ -211,6 +216,7 @@ int nfp_eth_set_speed(struct nfp_nsp *nsp, uint32_t speed); int nfp_eth_set_split(struct nfp_nsp *nsp, uint32_t lanes); int nfp_eth_set_tx_pause(struct nfp_nsp *nsp, bool tx_pause); int nfp_eth_set_rx_pause(struct nfp_nsp *nsp, bool rx_pause); +int nfp_eth_set_idmode(struct nfp_cpp *cpp, uint32_t idx, bool is_on); /* NSP static information */ struct nfp_nsp_identify { @@ -238,8 +244,12 @@ int nfp_hwmon_read_sensor(struct nfp_cpp *cpp, enum nfp_nsp_sensor_id id, uint32_t *val); bool nfp_nsp_fw_loaded(struct nfp_nsp *state); int nfp_nsp_load_stored_fw(struct nfp_nsp *state); +int nfp_nsp_hwinfo_lookup(struct nfp_nsp *state, void *buf, uint32_t size); int nfp_nsp_hwinfo_lookup_optional(struct nfp_nsp *state, void *buf, size_t size, const char *default_val); +int nfp_nsp_read_module_eeprom(struct nfp_nsp *state, int eth_index, + uint32_t offset, void *data, + uint32_t len, uint32_t *read_len); /* The buf used to receive bitmap of link modes */ struct nfp_eth_media_buf { diff --git 
a/drivers/net/nfp/nfpcore/nfp_nsp_cmds.c b/drivers/net/nfp/nfpcore/nfp_nsp_cmds.c index 46fa5467de..b1cce03e70 100644 --- a/drivers/net/nfp/nfpcore/nfp_nsp_cmds.c +++ b/drivers/net/nfp/nfpcore/nfp_nsp_cmds.c @@ -36,7 +36,7 @@ nfp_nsp_identify(struct nfp_nsp *nsp) memset(ni, 0, sizeof(*ni)); ret = nfp_nsp_read_identify(nsp, ni, sizeof(*ni)); if (ret < 0) { - PMD_DRV_LOG(ERR, "reading bsp version failed %d", ret); + PMD_DRV_LOG(ERR, "Reading BSP version failed %d.", ret); goto exit_free; } diff --git a/drivers/net/nfp/nfpcore/nfp_nsp_eth.c b/drivers/net/nfp/nfpcore/nfp_nsp_eth.c index d1b43a75f0..404690d05f 100644 --- a/drivers/net/nfp/nfpcore/nfp_nsp_eth.c +++ b/drivers/net/nfp/nfpcore/nfp_nsp_eth.c @@ -44,6 +44,7 @@ #define NSP_ETH_CTRL_SET_LANES RTE_BIT64(5) #define NSP_ETH_CTRL_SET_ANEG RTE_BIT64(6) #define NSP_ETH_CTRL_SET_FEC RTE_BIT64(7) +#define NSP_ETH_CTRL_SET_IDMODE RTE_BIT64(8) #define NSP_ETH_CTRL_SET_TX_PAUSE RTE_BIT64(10) #define NSP_ETH_CTRL_SET_RX_PAUSE RTE_BIT64(11) @@ -223,7 +224,7 @@ nfp_eth_calc_port_geometry(struct nfp_eth_table *table) if (table->ports[i].label_subport == table->ports[j].label_subport) - PMD_DRV_LOG(DEBUG, "Port %d subport %d is a duplicate", + PMD_DRV_LOG(DEBUG, "Port %d subport %d is a duplicate.", table->ports[i].label_port, table->ports[i].label_subport); @@ -267,7 +268,7 @@ nfp_eth_read_ports_real(struct nfp_nsp *nsp) memset(entries, 0, NSP_ETH_TABLE_SIZE); ret = nfp_nsp_read_eth_table(nsp, entries, NSP_ETH_TABLE_SIZE); if (ret < 0) { - PMD_DRV_LOG(ERR, "Reading port table failed %d", ret); + PMD_DRV_LOG(ERR, "Reading port table failed %d.", ret); goto err; } @@ -281,7 +282,7 @@ nfp_eth_read_ports_real(struct nfp_nsp *nsp) * above. */ if (ret != 0 && ret != cnt) { - PMD_DRV_LOG(ERR, "Table entry count (%d) unmatch entries present (%d)", + PMD_DRV_LOG(ERR, "Table entry count (%d) unmatch entries present (%d).", ret, cnt); goto err; } @@ -362,12 +363,12 @@ nfp_eth_config_start(struct nfp_cpp *cpp, ret = nfp_nsp_read_eth_table(nsp, entries, NSP_ETH_TABLE_SIZE); if (ret < 0) { - PMD_DRV_LOG(ERR, "Reading port table failed %d", ret); + PMD_DRV_LOG(ERR, "Reading port table failed %d.", ret); goto err; } if ((entries[idx].port & NSP_ETH_PORT_LANES_MASK) == 0) { - PMD_DRV_LOG(ERR, "Trying to set port state on disabled port %d", idx); + PMD_DRV_LOG(ERR, "Trying to set port state on disabled port %d.", idx); goto err; } @@ -536,7 +537,7 @@ nfp_eth_set_bit_config(struct nfp_nsp *nsp, * codes were initially not populated correctly. */ if (nfp_nsp_get_abi_ver_minor(nsp) < 17) { - PMD_DRV_LOG(ERR, "set operations not supported, please update flash"); + PMD_DRV_LOG(ERR, "Set operations not supported, please update flash."); return -EOPNOTSUPP; } @@ -661,7 +662,7 @@ nfp_eth_set_speed(struct nfp_nsp *nsp, rate = nfp_eth_speed2rate(speed); if (rate == RATE_INVALID) { - PMD_DRV_LOG(ERR, "Could not find matching lane rate for speed %u", speed); + PMD_DRV_LOG(ERR, "Could not find matching lane rate for speed %u.", speed); return -EINVAL; } @@ -736,3 +737,38 @@ nfp_eth_set_rx_pause(struct nfp_nsp *nsp, return NFP_ETH_SET_BIT_CONFIG(nsp, NSP_ETH_RAW_STATE, NSP_ETH_STATE_RX_PAUSE, rx_pause, NSP_ETH_CTRL_SET_RX_PAUSE); } + +int +nfp_eth_set_idmode(struct nfp_cpp *cpp, + uint32_t idx, + bool is_on) +{ + uint64_t reg; + struct nfp_nsp *nsp; + union eth_table_entry *entries; + + nsp = nfp_eth_config_start(cpp, idx); + if (nsp == NULL) + return -EIO; + + /* + * Older ABI versions did support this feature, however this has only + * been reliable since ABI 32. 
+ */ + if (nfp_nsp_get_abi_ver_minor(nsp) < 32) { + PMD_DRV_LOG(ERR, "Operation only supported on ABI 32 or newer."); + nfp_eth_config_cleanup_end(nsp); + return -ENOTSUP; + } + + entries = nfp_nsp_config_entries(nsp); + + reg = rte_le_to_cpu_64(entries[idx].control); + reg &= ~NSP_ETH_CTRL_SET_IDMODE; + reg |= FIELD_PREP(NSP_ETH_CTRL_SET_IDMODE, is_on); + entries[idx].control = rte_cpu_to_le_64(reg); + + nfp_nsp_config_set_modified(nsp, 1); + + return nfp_eth_config_commit_end(nsp); +} diff --git a/drivers/net/nfp/nfpcore/nfp_resource.c b/drivers/net/nfp/nfpcore/nfp_resource.c index b05144036a..6437a78852 100644 --- a/drivers/net/nfp/nfpcore/nfp_resource.c +++ b/drivers/net/nfp/nfpcore/nfp_resource.c @@ -69,7 +69,7 @@ nfp_cpp_resource_find(struct nfp_cpp *cpp, /* Search for a matching entry */ if (memcmp(name_pad, NFP_RESOURCE_TBL_NAME "\0\0\0\0\0\0\0\0", 8) == 0) { - PMD_DRV_LOG(ERR, "Grabbing device lock not supported"); + PMD_DRV_LOG(ERR, "Grabbing device lock not supported."); return -EOPNOTSUPP; } @@ -109,19 +109,19 @@ nfp_resource_try_acquire(struct nfp_cpp *cpp, int err; if (nfp_cpp_mutex_lock(dev_mutex) != 0) { - PMD_DRV_LOG(ERR, "RESOURCE - CPP mutex lock failed"); + PMD_DRV_LOG(ERR, "RESOURCE - CPP mutex lock failed."); return -EINVAL; } err = nfp_cpp_resource_find(cpp, res); if (err != 0) { - PMD_DRV_LOG(ERR, "RESOURCE - CPP resource find failed"); + PMD_DRV_LOG(ERR, "RESOURCE - CPP resource find failed."); goto err_unlock_dev; } err = nfp_cpp_mutex_trylock(res->mutex); if (err != 0) { - PMD_DRV_LOG(ERR, "RESOURCE - CPP mutex trylock failed"); + PMD_DRV_LOG(ERR, "RESOURCE - CPP mutex trylock failed."); goto err_res_mutex_free; } @@ -173,7 +173,7 @@ nfp_resource_acquire(struct nfp_cpp *cpp, dev_mutex = nfp_cpp_mutex_alloc(cpp, NFP_RESOURCE_TBL_TARGET, NFP_RESOURCE_TBL_BASE, NFP_RESOURCE_TBL_KEY); if (dev_mutex == NULL) { - PMD_DRV_LOG(ERR, "RESOURCE - CPP mutex alloc failed"); + PMD_DRV_LOG(ERR, "RESOURCE - CPP mutex alloc failed."); goto err_free; } @@ -185,12 +185,12 @@ nfp_resource_acquire(struct nfp_cpp *cpp, if (err == 0) break; if (err != -EBUSY) { - PMD_DRV_LOG(ERR, "RESOURCE - try acquire failed"); + PMD_DRV_LOG(ERR, "RESOURCE - try acquire failed."); goto mutex_free; } if (count++ > 1000) { /* 1ms * 1000 = 1s */ - PMD_DRV_LOG(ERR, "Error: resource %s timed out", name); + PMD_DRV_LOG(ERR, "Error: resource %s timed out.", name); goto mutex_free; } diff --git a/drivers/net/nfp/nfpcore/nfp_rtsym.c b/drivers/net/nfp/nfpcore/nfp_rtsym.c index 7d9cfb0d42..9f0d17cd0a 100644 --- a/drivers/net/nfp/nfpcore/nfp_rtsym.c +++ b/drivers/net/nfp/nfpcore/nfp_rtsym.c @@ -272,7 +272,7 @@ nfp_rtsym_size(const struct nfp_rtsym *sym) { switch (sym->type) { case NFP_RTSYM_TYPE_NONE: - PMD_DRV_LOG(ERR, "The type of rtsym '%s' is NONE", sym->name); + PMD_DRV_LOG(ERR, "The type of rtsym '%s' is NONE.", sym->name); return 0; case NFP_RTSYM_TYPE_OBJECT: /* FALLTHROUGH */ @@ -281,7 +281,7 @@ nfp_rtsym_size(const struct nfp_rtsym *sym) case NFP_RTSYM_TYPE_ABS: return sizeof(uint64_t); default: - PMD_DRV_LOG(ERR, "Unknown RTSYM type %u", sym->type); + PMD_DRV_LOG(ERR, "Unknown RTSYM type %u.", sym->type); return 0; } } @@ -296,7 +296,7 @@ nfp_rtsym_to_dest(struct nfp_cpp *cpp, uint64_t *addr) { if (sym->type != NFP_RTSYM_TYPE_OBJECT) { - PMD_DRV_LOG(ERR, "rtsym '%s': direct access to non-object rtsym", + PMD_DRV_LOG(ERR, "RTSYM '%s': direct access to non-object rtsym.", sym->name); return -EINVAL; } @@ -314,7 +314,7 @@ nfp_rtsym_to_dest(struct nfp_cpp *cpp, *cpp_id = 
NFP_CPP_ISLAND_ID(NFP_CPP_TARGET_MU, action, token, sym->domain); } else { - PMD_DRV_LOG(ERR, "rtsym '%s': unhandled target encoding: %d", + PMD_DRV_LOG(ERR, "RTSYM '%s': unhandled target encoding: %d.", sym->name, sym->target); return -EINVAL; } @@ -338,7 +338,7 @@ nfp_rtsym_read_real(struct nfp_cpp *cpp, uint64_t sym_size = nfp_rtsym_size(sym); if (offset >= sym_size) { - PMD_DRV_LOG(ERR, "rtsym '%s' read out of bounds", sym->name); + PMD_DRV_LOG(ERR, "RTSYM '%s' read out of bounds.", sym->name); return -ENXIO; } @@ -387,7 +387,7 @@ nfp_rtsym_readl_real(struct nfp_cpp *cpp, uint32_t cpp_id; if (offset + 4 > nfp_rtsym_size(sym)) { - PMD_DRV_LOG(ERR, "rtsym '%s': readl out of bounds", sym->name); + PMD_DRV_LOG(ERR, "RTSYM '%s': readl out of bounds.", sym->name); return -ENXIO; } @@ -420,7 +420,7 @@ nfp_rtsym_readq_real(struct nfp_cpp *cpp, uint32_t cpp_id; if (offset + 8 > nfp_rtsym_size(sym)) { - PMD_DRV_LOG(ERR, "rtsym '%s': readq out of bounds", sym->name); + PMD_DRV_LOG(ERR, "RTSYM '%s': readq out of bounds.", sym->name); return -ENXIO; } @@ -461,7 +461,7 @@ nfp_rtsym_write_real(struct nfp_cpp *cpp, uint64_t sym_size = nfp_rtsym_size(sym); if (offset > sym_size) { - PMD_DRV_LOG(ERR, "rtsym '%s' write out of bounds", sym->name); + PMD_DRV_LOG(ERR, "RTSYM '%s' write out of bounds.", sym->name); return -ENXIO; } @@ -498,7 +498,7 @@ nfp_rtsym_writel_real(struct nfp_cpp *cpp, uint32_t cpp_id; if (offset + 4 > nfp_rtsym_size(sym)) { - PMD_DRV_LOG(ERR, "rtsym '%s' write out of bounds", sym->name); + PMD_DRV_LOG(ERR, "RTSYM '%s' write out of bounds.", sym->name); return -ENXIO; } @@ -531,7 +531,7 @@ nfp_rtsym_writeq_real(struct nfp_cpp *cpp, uint32_t cpp_id; if (offset + 8 > nfp_rtsym_size(sym)) { - PMD_DRV_LOG(ERR, "rtsym '%s' write out of bounds", sym->name); + PMD_DRV_LOG(ERR, "RTSYM '%s' write out of bounds.", sym->name); return -ENXIO; } @@ -593,7 +593,7 @@ nfp_rtsym_read_le(struct nfp_rtsym_table *rtbl, err = nfp_rtsym_readq(rtbl->cpp, sym, 0, &val); break; default: - PMD_DRV_LOG(ERR, "rtsym '%s' unsupported size: %#lx", + PMD_DRV_LOG(ERR, "RTSYM '%s' unsupported size: %#lx.", name, sym->size); err = -EINVAL; break; @@ -648,7 +648,7 @@ nfp_rtsym_write_le(struct nfp_rtsym_table *rtbl, err = nfp_rtsym_writeq(rtbl->cpp, sym, 0, value); break; default: - PMD_DRV_LOG(ERR, "rtsym '%s' unsupported size: %#lx", + PMD_DRV_LOG(ERR, "RTSYM '%s' unsupported size: %#lx.", name, sym_size); err = -EINVAL; break; @@ -672,26 +672,26 @@ nfp_rtsym_map_offset(struct nfp_rtsym_table *rtbl, sym = nfp_rtsym_lookup(rtbl, name); if (sym == NULL) { - PMD_DRV_LOG(ERR, "Symbol lookup fails for %s", name); + PMD_DRV_LOG(ERR, "Symbol lookup fails for %s.", name); return NULL; } ret = nfp_rtsym_to_dest(rtbl->cpp, sym, NFP_CPP_ACTION_RW, 0, 0, &cpp_id, &addr); if (ret != 0) { - PMD_DRV_LOG(ERR, "rtsym '%s': mapping failed", name); + PMD_DRV_LOG(ERR, "RTSYM '%s': mapping failed.", name); return NULL; } if (sym->size < min_size) { - PMD_DRV_LOG(ERR, "Symbol %s too small (%" PRIu64 " < %u)", name, + PMD_DRV_LOG(ERR, "Symbol %s too small (%" PRIu64 " < %u).", name, sym->size, min_size); return NULL; } mem = nfp_cpp_map_area(rtbl->cpp, cpp_id, addr + offset, sym->size, area); if (mem == NULL) { - PMD_DRV_LOG(ERR, "Failed to map symbol %s", name); + PMD_DRV_LOG(ERR, "Failed to map symbol %s.", name); return NULL; } @@ -741,13 +741,13 @@ nfp_rtsym_readl_indirect(struct nfp_rtsym_table *rtbl, aux_sym = nfp_rtsym_lookup(rtbl, aux_name); if (aux_sym == NULL) { - PMD_DRV_LOG(ERR, "Failed to find symbol %s", aux_name); + 
PMD_DRV_LOG(ERR, "Failed to find symbol %s.", aux_name); return -ENOENT; } sym = nfp_rtsym_lookup(rtbl, name); if (sym == NULL) { - PMD_DRV_LOG(ERR, "Failed to find symbol %s", name); + PMD_DRV_LOG(ERR, "Failed to find symbol %s.", name); return -ENOENT; } @@ -791,13 +791,13 @@ nfp_rtsym_writel_indirect(struct nfp_rtsym_table *rtbl, aux_sym = nfp_rtsym_lookup(rtbl, aux_name); if (aux_sym == NULL) { - PMD_DRV_LOG(ERR, "Failed to find symbol %s", aux_name); + PMD_DRV_LOG(ERR, "Failed to find symbol %s.", aux_name); return -ENOENT; } sym = nfp_rtsym_lookup(rtbl, name); if (sym == NULL) { - PMD_DRV_LOG(ERR, "Failed to find symbol %s", name); + PMD_DRV_LOG(ERR, "Failed to find symbol %s.", name); return -ENOENT; } diff --git a/drivers/net/nfp/nfpcore/nfp_sync.c b/drivers/net/nfp/nfpcore/nfp_sync.c index 686cdf8eb1..1b594257c6 100644 --- a/drivers/net/nfp/nfpcore/nfp_sync.c +++ b/drivers/net/nfp/nfpcore/nfp_sync.c @@ -91,11 +91,11 @@ nfp_sync_free(struct nfp_sync *sync) } if (sync->process.avail != NFP_SYNC_ELEMENT_MAX) - PMD_DRV_LOG(ERR, "Sync process handle residue"); + PMD_DRV_LOG(ERR, "Sync process handle residue."); for (i = 0; i < NFP_SYNC_PCI_MAX; i++) { if (sync->pci[i].avail != NFP_SYNC_ELEMENT_MAX) - PMD_DRV_LOG(ERR, "Sync %s pci handle residue", + PMD_DRV_LOG(ERR, "Sync %s pci handle residue.", sync->pci[i].pci_name); } @@ -206,7 +206,7 @@ nfp_sync_process_inner_handle_alloc(struct nfp_sync *sync, handle = nfp_sync_common_handle_alloc(&sync->process, magic, size); if (handle == NULL) - PMD_DRV_LOG(ERR, "Process handle alloc failed"); + PMD_DRV_LOG(ERR, "Process handle alloc failed."); rte_spinlock_unlock(&sync->spinlock); @@ -280,7 +280,7 @@ nfp_sync_pci_inner_handle_alloc(struct nfp_sync *sync, handle = nfp_sync_common_handle_alloc(&sync->pci[pci_avail_id], magic, size); if (handle == NULL) - PMD_DRV_LOG(ERR, "PCI handle alloc failed"); + PMD_DRV_LOG(ERR, "PCI handle alloc failed."); rte_spinlock_unlock(&sync->spinlock); diff --git a/drivers/net/ngbe/base/ngbe_regs.h b/drivers/net/ngbe/base/ngbe_regs.h index 8a6776b0e6..b1295280a7 100644 --- a/drivers/net/ngbe/base/ngbe_regs.h +++ b/drivers/net/ngbe/base/ngbe_regs.h @@ -712,6 +712,8 @@ enum ngbe_5tuple_protocol { #define NGBE_MACRXFLT_CTL_PASS LS(3, 6, 0x3) #define NGBE_MACRXFLT_RXALL MS(31, 0x1) +#define NGBE_MAC_WDG_TIMEOUT 0x01100C + /****************************************************************************** * Statistic Registers ******************************************************************************/ diff --git a/drivers/net/ngbe/ngbe_ethdev.c b/drivers/net/ngbe/ngbe_ethdev.c index 6c45ffaad3..08e87471f6 100644 --- a/drivers/net/ngbe/ngbe_ethdev.c +++ b/drivers/net/ngbe/ngbe_ethdev.c @@ -263,6 +263,8 @@ ngbe_pf_reset_hw(struct ngbe_hw *hw) status = hw->mac.reset_hw(hw); ctrl_ext = rd32(hw, NGBE_PORTCTL); + /* let hardware know driver is loaded */ + ctrl_ext |= NGBE_PORTCTL_DRVLOAD; /* Set PF Reset Done bit so PF/VF Mail Ops can work */ ctrl_ext |= NGBE_PORTCTL_RSTDONE; wr32(hw, NGBE_PORTCTL, ctrl_ext); @@ -584,41 +586,25 @@ ngbe_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on) } static void -ngbe_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on) +ngbe_vlan_strip_q_set(struct rte_eth_dev *dev, uint16_t queue, int on) { - struct ngbe_hw *hw = ngbe_dev_hw(dev); - struct ngbe_rx_queue *rxq; - bool restart; - uint32_t rxcfg, rxbal, rxbah; - if (on) ngbe_vlan_hw_strip_enable(dev, queue); else ngbe_vlan_hw_strip_disable(dev, queue); +} - rxq = dev->data->rx_queues[queue]; - rxbal = 
rd32(hw, NGBE_RXBAL(rxq->reg_idx)); - rxbah = rd32(hw, NGBE_RXBAH(rxq->reg_idx)); - rxcfg = rd32(hw, NGBE_RXCFG(rxq->reg_idx)); - if (rxq->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP) { - restart = (rxcfg & NGBE_RXCFG_ENA) && - !(rxcfg & NGBE_RXCFG_VLAN); - rxcfg |= NGBE_RXCFG_VLAN; - } else { - restart = (rxcfg & NGBE_RXCFG_ENA) && - (rxcfg & NGBE_RXCFG_VLAN); - rxcfg &= ~NGBE_RXCFG_VLAN; - } - rxcfg &= ~NGBE_RXCFG_ENA; +static void +ngbe_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on) +{ + struct ngbe_hw *hw = ngbe_dev_hw(dev); - if (restart) { - /* set vlan strip for ring */ - ngbe_dev_rx_queue_stop(dev, queue); - wr32(hw, NGBE_RXBAL(rxq->reg_idx), rxbal); - wr32(hw, NGBE_RXBAH(rxq->reg_idx), rxbah); - wr32(hw, NGBE_RXCFG(rxq->reg_idx), rxcfg); - ngbe_dev_rx_queue_start(dev, queue); + if (!hw->adapter_stopped) { + PMD_DRV_LOG(ERR, "Please stop port first"); + return; } + + ngbe_vlan_strip_q_set(dev, queue, on); } static int @@ -844,9 +830,9 @@ ngbe_vlan_hw_strip_config(struct rte_eth_dev *dev) rxq = dev->data->rx_queues[i]; if (rxq->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP) - ngbe_vlan_hw_strip_enable(dev, i); + ngbe_vlan_strip_q_set(dev, i, 1); else - ngbe_vlan_hw_strip_disable(dev, i); + ngbe_vlan_strip_q_set(dev, i, 0); } } @@ -908,6 +894,13 @@ ngbe_vlan_offload_config(struct rte_eth_dev *dev, int mask) static int ngbe_vlan_offload_set(struct rte_eth_dev *dev, int mask) { + struct ngbe_hw *hw = ngbe_dev_hw(dev); + + if (!hw->adapter_stopped && (mask & RTE_ETH_VLAN_STRIP_MASK)) { + PMD_DRV_LOG(ERR, "Please stop port first"); + return -EPERM; + } + ngbe_config_vlan_strip_on_all_queues(dev, mask); ngbe_vlan_offload_config(dev, mask); @@ -1277,6 +1270,9 @@ ngbe_dev_close(struct rte_eth_dev *dev) ngbe_dev_stop(dev); + /* Let firmware take over control of hardware */ + wr32m(hw, NGBE_PORTCTL, NGBE_PORTCTL_DRVLOAD, 0); + ngbe_dev_free_queues(dev); ngbe_set_pcie_master(hw, false); @@ -1507,6 +1503,7 @@ ngbe_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) struct ngbe_hw_stats *hw_stats = NGBE_DEV_STATS(dev); struct ngbe_stat_mappings *stat_mappings = NGBE_DEV_STAT_MAPPINGS(dev); + struct ngbe_tx_queue *txq; uint32_t i, j; ngbe_read_stats_registers(hw, hw_stats); @@ -1559,6 +1556,11 @@ ngbe_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) /* Tx Errors */ stats->oerrors = 0; + for (i = 0; i < dev->data->nb_tx_queues; i++) { + txq = dev->data->tx_queues[i]; + stats->oerrors += txq->desc_error; + } + return 0; } @@ -1567,6 +1569,13 @@ ngbe_dev_stats_reset(struct rte_eth_dev *dev) { struct ngbe_hw *hw = ngbe_dev_hw(dev); struct ngbe_hw_stats *hw_stats = NGBE_DEV_STATS(dev); + struct ngbe_tx_queue *txq; + uint32_t i; + + for (i = 0; i < dev->data->nb_tx_queues; i++) { + txq = dev->data->tx_queues[i]; + txq->desc_error = 0; + } /* HW registers are cleared on read */ hw->offset_loaded = 0; @@ -1923,6 +1932,7 @@ ngbe_dev_link_update_share(struct rte_eth_dev *dev, bool link_up; int err; int wait = 1; + u32 reg; memset(&link, 0, sizeof(link)); link.link_status = RTE_ETH_LINK_DOWN; @@ -1980,8 +1990,13 @@ ngbe_dev_link_update_share(struct rte_eth_dev *dev, wr32m(hw, NGBE_MACTXCFG, NGBE_MACTXCFG_SPEED_MASK, NGBE_MACTXCFG_SPEED_1G | NGBE_MACTXCFG_TE); } + /* Re configure MAC RX */ + reg = rd32(hw, NGBE_MACRXCFG); + wr32(hw, NGBE_MACRXCFG, reg); wr32m(hw, NGBE_MACRXFLT, NGBE_MACRXFLT_PROMISC, NGBE_MACRXFLT_PROMISC); + reg = rd32(hw, NGBE_MAC_WDG_TIMEOUT); + wr32(hw, NGBE_MAC_WDG_TIMEOUT, reg); } return rte_eth_linkstatus_set(dev, &link); @@ -2169,6 
+2184,19 @@ ngbe_dev_interrupt_get_status(struct rte_eth_dev *dev) struct ngbe_hw *hw = ngbe_dev_hw(dev); struct ngbe_interrupt *intr = ngbe_dev_intr(dev); + eicr = ((u32 *)hw->isb_mem)[NGBE_ISB_VEC0]; + if (!eicr) { + /* + * shared interrupt alert! + * make sure interrupts are enabled because the read will + * have disabled interrupts. + */ + if (!hw->adapter_stopped) + ngbe_enable_intr(dev); + return 0; + } + ((u32 *)hw->isb_mem)[NGBE_ISB_VEC0] = 0; + /* read-on-clear nic registers here */ eicr = ((u32 *)hw->isb_mem)[NGBE_ISB_MISC]; PMD_DRV_LOG(DEBUG, "eicr %x", eicr); diff --git a/drivers/net/ngbe/ngbe_rxtx.c b/drivers/net/ngbe/ngbe_rxtx.c index f3eb797d0c..8d31d47de9 100644 --- a/drivers/net/ngbe/ngbe_rxtx.c +++ b/drivers/net/ngbe/ngbe_rxtx.c @@ -113,6 +113,8 @@ tx4(volatile struct ngbe_tx_desc *txdp, struct rte_mbuf **pkts) for (i = 0; i < 4; ++i, ++txdp, ++pkts) { buf_dma_addr = rte_mbuf_data_iova(*pkts); pkt_len = (*pkts)->data_len; + if (pkt_len < RTE_ETHER_HDR_LEN) + pkt_len = NGBE_FRAME_SIZE_DFT; /* write data to descriptor */ txdp->qw0 = rte_cpu_to_le_64(buf_dma_addr); @@ -133,6 +135,8 @@ tx1(volatile struct ngbe_tx_desc *txdp, struct rte_mbuf **pkts) buf_dma_addr = rte_mbuf_data_iova(*pkts); pkt_len = (*pkts)->data_len; + if (pkt_len < RTE_ETHER_HDR_LEN) + pkt_len = NGBE_FRAME_SIZE_DFT; /* write data to descriptor */ txdp->qw0 = cpu_to_le64(buf_dma_addr); @@ -555,6 +559,30 @@ ngbe_xmit_cleanup(struct ngbe_tx_queue *txq) return 0; } +static inline bool +ngbe_check_pkt_err(struct rte_mbuf *tx_pkt) +{ + uint32_t total_len = 0, nb_seg = 0; + struct rte_mbuf *mseg; + + mseg = tx_pkt; + do { + if (mseg->data_len == 0) + return true; + total_len += mseg->data_len; + nb_seg++; + mseg = mseg->next; + } while (mseg != NULL); + + if (tx_pkt->pkt_len != total_len || tx_pkt->pkt_len == 0) + return true; + + if (tx_pkt->nb_segs != nb_seg || tx_pkt->nb_segs > 64) + return true; + + return false; +} + uint16_t ngbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) @@ -599,6 +627,12 @@ ngbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) { new_ctx = 0; tx_pkt = *tx_pkts++; + if (ngbe_check_pkt_err(tx_pkt)) { + rte_pktmbuf_free(tx_pkt); + txq->desc_error++; + continue; + } + pkt_len = tx_pkt->pkt_len; /* @@ -2067,6 +2101,7 @@ ngbe_dev_tx_queue_setup(struct rte_eth_dev *dev, ngbe_set_tx_function(dev, txq); txq->ops->reset(txq); + txq->desc_error = 0; dev->data->tx_queues[queue_idx] = txq; diff --git a/drivers/net/ngbe/ngbe_rxtx.h b/drivers/net/ngbe/ngbe_rxtx.h index 7574db32d8..8534ec123a 100644 --- a/drivers/net/ngbe/ngbe_rxtx.h +++ b/drivers/net/ngbe/ngbe_rxtx.h @@ -375,6 +375,7 @@ struct ngbe_tx_queue { const struct ngbe_txq_ops *ops; /**< txq ops */ const struct rte_memzone *mz; + uint64_t desc_error; }; struct ngbe_txq_ops { diff --git a/drivers/net/ngbe/ngbe_rxtx_vec_neon.c b/drivers/net/ngbe/ngbe_rxtx_vec_neon.c index dcf12b7070..37075ea5e7 100644 --- a/drivers/net/ngbe/ngbe_rxtx_vec_neon.c +++ b/drivers/net/ngbe/ngbe_rxtx_vec_neon.c @@ -476,9 +476,13 @@ static inline void vtx1(volatile struct ngbe_tx_desc *txdp, struct rte_mbuf *pkt, uint64_t flags) { - uint64x2_t descriptor = { - pkt->buf_iova + pkt->data_off, - (uint64_t)pkt->pkt_len << 45 | flags | pkt->data_len}; + uint16_t pkt_len = pkt->data_len; + + if (pkt_len < RTE_ETHER_HDR_LEN) + pkt_len = NGBE_FRAME_SIZE_DFT; + + uint64x2_t descriptor = {pkt->buf_iova + pkt->data_off, + (uint64_t)pkt_len << 45 | flags | pkt_len}; vst1q_u64((uint64_t *)(uintptr_t)txdp, 
descriptor); } diff --git a/drivers/net/ngbe/ngbe_rxtx_vec_sse.c b/drivers/net/ngbe/ngbe_rxtx_vec_sse.c index b128bd3a67..19c69cdfa6 100644 --- a/drivers/net/ngbe/ngbe_rxtx_vec_sse.c +++ b/drivers/net/ngbe/ngbe_rxtx_vec_sse.c @@ -563,9 +563,14 @@ static inline void vtx1(volatile struct ngbe_tx_desc *txdp, struct rte_mbuf *pkt, uint64_t flags) { - __m128i descriptor = _mm_set_epi64x((uint64_t)pkt->pkt_len << 45 | - flags | pkt->data_len, - pkt->buf_iova + pkt->data_off); + uint16_t pkt_len = pkt->data_len; + __m128i descriptor; + + if (pkt_len < RTE_ETHER_HDR_LEN) + pkt_len = NGBE_FRAME_SIZE_DFT; + + descriptor = _mm_set_epi64x((uint64_t)pkt_len << 45 | flags | pkt_len, + pkt->buf_iova + pkt->data_off); _mm_store_si128((__m128i *)(uintptr_t)txdp, descriptor); } diff --git a/drivers/net/ntnic/adapter/nt4ga_adapter.c b/drivers/net/ntnic/adapter/nt4ga_adapter.c index d9e6716c30..fa72dfda8d 100644 --- a/drivers/net/ntnic/adapter/nt4ga_adapter.c +++ b/drivers/net/ntnic/adapter/nt4ga_adapter.c @@ -212,19 +212,26 @@ static int nt4ga_adapter_init(struct adapter_info_s *p_adapter_info) } } - nthw_rmc_t *p_nthw_rmc = nthw_rmc_new(); - if (p_nthw_rmc == NULL) { - NT_LOG(ERR, NTNIC, "Failed to allocate memory for RMC module"); - return -1; - } + const struct nt4ga_stat_ops *nt4ga_stat_ops = get_nt4ga_stat_ops(); - res = nthw_rmc_init(p_nthw_rmc, p_fpga, 0); - if (res) { - NT_LOG(ERR, NTNIC, "Failed to initialize RMC module"); - return -1; - } + if (nt4ga_stat_ops != NULL) { + /* Nt4ga Stat init/setup */ + res = nt4ga_stat_ops->nt4ga_stat_init(p_adapter_info); + + if (res != 0) { + NT_LOG(ERR, NTNIC, "%s: Cannot initialize the statistics module", + p_adapter_id_str); + return res; + } + + res = nt4ga_stat_ops->nt4ga_stat_setup(p_adapter_info); - nthw_rmc_unblock(p_nthw_rmc, false); + if (res != 0) { + NT_LOG(ERR, NTNIC, "%s: Cannot setup the statistics module", + p_adapter_id_str); + return res; + } + } return 0; } diff --git a/drivers/net/ntnic/adapter/nt4ga_stat/nt4ga_stat.c b/drivers/net/ntnic/adapter/nt4ga_stat/nt4ga_stat.c new file mode 100644 index 0000000000..8fedfdcd04 --- /dev/null +++ b/drivers/net/ntnic/adapter/nt4ga_stat/nt4ga_stat.c @@ -0,0 +1,598 @@ +/* + * SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2023 Napatech A/S + */ + +#include "ntlog.h" +#include "nt_util.h" +#include "nthw_drv.h" +#include "nthw_fpga.h" +#include "nthw_fpga_param_defs.h" +#include "nt4ga_adapter.h" +#include "ntnic_nim.h" +#include "flow_filter.h" +#include "ntnic_stat.h" +#include "ntnic_mod_reg.h" + +#define DEFAULT_MAX_BPS_SPEED 100e9 + +/* Inline timestamp format s pcap 32:32 bits. 
Convert to nsecs */ +static inline uint64_t timestamp2ns(uint64_t ts) +{ + return ((ts) >> 32) * 1000000000 + ((ts) & 0xffffffff); +} + +static int nt4ga_stat_collect_cap_v1_stats(struct adapter_info_s *p_adapter_info, + nt4ga_stat_t *p_nt4ga_stat, + uint32_t *p_stat_dma_virtual); + +static int nt4ga_stat_collect(struct adapter_info_s *p_adapter_info, nt4ga_stat_t *p_nt4ga_stat) +{ + nthw_stat_t *p_nthw_stat = p_nt4ga_stat->mp_nthw_stat; + + p_nt4ga_stat->last_timestamp = timestamp2ns(*p_nthw_stat->mp_timestamp); + nt4ga_stat_collect_cap_v1_stats(p_adapter_info, p_nt4ga_stat, + p_nt4ga_stat->p_stat_dma_virtual); + + return 0; +} + +static int nt4ga_stat_init(struct adapter_info_s *p_adapter_info) +{ + const char *const p_adapter_id_str = p_adapter_info->mp_adapter_id_str; + fpga_info_t *fpga_info = &p_adapter_info->fpga_info; + nthw_fpga_t *p_fpga = fpga_info->mp_fpga; + nt4ga_stat_t *p_nt4ga_stat = &p_adapter_info->nt4ga_stat; + + if (p_nt4ga_stat) { + memset(p_nt4ga_stat, 0, sizeof(nt4ga_stat_t)); + + } else { + NT_LOG_DBGX(ERR, NTNIC, "%s: ERROR", p_adapter_id_str); + return -1; + } + + { + nthw_stat_t *p_nthw_stat = nthw_stat_new(); + + if (!p_nthw_stat) { + NT_LOG_DBGX(ERR, NTNIC, "%s: ERROR", p_adapter_id_str); + return -1; + } + + if (nthw_rmc_init(NULL, p_fpga, 0) == 0) { + nthw_rmc_t *p_nthw_rmc = nthw_rmc_new(); + + if (!p_nthw_rmc) { + nthw_stat_delete(p_nthw_stat); + NT_LOG(ERR, NTNIC, "%s: ERROR rmc allocation", p_adapter_id_str); + return -1; + } + + nthw_rmc_init(p_nthw_rmc, p_fpga, 0); + p_nt4ga_stat->mp_nthw_rmc = p_nthw_rmc; + + } else { + p_nt4ga_stat->mp_nthw_rmc = NULL; + } + + if (nthw_rpf_init(NULL, p_fpga, p_adapter_info->adapter_no) == 0) { + nthw_rpf_t *p_nthw_rpf = nthw_rpf_new(); + + if (!p_nthw_rpf) { + nthw_stat_delete(p_nthw_stat); + NT_LOG_DBGX(ERR, NTNIC, "%s: ERROR", p_adapter_id_str); + return -1; + } + + nthw_rpf_init(p_nthw_rpf, p_fpga, p_adapter_info->adapter_no); + p_nt4ga_stat->mp_nthw_rpf = p_nthw_rpf; + + } else { + p_nt4ga_stat->mp_nthw_rpf = NULL; + } + + p_nt4ga_stat->mp_nthw_stat = p_nthw_stat; + nthw_stat_init(p_nthw_stat, p_fpga, 0); + + p_nt4ga_stat->mn_rx_host_buffers = p_nthw_stat->m_nb_rx_host_buffers; + p_nt4ga_stat->mn_tx_host_buffers = p_nthw_stat->m_nb_tx_host_buffers; + + p_nt4ga_stat->mn_rx_ports = p_nthw_stat->m_nb_rx_ports; + p_nt4ga_stat->mn_tx_ports = p_nthw_stat->m_nb_tx_ports; + } + + return 0; +} + +static int nt4ga_stat_setup(struct adapter_info_s *p_adapter_info) +{ + const int n_physical_adapter_no = p_adapter_info->adapter_no; + (void)n_physical_adapter_no; + nt4ga_stat_t *p_nt4ga_stat = &p_adapter_info->nt4ga_stat; + nthw_stat_t *p_nthw_stat = p_nt4ga_stat->mp_nthw_stat; + + if (p_nt4ga_stat->mp_nthw_rmc) + nthw_rmc_block(p_nt4ga_stat->mp_nthw_rmc); + + if (p_nt4ga_stat->mp_nthw_rpf) + nthw_rpf_block(p_nt4ga_stat->mp_nthw_rpf); + + /* Allocate and map memory for fpga statistics */ + { + uint32_t n_stat_size = (uint32_t)(p_nthw_stat->m_nb_counters * sizeof(uint32_t) + + sizeof(p_nthw_stat->mp_timestamp)); + struct nt_dma_s *p_dma; + int numa_node = p_adapter_info->fpga_info.numa_node; + + /* FPGA needs a 16K alignment on Statistics */ + p_dma = nt_dma_alloc(n_stat_size, 0x4000, numa_node); + + if (!p_dma) { + NT_LOG_DBGX(ERR, NTNIC, "p_dma alloc failed"); + return -1; + } + + NT_LOG_DBGX(DBG, NTNIC, "%x @%d %" PRIx64 " %" PRIx64, n_stat_size, numa_node, + p_dma->addr, p_dma->iova); + + NT_LOG(DBG, NTNIC, + "DMA: Physical adapter %02d, PA = 0x%016" PRIX64 " DMA = 0x%016" PRIX64 + " size = 0x%" PRIX32 "", + 
n_physical_adapter_no, p_dma->iova, p_dma->addr, n_stat_size); + + p_nt4ga_stat->p_stat_dma_virtual = (uint32_t *)p_dma->addr; + p_nt4ga_stat->n_stat_size = n_stat_size; + p_nt4ga_stat->p_stat_dma = p_dma; + + memset(p_nt4ga_stat->p_stat_dma_virtual, 0xaa, n_stat_size); + nthw_stat_set_dma_address(p_nthw_stat, p_dma->iova, + p_nt4ga_stat->p_stat_dma_virtual); + } + + if (p_nt4ga_stat->mp_nthw_rmc) + nthw_rmc_unblock(p_nt4ga_stat->mp_nthw_rmc, false); + + if (p_nt4ga_stat->mp_nthw_rpf) + nthw_rpf_unblock(p_nt4ga_stat->mp_nthw_rpf); + + p_nt4ga_stat->mp_stat_structs_color = + calloc(p_nthw_stat->m_nb_color_counters, sizeof(struct color_counters)); + + if (!p_nt4ga_stat->mp_stat_structs_color) { + NT_LOG_DBGX(ERR, GENERAL, "Cannot allocate mem."); + return -1; + } + + p_nt4ga_stat->mp_stat_structs_hb = + calloc(p_nt4ga_stat->mn_rx_host_buffers + p_nt4ga_stat->mn_tx_host_buffers, + sizeof(struct host_buffer_counters)); + + if (!p_nt4ga_stat->mp_stat_structs_hb) { + NT_LOG_DBGX(ERR, GENERAL, "Cannot allocate mem."); + return -1; + } + + p_nt4ga_stat->cap.mp_stat_structs_port_rx = + calloc(NUM_ADAPTER_PORTS_MAX, sizeof(struct port_counters_v2)); + + if (!p_nt4ga_stat->cap.mp_stat_structs_port_rx) { + NT_LOG_DBGX(ERR, GENERAL, "Cannot allocate mem."); + return -1; + } + + p_nt4ga_stat->cap.mp_stat_structs_port_tx = + calloc(NUM_ADAPTER_PORTS_MAX, sizeof(struct port_counters_v2)); + + if (!p_nt4ga_stat->cap.mp_stat_structs_port_tx) { + NT_LOG_DBGX(ERR, GENERAL, "Cannot allocate mem."); + return -1; + } + + if (get_flow_filter_ops() != NULL) { + struct flow_nic_dev *ndev = p_adapter_info->nt4ga_filter.mp_flow_device; + p_nt4ga_stat->flm_stat_ver = ndev->be.flm.ver; + p_nt4ga_stat->mp_stat_structs_flm = calloc(1, sizeof(struct flm_counters_v1)); + + if (!p_nt4ga_stat->mp_stat_structs_flm) { + NT_LOG_DBGX(ERR, GENERAL, "Cannot allocate mem."); + return -1; + } + + p_nt4ga_stat->mp_stat_structs_flm->max_aps = + nthw_fpga_get_product_param(p_adapter_info->fpga_info.mp_fpga, + NT_FLM_LOAD_APS_MAX, 0); + p_nt4ga_stat->mp_stat_structs_flm->max_lps = + nthw_fpga_get_product_param(p_adapter_info->fpga_info.mp_fpga, + NT_FLM_LOAD_LPS_MAX, 0); + } + + p_nt4ga_stat->mp_port_load = + calloc(NUM_ADAPTER_PORTS_MAX, sizeof(struct port_load_counters)); + + if (!p_nt4ga_stat->mp_port_load) { + NT_LOG_DBGX(ERR, GENERAL, "Cannot allocate mem."); + return -1; + } + +#ifdef NIM_TRIGGER + uint64_t max_bps_speed = nt_get_max_link_speed(p_adapter_info->nt4ga_link.speed_capa); + + if (max_bps_speed == 0) + max_bps_speed = DEFAULT_MAX_BPS_SPEED; + +#else + uint64_t max_bps_speed = DEFAULT_MAX_BPS_SPEED; + NT_LOG(ERR, NTNIC, "NIM module not included"); +#endif + + for (int p = 0; p < NUM_ADAPTER_PORTS_MAX; p++) { + p_nt4ga_stat->mp_port_load[p].rx_bps_max = max_bps_speed; + p_nt4ga_stat->mp_port_load[p].tx_bps_max = max_bps_speed; + p_nt4ga_stat->mp_port_load[p].rx_pps_max = max_bps_speed / (8 * (20 + 64)); + p_nt4ga_stat->mp_port_load[p].tx_pps_max = max_bps_speed / (8 * (20 + 64)); + } + + memset(p_nt4ga_stat->a_stat_structs_color_base, 0, + sizeof(struct color_counters) * NT_MAX_COLOR_FLOW_STATS); + p_nt4ga_stat->last_timestamp = 0; + + nthw_stat_trigger(p_nthw_stat); + + return 0; +} + +/* Called with stat mutex locked */ +static int nt4ga_stat_collect_cap_v1_stats(struct adapter_info_s *p_adapter_info, + nt4ga_stat_t *p_nt4ga_stat, + uint32_t *p_stat_dma_virtual) +{ + (void)p_adapter_info; + const struct flow_filter_ops *flow_filter_ops = get_flow_filter_ops(); + + if (flow_filter_ops == NULL) + return -1; + + 
nthw_stat_t *p_nthw_stat = p_nt4ga_stat->mp_nthw_stat; + struct flow_nic_dev *ndev = p_adapter_info->nt4ga_filter.mp_flow_device; + + const int n_rx_ports = p_nt4ga_stat->mn_rx_ports; + const int n_tx_ports = p_nt4ga_stat->mn_tx_ports; + int c, h, p; + + if (!p_nthw_stat || !p_nt4ga_stat) + return -1; + + if (p_nthw_stat->mn_stat_layout_version < 6) { + NT_LOG(ERR, NTNIC, "HW STA module version not supported"); + return -1; + } + + /* RX ports */ + for (c = 0; c < p_nthw_stat->m_nb_color_counters / 2; c++) { + p_nt4ga_stat->mp_stat_structs_color[c].color_packets += p_stat_dma_virtual[c * 2]; + p_nt4ga_stat->mp_stat_structs_color[c].color_bytes += + p_stat_dma_virtual[c * 2 + 1]; + } + + /* Move to Host buffer counters */ + p_stat_dma_virtual += p_nthw_stat->m_nb_color_counters; + + for (h = 0; h < p_nthw_stat->m_nb_rx_host_buffers; h++) { + p_nt4ga_stat->mp_stat_structs_hb[h].flush_packets += p_stat_dma_virtual[h * 8]; + p_nt4ga_stat->mp_stat_structs_hb[h].drop_packets += p_stat_dma_virtual[h * 8 + 1]; + p_nt4ga_stat->mp_stat_structs_hb[h].fwd_packets += p_stat_dma_virtual[h * 8 + 2]; + p_nt4ga_stat->mp_stat_structs_hb[h].dbs_drop_packets += + p_stat_dma_virtual[h * 8 + 3]; + p_nt4ga_stat->mp_stat_structs_hb[h].flush_bytes += p_stat_dma_virtual[h * 8 + 4]; + p_nt4ga_stat->mp_stat_structs_hb[h].drop_bytes += p_stat_dma_virtual[h * 8 + 5]; + p_nt4ga_stat->mp_stat_structs_hb[h].fwd_bytes += p_stat_dma_virtual[h * 8 + 6]; + p_nt4ga_stat->mp_stat_structs_hb[h].dbs_drop_bytes += + p_stat_dma_virtual[h * 8 + 7]; + } + + /* Move to Rx Port counters */ + p_stat_dma_virtual += p_nthw_stat->m_nb_rx_hb_counters; + + /* RX ports */ + for (p = 0; p < n_rx_ports; p++) { + p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].octets += + p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 0]; + + p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].broadcast_pkts += + p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 1]; + p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].multicast_pkts += + p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 2]; + p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].unicast_pkts += + p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 3]; + p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts_alignment += + p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 4]; + p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts_code_violation += + p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 5]; + p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts_crc += + p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 6]; + p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].undersize_pkts += + p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 7]; + p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].oversize_pkts += + p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 8]; + p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].fragments += + p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 9]; + p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].jabbers_not_truncated += + p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 10]; + p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].jabbers_truncated += + p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 11]; + + p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts_64_octets += + p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 12]; + p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts_65_to_127_octets += + p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 13]; + 
p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts_128_to_255_octets += + p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 14]; + p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts_256_to_511_octets += + p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 15]; + p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts_512_to_1023_octets += + p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 16]; + p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts_1024_to_1518_octets += + p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 17]; + p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts_1519_to_2047_octets += + p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 18]; + p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts_2048_to_4095_octets += + p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 19]; + p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts_4096_to_8191_octets += + p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 20]; + p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts_8192_to_max_octets += + p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 21]; + + p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].mac_drop_events += + p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 22]; + p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts_lr += + p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 23]; + p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].duplicate += + p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 24]; + + p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts_ip_chksum_error += + p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 25]; + p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts_udp_chksum_error += + p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 26]; + p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts_tcp_chksum_error += + p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 27]; + p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts_giant_undersize += + p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 28]; + p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts_baby_giant += + p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 29]; + p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts_not_isl_vlan_mpls += + p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 30]; + p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts_isl += + p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 31]; + p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts_vlan += + p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 32]; + p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts_isl_vlan += + p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 33]; + p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts_mpls += + p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 34]; + p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts_isl_mpls += + p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 35]; + p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts_vlan_mpls += + p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 36]; + p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts_isl_vlan_mpls += + p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 37]; + + p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts_no_filter += + p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 38]; + p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts_dedup_drop += + p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 39]; + 
p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts_filter_drop += + p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 40]; + p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts_overflow += + p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 41]; + p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts_dbs_drop += + p_nthw_stat->m_dbs_present + ? p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 42] + : 0; + p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].octets_no_filter += + p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 43]; + p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].octets_dedup_drop += + p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 44]; + p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].octets_filter_drop += + p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 45]; + p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].octets_overflow += + p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 46]; + p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].octets_dbs_drop += + p_nthw_stat->m_dbs_present + ? p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 47] + : 0; + + p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].ipft_first_hit += + p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 48]; + p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].ipft_first_not_hit += + p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 49]; + p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].ipft_mid_hit += + p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 50]; + p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].ipft_mid_not_hit += + p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 51]; + p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].ipft_last_hit += + p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 52]; + p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].ipft_last_not_hit += + p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 53]; + + /* Rx totals */ + uint64_t new_drop_events_sum = + p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 22] + + p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 38] + + p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 39] + + p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 40] + + p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 41] + + (p_nthw_stat->m_dbs_present + ? 
p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 42] + : 0); + + uint64_t new_packets_sum = + p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 7] + + p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 8] + + p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 9] + + p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 10] + + p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 11] + + p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 12] + + p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 13] + + p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 14] + + p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 15] + + p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 16] + + p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 17] + + p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 18] + + p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 19] + + p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 20] + + p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 21]; + + p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].drop_events += new_drop_events_sum; + p_nt4ga_stat->cap.mp_stat_structs_port_rx[p].pkts += new_packets_sum; + + p_nt4ga_stat->a_port_rx_octets_total[p] += + p_stat_dma_virtual[p * p_nthw_stat->m_nb_rx_port_counters + 0]; + p_nt4ga_stat->a_port_rx_packets_total[p] += new_packets_sum; + p_nt4ga_stat->a_port_rx_drops_total[p] += new_drop_events_sum; + } + + /* Move to Tx Port counters */ + p_stat_dma_virtual += n_rx_ports * p_nthw_stat->m_nb_rx_port_counters; + + for (p = 0; p < n_tx_ports; p++) { + p_nt4ga_stat->cap.mp_stat_structs_port_tx[p].octets += + p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 0]; + + p_nt4ga_stat->cap.mp_stat_structs_port_tx[p].broadcast_pkts += + p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 1]; + p_nt4ga_stat->cap.mp_stat_structs_port_tx[p].multicast_pkts += + p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 2]; + p_nt4ga_stat->cap.mp_stat_structs_port_tx[p].unicast_pkts += + p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 3]; + p_nt4ga_stat->cap.mp_stat_structs_port_tx[p].pkts_alignment += + p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 4]; + p_nt4ga_stat->cap.mp_stat_structs_port_tx[p].pkts_code_violation += + p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 5]; + p_nt4ga_stat->cap.mp_stat_structs_port_tx[p].pkts_crc += + p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 6]; + p_nt4ga_stat->cap.mp_stat_structs_port_tx[p].undersize_pkts += + p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 7]; + p_nt4ga_stat->cap.mp_stat_structs_port_tx[p].oversize_pkts += + p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 8]; + p_nt4ga_stat->cap.mp_stat_structs_port_tx[p].fragments += + p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 9]; + p_nt4ga_stat->cap.mp_stat_structs_port_tx[p].jabbers_not_truncated += + p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 10]; + p_nt4ga_stat->cap.mp_stat_structs_port_tx[p].jabbers_truncated += + p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 11]; + + p_nt4ga_stat->cap.mp_stat_structs_port_tx[p].pkts_64_octets += + p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 12]; + p_nt4ga_stat->cap.mp_stat_structs_port_tx[p].pkts_65_to_127_octets += + p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 13]; + 
p_nt4ga_stat->cap.mp_stat_structs_port_tx[p].pkts_128_to_255_octets += + p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 14]; + p_nt4ga_stat->cap.mp_stat_structs_port_tx[p].pkts_256_to_511_octets += + p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 15]; + p_nt4ga_stat->cap.mp_stat_structs_port_tx[p].pkts_512_to_1023_octets += + p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 16]; + p_nt4ga_stat->cap.mp_stat_structs_port_tx[p].pkts_1024_to_1518_octets += + p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 17]; + p_nt4ga_stat->cap.mp_stat_structs_port_tx[p].pkts_1519_to_2047_octets += + p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 18]; + p_nt4ga_stat->cap.mp_stat_structs_port_tx[p].pkts_2048_to_4095_octets += + p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 19]; + p_nt4ga_stat->cap.mp_stat_structs_port_tx[p].pkts_4096_to_8191_octets += + p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 20]; + p_nt4ga_stat->cap.mp_stat_structs_port_tx[p].pkts_8192_to_max_octets += + p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 21]; + + p_nt4ga_stat->cap.mp_stat_structs_port_tx[p].mac_drop_events += + p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 22]; + p_nt4ga_stat->cap.mp_stat_structs_port_tx[p].pkts_lr += + p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 23]; + + /* Tx totals */ + uint64_t new_drop_events_sum = + p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 22]; + + uint64_t new_packets_sum = + p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 7] + + p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 8] + + p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 9] + + p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 10] + + p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 11] + + p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 12] + + p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 13] + + p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 14] + + p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 15] + + p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 16] + + p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 17] + + p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 18] + + p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 19] + + p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 20] + + p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 21]; + + p_nt4ga_stat->cap.mp_stat_structs_port_tx[p].drop_events += new_drop_events_sum; + p_nt4ga_stat->cap.mp_stat_structs_port_tx[p].pkts += new_packets_sum; + + p_nt4ga_stat->a_port_tx_octets_total[p] += + p_stat_dma_virtual[p * p_nthw_stat->m_nb_tx_port_counters + 0]; + p_nt4ga_stat->a_port_tx_packets_total[p] += new_packets_sum; + p_nt4ga_stat->a_port_tx_drops_total[p] += new_drop_events_sum; + } + + /* Update and get port load counters */ + for (p = 0; p < n_rx_ports; p++) { + uint32_t val; + nthw_stat_get_load_bps_rx(p_nthw_stat, p, &val); + p_nt4ga_stat->mp_port_load[p].rx_bps = + (uint64_t)(((__uint128_t)val * 32ULL * 64ULL * 8ULL) / + PORT_LOAD_WINDOWS_SIZE); + nthw_stat_get_load_pps_rx(p_nthw_stat, p, &val); + p_nt4ga_stat->mp_port_load[p].rx_pps = + (uint64_t)(((__uint128_t)val * 32ULL) / PORT_LOAD_WINDOWS_SIZE); + } + + for (p = 0; p < n_tx_ports; p++) { + uint32_t val; + nthw_stat_get_load_bps_tx(p_nthw_stat, p, &val); + p_nt4ga_stat->mp_port_load[p].tx_bps = + 
(uint64_t)(((__uint128_t)val * 32ULL * 64ULL * 8ULL) / + PORT_LOAD_WINDOWS_SIZE); + nthw_stat_get_load_pps_tx(p_nthw_stat, p, &val); + p_nt4ga_stat->mp_port_load[p].tx_pps = + (uint64_t)(((__uint128_t)val * 32ULL) / PORT_LOAD_WINDOWS_SIZE); + } + + /* Update and get FLM stats */ + flow_filter_ops->flow_get_flm_stats(ndev, (uint64_t *)p_nt4ga_stat->mp_stat_structs_flm, + sizeof(struct flm_counters_v1) / sizeof(uint64_t)); + + /* + * Calculate correct load values: + * rpp = nthw_fpga_get_product_param(p_fpga, NT_RPP_PER_PS, 0); + * bin = (uint32_t)(((FLM_LOAD_WINDOWS_SIZE * 1000000000000ULL) / (32ULL * rpp)) - 1ULL); + * load_aps = ((uint64_t)load_aps * 1000000000000ULL) / (uint64_t)((bin+1) * rpp); + * load_lps = ((uint64_t)load_lps * 1000000000000ULL) / (uint64_t)((bin+1) * rpp); + * + * Simplified it gives: + * + * load_lps = (load_lps * 32ULL) / FLM_LOAD_WINDOWS_SIZE + * load_aps = (load_aps * 32ULL) / FLM_LOAD_WINDOWS_SIZE + */ + + p_nt4ga_stat->mp_stat_structs_flm->load_aps = + (p_nt4ga_stat->mp_stat_structs_flm->load_aps * 32ULL) / FLM_LOAD_WINDOWS_SIZE; + p_nt4ga_stat->mp_stat_structs_flm->load_lps = + (p_nt4ga_stat->mp_stat_structs_flm->load_lps * 32ULL) / FLM_LOAD_WINDOWS_SIZE; + return 0; +} + +static struct nt4ga_stat_ops ops = { + .nt4ga_stat_init = nt4ga_stat_init, + .nt4ga_stat_setup = nt4ga_stat_setup, + .nt4ga_stat_collect = nt4ga_stat_collect +}; + +void nt4ga_stat_ops_init(void) +{ + NT_LOG_DBGX(DBG, NTNIC, "Stat module was initialized"); + register_nt4ga_stat_ops(&ops); +} diff --git a/drivers/net/ntnic/dbsconfig/ntnic_dbsconfig.c b/drivers/net/ntnic/dbsconfig/ntnic_dbsconfig.c index bde0fed273..e46a3bef28 100644 --- a/drivers/net/ntnic/dbsconfig/ntnic_dbsconfig.c +++ b/drivers/net/ntnic/dbsconfig/ntnic_dbsconfig.c @@ -3,6 +3,7 @@ * Copyright(c) 2023 Napatech A/S */ +#include #include #include "ntos_drv.h" @@ -67,20 +68,20 @@ } \ } while (0) -struct __rte_aligned(8) virtq_avail { +struct __rte_packed virtq_avail { uint16_t flags; uint16_t idx; uint16_t ring[]; /* Queue Size */ }; -struct __rte_aligned(8) virtq_used_elem { +struct __rte_packed virtq_used_elem { /* Index of start of used descriptor chain. */ uint32_t id; /* Total length of the descriptor chain which was used (written to) */ uint32_t len; }; -struct __rte_aligned(8) virtq_used { +struct __rte_packed virtq_used { uint16_t flags; uint16_t idx; struct virtq_used_elem ring[]; /* Queue Size */ diff --git a/drivers/net/ntnic/include/common_adapter_defs.h b/drivers/net/ntnic/include/common_adapter_defs.h new file mode 100644 index 0000000000..6ed9121f0f --- /dev/null +++ b/drivers/net/ntnic/include/common_adapter_defs.h @@ -0,0 +1,15 @@ +/* + * SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2023 Napatech A/S + */ + +#ifndef _COMMON_ADAPTER_DEFS_H_ +#define _COMMON_ADAPTER_DEFS_H_ + +/* + * Declarations shared by NT adapter types. 
+ */ +#define NUM_ADAPTER_MAX (8) +#define NUM_ADAPTER_PORTS_MAX (128) + +#endif /* _COMMON_ADAPTER_DEFS_H_ */ diff --git a/drivers/net/ntnic/include/create_elements.h b/drivers/net/ntnic/include/create_elements.h new file mode 100644 index 0000000000..1456977837 --- /dev/null +++ b/drivers/net/ntnic/include/create_elements.h @@ -0,0 +1,73 @@ +/* + * SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2023 Napatech A/S + */ + +#ifndef __CREATE_ELEMENTS_H__ +#define __CREATE_ELEMENTS_H__ + +#include "stdint.h" + +#include "stream_binary_flow_api.h" +#include + +#define MAX_ELEMENTS 64 +#define MAX_ACTIONS 32 + +struct cnv_match_s { + struct rte_flow_item rte_flow_item[MAX_ELEMENTS]; +}; + +struct cnv_attr_s { + struct cnv_match_s match; + struct rte_flow_attr attr; + uint16_t forced_vlan_vid; + uint16_t caller_id; +}; + +struct cnv_action_s { + struct rte_flow_action flow_actions[MAX_ACTIONS]; + struct rte_flow_action_rss flow_rss; + struct flow_action_raw_encap encap; + struct flow_action_raw_decap decap; + struct rte_flow_action_queue queue; +}; + +/* + * Only needed because it eases the use of statistics through NTAPI + * for faster integration into NTAPI version of driver + * Therefore, this is only a good idea when running on a temporary NTAPI + * The query() functionality must go to flow engine, when moved to Open Source driver + */ + +struct rte_flow { + void *flw_hdl; + int used; + + uint32_t flow_stat_id; + + uint64_t stat_pkts; + uint64_t stat_bytes; + uint8_t stat_tcp_flags; + + uint16_t caller_id; +}; + +enum nt_rte_flow_item_type { + NT_RTE_FLOW_ITEM_TYPE_END = INT_MIN, + NT_RTE_FLOW_ITEM_TYPE_TUNNEL, +}; + +extern rte_spinlock_t flow_lock; + +int interpret_raw_data(uint8_t *data, uint8_t *preserve, int size, struct rte_flow_item *out); +int convert_error(struct rte_flow_error *error, struct rte_flow_error *rte_flow_error); +int create_attr(struct cnv_attr_s *attribute, const struct rte_flow_attr *attr); +int create_match_elements(struct cnv_match_s *match, const struct rte_flow_item items[], + int max_elem); +int create_action_elements_inline(struct cnv_action_s *action, + const struct rte_flow_action actions[], + int max_elem, + uint32_t queue_offset); + +#endif /* __CREATE_ELEMENTS_H__ */ diff --git a/drivers/net/ntnic/include/flow_api.h b/drivers/net/ntnic/include/flow_api.h index 984450afdc..d5382669da 100644 --- a/drivers/net/ntnic/include/flow_api.h +++ b/drivers/net/ntnic/include/flow_api.h @@ -6,7 +6,7 @@ #ifndef _FLOW_API_H_ #define _FLOW_API_H_ -#include +#include #include "ntlog.h" @@ -29,11 +29,44 @@ struct hw_mod_resource_s { */ int flow_delete_eth_dev(struct flow_eth_dev *eth_dev); +/** + * A structure used to configure the Receive Side Scaling (RSS) feature + * of an Ethernet port. + */ +struct nt_eth_rss_conf { + /** + * In rte_eth_dev_rss_hash_conf_get(), the *rss_key_len* should be + * greater than or equal to the *hash_key_size* which get from + * rte_eth_dev_info_get() API. And the *rss_key* should contain at least + * *hash_key_size* bytes. If not meet these requirements, the query + * result is unreliable even if the operation returns success. + * + * In rte_eth_dev_rss_hash_update() or rte_eth_dev_configure(), if + * *rss_key* is not NULL, the *rss_key_len* indicates the length of the + * *rss_key* in bytes and it should be equal to *hash_key_size*. + * If *rss_key* is NULL, drivers are free to use a random or a default key. 
+ */ + uint8_t rss_key[MAX_RSS_KEY_LEN]; + /** + * Indicates the type of packets or the specific part of packets to + * which RSS hashing is to be applied. + */ + uint64_t rss_hf; + /** + * Hash algorithm. + */ + enum rte_eth_hash_function algorithm; +}; + +int sprint_nt_rss_mask(char *str, uint16_t str_len, const char *prefix, uint64_t hash_mask); + struct flow_eth_dev { /* NIC that owns this port device */ struct flow_nic_dev *ndev; /* NIC port id */ uint8_t port; + /* App assigned port_id - may be DPDK port_id */ + uint32_t port_id; /* 0th for exception */ struct flow_queue_id_s rx_queue[FLOW_MAX_QUEUES + 1]; @@ -41,38 +74,119 @@ struct flow_eth_dev { /* VSWITCH has exceptions sent on queue 0 per design */ int num_queues; + /* QSL_HSH index if RSS needed QSL v6+ */ + int rss_target_id; + + /* The size of buffer for aged out flow list */ + uint32_t nb_aging_objects; + struct flow_eth_dev *next; }; +enum flow_nic_hash_e { + HASH_ALGO_ROUND_ROBIN = 0, + HASH_ALGO_5TUPLE, +}; + /* registered NIC backends */ struct flow_nic_dev { uint8_t adapter_no; /* physical adapter no in the host system */ uint16_t ports; /* number of in-ports addressable on this NIC */ + /* flow profile this NIC is initially prepared for */ + enum flow_eth_dev_profile flow_profile; + int flow_mgnt_prepared; struct hw_mod_resource_s res[RES_COUNT];/* raw NIC resource allocation table */ void *km_res_handle; void *kcc_res_handle; + void *flm_mtr_handle; + void *group_handle; + void *hw_db_handle; + void *id_table_handle; + uint32_t flow_unique_id_counter; /* linked list of all flows created on this NIC */ struct flow_handle *flow_base; + /* linked list of all FLM flows created on this NIC */ + struct flow_handle *flow_base_flm; + rte_spinlock_t flow_mtx; /* NIC backend API */ struct flow_api_backend_s be; /* linked list of created eth-port devices on this NIC */ struct flow_eth_dev *eth_base; - pthread_mutex_t mtx; + rte_spinlock_t mtx; + /* RSS hashing configuration */ + struct nt_eth_rss_conf rss_conf; /* next NIC linked list */ struct flow_nic_dev *next; }; +enum flow_nic_err_msg_e { + ERR_SUCCESS = 0, + ERR_FAILED = 1, + ERR_MEMORY = 2, + ERR_OUTPUT_TOO_MANY = 3, + ERR_RSS_TOO_MANY_QUEUES = 4, + ERR_VLAN_TYPE_NOT_SUPPORTED = 5, + ERR_VXLAN_HEADER_NOT_ACCEPTED = 6, + ERR_VXLAN_POP_INVALID_RECIRC_PORT = 7, + ERR_VXLAN_POP_FAILED_CREATING_VTEP = 8, + ERR_MATCH_VLAN_TOO_MANY = 9, + ERR_MATCH_INVALID_IPV6_HDR = 10, + ERR_MATCH_TOO_MANY_TUNNEL_PORTS = 11, + ERR_MATCH_INVALID_OR_UNSUPPORTED_ELEM = 12, + ERR_MATCH_FAILED_BY_HW_LIMITS = 13, + ERR_MATCH_RESOURCE_EXHAUSTION = 14, + ERR_MATCH_FAILED_TOO_COMPLEX = 15, + ERR_ACTION_REPLICATION_FAILED = 16, + ERR_ACTION_OUTPUT_RESOURCE_EXHAUSTION = 17, + ERR_ACTION_TUNNEL_HEADER_PUSH_OUTPUT_LIMIT = 18, + ERR_ACTION_INLINE_MOD_RESOURCE_EXHAUSTION = 19, + ERR_ACTION_RETRANSMIT_RESOURCE_EXHAUSTION = 20, + ERR_ACTION_FLOW_COUNTER_EXHAUSTION = 21, + ERR_ACTION_INTERNAL_RESOURCE_EXHAUSTION = 22, + ERR_INTERNAL_QSL_COMPARE_FAILED = 23, + ERR_INTERNAL_CAT_FUNC_REUSE_FAILED = 24, + ERR_MATCH_ENTROPHY_FAILED = 25, + ERR_MATCH_CAM_EXHAUSTED = 26, + ERR_INTERNAL_VIRTUAL_PORT_CREATION_FAILED = 27, + ERR_ACTION_UNSUPPORTED = 28, + ERR_REMOVE_FLOW_FAILED = 29, + ERR_ACTION_NO_OUTPUT_DEFINED_USE_DEFAULT = 30, + ERR_ACTION_NO_OUTPUT_QUEUE_FOUND = 31, + ERR_MATCH_UNSUPPORTED_ETHER_TYPE = 32, + ERR_OUTPUT_INVALID = 33, + ERR_MATCH_PARTIAL_OFFLOAD_NOT_SUPPORTED = 34, + ERR_MATCH_CAT_CAM_EXHAUSTED = 35, + ERR_MATCH_KCC_KEY_CLASH = 36, + ERR_MATCH_CAT_CAM_FAILED = 37, + ERR_PARTIAL_FLOW_MARK_TOO_BIG = 
38, + ERR_FLOW_PRIORITY_VALUE_INVALID = 39, + ERR_ACTION_MULTIPLE_PORT_ID_UNSUPPORTED = 40, + ERR_RSS_TOO_LONG_KEY = 41, + ERR_ACTION_AGE_UNSUPPORTED_GROUP_0 = 42, + ERR_MSG_NO_MSG +}; + +void flow_nic_set_error(enum flow_nic_err_msg_e msg, struct rte_flow_error *error); + /* * Resources */ extern const char *dbg_res_descr[]; +#define flow_nic_set_bit(arr, x) \ + do { \ + uint8_t *_temp_arr = (arr); \ + size_t _temp_x = (x); \ + _temp_arr[_temp_x / 8] = \ + (uint8_t)(_temp_arr[_temp_x / 8] | (uint8_t)(1 << (_temp_x % 8))); \ + } while (0) + #define flow_nic_unset_bit(arr, x) \ do { \ size_t _temp_x = (x); \ @@ -85,6 +199,18 @@ extern const char *dbg_res_descr[]; (arr[_temp_x / 8] & (uint8_t)(1 << (_temp_x % 8))); \ }) +#define flow_nic_mark_resource_used(_ndev, res_type, index) \ + do { \ + struct flow_nic_dev *_temp_ndev = (_ndev); \ + typeof(res_type) _temp_res_type = (res_type); \ + size_t _temp_index = (index); \ + NT_LOG(DBG, FILTER, "mark resource used: %s idx %zu", \ + dbg_res_descr[_temp_res_type], _temp_index); \ + assert(flow_nic_is_bit_set(_temp_ndev->res[_temp_res_type].alloc_bm, \ + _temp_index) == 0); \ + flow_nic_set_bit(_temp_ndev->res[_temp_res_type].alloc_bm, _temp_index); \ + } while (0) + #define flow_nic_mark_resource_unused(_ndev, res_type, index) \ do { \ typeof(res_type) _temp_res_type = (res_type); \ @@ -97,8 +223,20 @@ extern const char *dbg_res_descr[]; #define flow_nic_is_resource_used(_ndev, res_type, index) \ (!!flow_nic_is_bit_set((_ndev)->res[res_type].alloc_bm, index)) +int flow_nic_alloc_resource(struct flow_nic_dev *ndev, enum res_type_e res_type, + uint32_t alignment); + +int flow_nic_alloc_resource_config(struct flow_nic_dev *ndev, enum res_type_e res_type, + unsigned int num, uint32_t alignment); void flow_nic_free_resource(struct flow_nic_dev *ndev, enum res_type_e res_type, int idx); +int flow_nic_ref_resource(struct flow_nic_dev *ndev, enum res_type_e res_type, int index); int flow_nic_deref_resource(struct flow_nic_dev *ndev, enum res_type_e res_type, int index); +int flow_nic_set_hasher(struct flow_nic_dev *ndev, int hsh_idx, enum flow_nic_hash_e algorithm); +int flow_nic_set_hasher_fields(struct flow_nic_dev *ndev, int hsh_idx, + struct nt_eth_rss_conf rss_conf); + +int flow_get_flm_stats(struct flow_nic_dev *ndev, uint64_t *data, uint64_t size); + #endif diff --git a/drivers/net/ntnic/include/flow_api_engine.h b/drivers/net/ntnic/include/flow_api_engine.h index db5e6fe09d..5eace2614f 100644 --- a/drivers/net/ntnic/include/flow_api_engine.h +++ b/drivers/net/ntnic/include/flow_api_engine.h @@ -6,6 +6,12 @@ #ifndef _FLOW_API_ENGINE_H_ #define _FLOW_API_ENGINE_H_ +#include +#include + +#include "hw_mod_backend.h" +#include "stream_binary_flow_api.h" + /* * Resource management */ @@ -41,8 +47,382 @@ enum res_type_e { RES_INVALID }; +/* + * Flow NIC offload management + */ +#define MAX_OUTPUT_DEST (128) + +#define MAX_WORD_NUM 24 +#define MAX_BANKS 6 + +#define MAX_TCAM_START_OFFSETS 4 + +#define MAX_FLM_MTRS_SUPPORTED 4 +#define MAX_CPY_WRITERS_SUPPORTED 8 + +#define MAX_MATCH_FIELDS 16 + +/* + * 128 128 32 32 32 + * Have | QW0 || QW4 || SW8 || SW9 | SWX in FPGA + * + * Each word may start at any offset, though + * they are combined in chronological order, with all enabled to + * build the extracted match data, thus that is how the match key + * must be build + */ +enum extractor_e { + KM_USE_EXTRACTOR_UNDEF, + KM_USE_EXTRACTOR_QWORD, + KM_USE_EXTRACTOR_SWORD, +}; + +struct match_elem_s { + enum extractor_e extr; + int masked_for_tcam; /* if 
potentially selected for TCAM */ + uint32_t e_word[4]; + uint32_t e_mask[4]; + + int extr_start_offs_id; + int8_t rel_offs; + uint32_t word_len; +}; + +enum cam_tech_use_e { + KM_CAM, + KM_TCAM, + KM_SYNERGY +}; + +struct km_flow_def_s { + struct flow_api_backend_s *be; + + /* For keeping track of identical entries */ + struct km_flow_def_s *reference; + struct km_flow_def_s *root; + + /* For collect flow elements and sorting */ + struct match_elem_s match[MAX_MATCH_FIELDS]; + struct match_elem_s *match_map[MAX_MATCH_FIELDS]; + int num_ftype_elem; + + /* Finally formatted CAM/TCAM entry */ + enum cam_tech_use_e target; + uint32_t entry_word[MAX_WORD_NUM]; + uint32_t entry_mask[MAX_WORD_NUM]; + int key_word_size; + + /* TCAM calculated possible bank start offsets */ + int start_offsets[MAX_TCAM_START_OFFSETS]; + int num_start_offsets; + + /* Flow information */ + /* HW input port ID needed for compare. In port must be identical on flow types */ + uint32_t port_id; + uint32_t info; /* used for color (actions) */ + int info_set; + int flow_type; /* 0 is illegal and used as unset */ + int flushed_to_target; /* if this km entry has been finally programmed into NIC hw */ + + /* CAM specific bank management */ + int cam_paired; + int record_indexes[MAX_BANKS]; + int bank_used; + uint32_t *cuckoo_moves; /* for CAM statistics only */ + struct cam_distrib_s *cam_dist; + struct hasher_s *hsh; + + /* TCAM specific bank management */ + struct tcam_distrib_s *tcam_dist; + int tcam_start_bank; + int tcam_record; +}; + +/* + * RSS configuration, see struct rte_flow_action_rss + */ +struct hsh_def_s { + enum rte_eth_hash_function func; /* RSS hash function to apply */ + /* RSS hash types, see definition of RTE_ETH_RSS_* for hash calculation options */ + uint64_t types; + uint32_t key_len; /* Hash key length in bytes. */ + const uint8_t *key; /* Hash key. */ +}; + +/* + * AGE configuration, see struct rte_flow_action_age + */ +struct age_def_s { + uint32_t timeout; + void *context; +}; + +/* + * Tunnel encapsulation header definition + */ +#define MAX_TUN_HDR_SIZE 128 + +struct tunnel_header_s { + union { + uint8_t hdr8[MAX_TUN_HDR_SIZE]; + uint32_t hdr32[(MAX_TUN_HDR_SIZE + 3) / 4]; + } d; + + uint8_t len; + + uint8_t nb_vlans; + + uint8_t ip_version; /* 4: v4, 6: v6 */ + + uint8_t new_outer; + uint8_t l2_len; + uint8_t l3_len; + uint8_t l4_len; +}; + +enum flow_port_type_e { + PORT_NONE, /* not defined or drop */ + PORT_INTERNAL, /* no queues attached */ + PORT_PHY, /* MAC phy output queue */ + PORT_VIRT, /* Memory queues to Host */ +}; + +struct output_s { + uint32_t owning_port_id;/* the port who owns this output destination */ + enum flow_port_type_e type; + int id; /* depending on port type: queue ID or physical port id or not used */ + int active; /* activated */ +}; + +struct nic_flow_def { + /* + * Frame Decoder match info collected + */ + int l2_prot; + int l3_prot; + int l4_prot; + int tunnel_prot; + int tunnel_l3_prot; + int tunnel_l4_prot; + int vlans; + int fragmentation; + int ip_prot; + int tunnel_ip_prot; + /* + * Additional meta data for various functions + */ + int in_port_override; + int non_empty; /* default value is -1; value 1 means flow actions update */ + struct output_s dst_id[MAX_OUTPUT_DEST];/* define the output to use */ + /* total number of available queues defined for all outputs - i.e. 
number of dst_id's */ + int dst_num_avail; + + /* + * Mark or Action info collection + */ + uint32_t mark; + + uint32_t jump_to_group; + + uint32_t mtr_ids[MAX_FLM_MTRS_SUPPORTED]; + + int full_offload; + + /* + * Action push tunnel + */ + struct tunnel_header_s tun_hdr; + + /* + * If DPDK RTE tunnel helper API used + * this holds the tunnel if used in flow + */ + struct tunnel_s *tnl; + + /* + * Header Stripper + */ + int header_strip_end_dyn; + int header_strip_end_ofs; + + /* + * Modify field + */ + struct { + uint32_t select; + uint32_t dyn; + uint32_t ofs; + uint32_t len; + uint32_t level; + union { + uint8_t value8[16]; + uint16_t value16[8]; + uint32_t value32[4]; + }; + } modify_field[MAX_CPY_WRITERS_SUPPORTED]; + + uint32_t modify_field_count; + uint8_t ttl_sub_enable; + uint8_t ttl_sub_ipv4; + uint8_t ttl_sub_outer; + + /* + * Key Matcher flow definitions + */ + struct km_flow_def_s km; + + /* + * Hash module RSS definitions + */ + struct hsh_def_s hsh; + + /* + * AGE action timeout + */ + struct age_def_s age; + + /* + * TX fragmentation IFR/RPP_LR MTU recipe + */ + uint8_t flm_mtu_fragmentation_recipe; +}; + +enum flow_handle_type { + FLOW_HANDLE_TYPE_FLOW, + FLOW_HANDLE_TYPE_FLM, +}; + +struct flow_handle { + enum flow_handle_type type; + uint32_t flm_id; + uint16_t caller_id; + uint16_t learn_ignored; + + struct flow_eth_dev *dev; + struct flow_handle *next; + struct flow_handle *prev; + + /* Flow specific pointer to application data stored during action creation. */ + void *context; + void *user_data; + + union { + struct { + /* + * 1st step conversion and validation of flow + * verified and converted flow match + actions structure + */ + struct nic_flow_def *fd; + /* + * 2nd step NIC HW resource allocation and configuration + * NIC resource management structures + */ + struct { + uint32_t db_idx_counter; + uint32_t db_idxs[RES_COUNT]; + }; + uint32_t port_id; /* MAC port ID or override of virtual in_port */ + }; + + struct { + uint32_t flm_db_idx_counter; + uint32_t flm_db_idxs[RES_COUNT]; + + uint32_t flm_mtr_ids[MAX_FLM_MTRS_SUPPORTED]; + + uint32_t flm_data[10]; + uint8_t flm_prot; + uint8_t flm_kid; + uint8_t flm_prio; + uint8_t flm_ft; + + uint16_t flm_rpl_ext_ptr; + uint32_t flm_nat_ipv4; + uint16_t flm_nat_port; + uint8_t flm_dscp; + uint32_t flm_teid; + uint8_t flm_rqi; + uint8_t flm_qfi; + uint8_t flm_scrub_prof; + + uint8_t flm_mtu_fragmentation_recipe; + + /* Flow specific pointer to application template table cell stored during + * flow create. 
+ */ + struct flow_template_table_cell *template_table_cell; + bool flm_async; + }; + }; +}; + +struct flow_pattern_template { + struct nic_flow_def *fd; +}; + +struct flow_actions_template { + struct nic_flow_def *fd; + + uint32_t num_dest_port; + uint32_t num_queues; +}; + +struct flow_template_table_cell { + atomic_int status; + atomic_int counter; + + uint32_t flm_db_idx_counter; + uint32_t flm_db_idxs[RES_COUNT]; + + uint32_t flm_key_id; + uint32_t flm_ft; + + uint16_t flm_rpl_ext_ptr; + uint8_t flm_scrub_prof; +}; + +struct flow_template_table { + struct flow_pattern_template **pattern_templates; + uint8_t nb_pattern_templates; + + struct flow_actions_template **actions_templates; + uint8_t nb_actions_templates; + + struct flow_template_table_cell *pattern_action_pairs; + + struct rte_flow_attr attr; + uint16_t forced_vlan_vid; + uint16_t caller_id; +}; + +void km_attach_ndev_resource_management(struct km_flow_def_s *km, void **handle); void km_free_ndev_resource_management(void **handle); +int km_add_match_elem(struct km_flow_def_s *km, uint32_t e_word[4], uint32_t e_mask[4], + uint32_t word_len, enum frame_offs_e start, int8_t offset); + +int km_key_create(struct km_flow_def_s *km, uint32_t port_id); +/* + * Compares 2 KM key definitions after first collect validate and optimization. + * km is compared against an existing km1. + * if identical, km1 flow_type is returned + */ +int km_key_compare(struct km_flow_def_s *km, struct km_flow_def_s *km1); + +int km_rcp_set(struct km_flow_def_s *km, int index); + +int km_write_data_match_entry(struct km_flow_def_s *km, uint32_t color); +int km_clear_data_match_entry(struct km_flow_def_s *km); + void kcc_free_ndev_resource_management(void **handle); +/* + * Group management + */ +int flow_group_handle_create(void **handle, uint32_t group_count); +int flow_group_handle_destroy(void **handle); + +int flow_group_translate_get(void *handle, uint8_t owner_id, uint8_t port_id, uint32_t group_in, + uint32_t *group_out); + #endif /* _FLOW_API_ENGINE_H_ */ diff --git a/drivers/net/ntnic/include/flow_filter.h b/drivers/net/ntnic/include/flow_filter.h index d204c0d882..01777f8c9f 100644 --- a/drivers/net/ntnic/include/flow_filter.h +++ b/drivers/net/ntnic/include/flow_filter.h @@ -11,5 +11,6 @@ int flow_filter_init(nthw_fpga_t *p_fpga, struct flow_nic_dev **p_flow_device, int adapter_no); int flow_filter_done(struct flow_nic_dev *dev); +int flow_get_flm_stats(struct flow_nic_dev *ndev, uint64_t *data, uint64_t size); #endif /* __FLOW_FILTER_HPP__ */ diff --git a/drivers/net/ntnic/include/hw_mod_backend.h b/drivers/net/ntnic/include/hw_mod_backend.h index 34154c65f8..f91a3ed058 100644 --- a/drivers/net/ntnic/include/hw_mod_backend.h +++ b/drivers/net/ntnic/include/hw_mod_backend.h @@ -120,6 +120,21 @@ enum { } \ } while (0) +static inline int is_non_zero(const void *addr, size_t n) +{ + size_t i = 0; + const uint8_t *p = (const uint8_t *)addr; + + for (i = 0; i < n; i++) + if (p[i] != 0) + return 1; + + return 0; +} + +/* Sideband info bit indicator */ +#define SWX_INFO (1 << 6) + enum km_flm_if_select_e { KM_FLM_IF_FIRST = 0, KM_FLM_IF_SECOND = 1 @@ -133,6 +148,112 @@ enum km_flm_if_select_e { unsigned int alloced_size; \ int debug +enum frame_offs_e { + DYN_SOF = 0, + DYN_L2 = 1, + DYN_FIRST_VLAN = 2, + DYN_MPLS = 3, + DYN_L3 = 4, + DYN_ID_IPV4_6 = 5, + DYN_FINAL_IP_DST = 6, + DYN_L4 = 7, + DYN_L4_PAYLOAD = 8, + DYN_TUN_PAYLOAD = 9, + DYN_TUN_L2 = 10, + DYN_TUN_VLAN = 11, + DYN_TUN_MPLS = 12, + DYN_TUN_L3 = 13, + DYN_TUN_ID_IPV4_6 = 14, + 
DYN_TUN_FINAL_IP_DST = 15, + DYN_TUN_L4 = 16, + DYN_TUN_L4_PAYLOAD = 17, + DYN_EOF = 18, + DYN_L3_PAYLOAD_END = 19, + DYN_TUN_L3_PAYLOAD_END = 20, + SB_VNI = SWX_INFO | 1, + SB_MAC_PORT = SWX_INFO | 2, + SB_KCC_ID = SWX_INFO | 3 +}; + +enum { + QW0_SEL_EXCLUDE = 0, + QW0_SEL_FIRST32 = 1, + QW0_SEL_FIRST64 = 3, + QW0_SEL_ALL128 = 4, +}; + +enum { + QW4_SEL_EXCLUDE = 0, + QW4_SEL_FIRST32 = 1, + QW4_SEL_FIRST64 = 2, + QW4_SEL_ALL128 = 3, +}; + +enum { + DW8_SEL_EXCLUDE = 0, + DW8_SEL_FIRST32 = 3, +}; + +enum { + DW10_SEL_EXCLUDE = 0, + DW10_SEL_FIRST32 = 2, +}; + +enum { + SWX_SEL_EXCLUDE = 0, + SWX_SEL_ALL32 = 1, +}; + +enum { + PROT_OTHER = 0, + PROT_L2_ETH2 = 1, +}; + +enum { + PROT_L3_IPV4 = 1, + PROT_L3_IPV6 = 2 +}; + +enum { + PROT_L4_TCP = 1, + PROT_L4_UDP = 2, + PROT_L4_SCTP = 3, + PROT_L4_ICMP = 4 +}; + +enum { + PROT_TUN_GTPV1U = 6, +}; + +enum { + PROT_TUN_L3_OTHER = 0, + PROT_TUN_L3_IPV4 = 1, + PROT_TUN_L3_IPV6 = 2 +}; + +enum { + PROT_TUN_L4_OTHER = 0, + PROT_TUN_L4_TCP = 1, + PROT_TUN_L4_UDP = 2, + PROT_TUN_L4_SCTP = 3, + PROT_TUN_L4_ICMP = 4 +}; + + +enum { + HASH_HASH_NONE = 0, + HASH_5TUPLE = 8, +}; + +enum { + CPY_SELECT_DSCP_IPV4 = 0, + CPY_SELECT_DSCP_IPV6 = 1, + CPY_SELECT_RQI_QFI = 2, + CPY_SELECT_IPV4 = 3, + CPY_SELECT_PORT = 4, + CPY_SELECT_TEID = 5, +}; + struct common_func_s { COMMON_FUNC_INFO_S; }; @@ -245,11 +366,63 @@ int hw_mod_cat_reset(struct flow_api_backend_s *be); int hw_mod_cat_cfn_flush(struct flow_api_backend_s *be, int start_idx, int count); int hw_mod_cat_cfn_set(struct flow_api_backend_s *be, enum hw_cat_e field, int index, int word_off, uint32_t value); +/* KCE/KCS/FTE KM */ +int hw_mod_cat_kce_km_flush(struct flow_api_backend_s *be, enum km_flm_if_select_e if_num, + int start_idx, int count); +int hw_mod_cat_kce_km_set(struct flow_api_backend_s *be, enum hw_cat_e field, + enum km_flm_if_select_e if_num, int index, uint32_t value); +int hw_mod_cat_kce_km_get(struct flow_api_backend_s *be, enum hw_cat_e field, + enum km_flm_if_select_e if_num, int index, uint32_t *value); +int hw_mod_cat_kcs_km_flush(struct flow_api_backend_s *be, enum km_flm_if_select_e if_num, + int start_idx, int count); +int hw_mod_cat_kcs_km_set(struct flow_api_backend_s *be, enum hw_cat_e field, + enum km_flm_if_select_e if_num, int index, uint32_t value); +int hw_mod_cat_kcs_km_get(struct flow_api_backend_s *be, enum hw_cat_e field, + enum km_flm_if_select_e if_num, int index, uint32_t *value); +int hw_mod_cat_fte_km_flush(struct flow_api_backend_s *be, enum km_flm_if_select_e if_num, + int start_idx, int count); +int hw_mod_cat_fte_km_set(struct flow_api_backend_s *be, enum hw_cat_e field, + enum km_flm_if_select_e if_num, int index, uint32_t value); +int hw_mod_cat_fte_km_get(struct flow_api_backend_s *be, enum hw_cat_e field, + enum km_flm_if_select_e if_num, int index, uint32_t *value); +/* KCE/KCS/FTE FLM */ +int hw_mod_cat_kce_flm_flush(struct flow_api_backend_s *be, enum km_flm_if_select_e if_num, + int start_idx, int count); +int hw_mod_cat_kce_flm_set(struct flow_api_backend_s *be, enum hw_cat_e field, + enum km_flm_if_select_e if_num, int index, uint32_t value); +int hw_mod_cat_kce_flm_get(struct flow_api_backend_s *be, enum hw_cat_e field, + enum km_flm_if_select_e if_num, int index, uint32_t *value); +int hw_mod_cat_kcs_flm_flush(struct flow_api_backend_s *be, enum km_flm_if_select_e if_num, + int start_idx, int count); +int hw_mod_cat_kcs_flm_set(struct flow_api_backend_s *be, enum hw_cat_e field, + enum km_flm_if_select_e if_num, int index, uint32_t value); +int 
hw_mod_cat_kcs_flm_get(struct flow_api_backend_s *be, enum hw_cat_e field, + enum km_flm_if_select_e if_num, int index, uint32_t *value); +int hw_mod_cat_fte_flm_flush(struct flow_api_backend_s *be, enum km_flm_if_select_e if_num, + int start_idx, int count); +int hw_mod_cat_fte_flm_set(struct flow_api_backend_s *be, enum hw_cat_e field, + enum km_flm_if_select_e if_num, int index, uint32_t value); +int hw_mod_cat_fte_flm_get(struct flow_api_backend_s *be, enum hw_cat_e field, + enum km_flm_if_select_e if_num, int index, uint32_t *value); int hw_mod_cat_cte_flush(struct flow_api_backend_s *be, int start_idx, int count); +int hw_mod_cat_cte_set(struct flow_api_backend_s *be, enum hw_cat_e field, int index, + uint32_t value); +int hw_mod_cat_cte_get(struct flow_api_backend_s *be, enum hw_cat_e field, int index, + uint32_t *value); + int hw_mod_cat_cts_flush(struct flow_api_backend_s *be, int start_idx, int count); +int hw_mod_cat_cts_set(struct flow_api_backend_s *be, enum hw_cat_e field, int index, + uint32_t value); +int hw_mod_cat_cts_get(struct flow_api_backend_s *be, enum hw_cat_e field, int index, + uint32_t *value); + int hw_mod_cat_cot_flush(struct flow_api_backend_s *be, int start_idx, int count); +int hw_mod_cat_cot_set(struct flow_api_backend_s *be, enum hw_cat_e field, int index, + uint32_t value); + int hw_mod_cat_cct_flush(struct flow_api_backend_s *be, int start_idx, int count); + int hw_mod_cat_kcc_flush(struct flow_api_backend_s *be, int start_idx, int count); int hw_mod_cat_exo_flush(struct flow_api_backend_s *be, int start_idx, int count); @@ -346,13 +519,24 @@ int hw_mod_km_alloc(struct flow_api_backend_s *be); void hw_mod_km_free(struct flow_api_backend_s *be); int hw_mod_km_reset(struct flow_api_backend_s *be); int hw_mod_km_rcp_flush(struct flow_api_backend_s *be, int start_idx, int count); +int hw_mod_km_rcp_set(struct flow_api_backend_s *be, enum hw_km_e field, int index, int word_off, + uint32_t value); +int hw_mod_km_rcp_get(struct flow_api_backend_s *be, enum hw_km_e field, int index, int word_off, + uint32_t *value); int hw_mod_km_cam_flush(struct flow_api_backend_s *be, int start_bank, int start_record, int count); +int hw_mod_km_cam_set(struct flow_api_backend_s *be, enum hw_km_e field, int bank, int record, + uint32_t value); + int hw_mod_km_tcam_flush(struct flow_api_backend_s *be, int start_bank, int count); int hw_mod_km_tcam_set(struct flow_api_backend_s *be, enum hw_km_e field, int bank, int byte, int byte_val, uint32_t *value_set); +int hw_mod_km_tcam_get(struct flow_api_backend_s *be, enum hw_km_e field, int bank, int byte, + int byte_val, uint32_t *value_set); int hw_mod_km_tci_flush(struct flow_api_backend_s *be, int start_bank, int start_record, int count); +int hw_mod_km_tci_set(struct flow_api_backend_s *be, enum hw_km_e field, int bank, int record, + uint32_t value); int hw_mod_km_tcq_flush(struct flow_api_backend_s *be, int start_bank, int start_record, int count); @@ -482,11 +666,48 @@ int hw_mod_flm_reset(struct flow_api_backend_s *be); int hw_mod_flm_control_flush(struct flow_api_backend_s *be); int hw_mod_flm_control_set(struct flow_api_backend_s *be, enum hw_flm_e field, uint32_t value); +int hw_mod_flm_status_update(struct flow_api_backend_s *be); +int hw_mod_flm_status_get(struct flow_api_backend_s *be, enum hw_flm_e field, uint32_t *value); + int hw_mod_flm_scan_flush(struct flow_api_backend_s *be); +int hw_mod_flm_scan_set(struct flow_api_backend_s *be, enum hw_flm_e field, uint32_t value); + +int hw_mod_flm_load_bin_flush(struct 
flow_api_backend_s *be); +int hw_mod_flm_load_bin_set(struct flow_api_backend_s *be, enum hw_flm_e field, uint32_t value); + +int hw_mod_flm_prio_flush(struct flow_api_backend_s *be); +int hw_mod_flm_prio_set(struct flow_api_backend_s *be, enum hw_flm_e field, uint32_t value); + +int hw_mod_flm_pst_flush(struct flow_api_backend_s *be, int start_idx, int count); +int hw_mod_flm_pst_set(struct flow_api_backend_s *be, enum hw_flm_e field, int index, + uint32_t value); int hw_mod_flm_rcp_flush(struct flow_api_backend_s *be, int start_idx, int count); +int hw_mod_flm_rcp_set_mask(struct flow_api_backend_s *be, enum hw_flm_e field, int index, + uint32_t *value); +int hw_mod_flm_rcp_set(struct flow_api_backend_s *be, enum hw_flm_e field, int index, + uint32_t value); + +int hw_mod_flm_buf_ctrl_update(struct flow_api_backend_s *be); +int hw_mod_flm_buf_ctrl_get(struct flow_api_backend_s *be, enum hw_flm_e field, uint32_t *value); + +int hw_mod_flm_stat_update(struct flow_api_backend_s *be); +int hw_mod_flm_stat_get(struct flow_api_backend_s *be, enum hw_flm_e field, uint32_t *value); +int hw_mod_flm_lrn_data_set_flush(struct flow_api_backend_s *be, enum hw_flm_e field, + const uint32_t *value, uint32_t records, + uint32_t *handled_records, uint32_t *inf_word_cnt, + uint32_t *sta_word_cnt); +int hw_mod_flm_inf_sta_data_update_get(struct flow_api_backend_s *be, enum hw_flm_e field, + uint32_t *inf_value, uint32_t inf_size, + uint32_t *inf_word_cnt, uint32_t *sta_value, + uint32_t sta_size, uint32_t *sta_word_cnt); + +uint32_t hw_mod_flm_scrub_timeout_decode(uint32_t t_enc); +uint32_t hw_mod_flm_scrub_timeout_encode(uint32_t t); int hw_mod_flm_scrub_flush(struct flow_api_backend_s *be, int start_idx, int count); +int hw_mod_flm_scrub_set(struct flow_api_backend_s *be, enum hw_flm_e field, int index, + uint32_t value); struct hsh_func_s { COMMON_FUNC_INFO_S; @@ -532,6 +753,8 @@ int hw_mod_hsh_alloc(struct flow_api_backend_s *be); void hw_mod_hsh_free(struct flow_api_backend_s *be); int hw_mod_hsh_reset(struct flow_api_backend_s *be); int hw_mod_hsh_rcp_flush(struct flow_api_backend_s *be, int start_idx, int count); +int hw_mod_hsh_rcp_set(struct flow_api_backend_s *be, enum hw_hsh_e field, uint32_t index, + uint32_t word_off, uint32_t value); struct qsl_func_s { COMMON_FUNC_INFO_S; @@ -572,8 +795,16 @@ int hw_mod_qsl_alloc(struct flow_api_backend_s *be); void hw_mod_qsl_free(struct flow_api_backend_s *be); int hw_mod_qsl_reset(struct flow_api_backend_s *be); int hw_mod_qsl_rcp_flush(struct flow_api_backend_s *be, int start_idx, int count); +int hw_mod_qsl_rcp_set(struct flow_api_backend_s *be, enum hw_qsl_e field, uint32_t index, + uint32_t value); int hw_mod_qsl_qst_flush(struct flow_api_backend_s *be, int start_idx, int count); +int hw_mod_qsl_qst_set(struct flow_api_backend_s *be, enum hw_qsl_e field, uint32_t index, + uint32_t value); int hw_mod_qsl_qen_flush(struct flow_api_backend_s *be, int start_idx, int count); +int hw_mod_qsl_qen_set(struct flow_api_backend_s *be, enum hw_qsl_e field, uint32_t index, + uint32_t value); +int hw_mod_qsl_qen_get(struct flow_api_backend_s *be, enum hw_qsl_e field, uint32_t index, + uint32_t *value); int hw_mod_qsl_unmq_flush(struct flow_api_backend_s *be, int start_idx, int count); int hw_mod_qsl_unmq_set(struct flow_api_backend_s *be, enum hw_qsl_e field, uint32_t index, uint32_t value); @@ -603,6 +834,8 @@ int hw_mod_slc_lr_alloc(struct flow_api_backend_s *be); void hw_mod_slc_lr_free(struct flow_api_backend_s *be); int hw_mod_slc_lr_reset(struct 
flow_api_backend_s *be); int hw_mod_slc_lr_rcp_flush(struct flow_api_backend_s *be, int start_idx, int count); +int hw_mod_slc_lr_rcp_set(struct flow_api_backend_s *be, enum hw_slc_lr_e field, uint32_t index, + uint32_t value); struct pdb_func_s { COMMON_FUNC_INFO_S; @@ -644,6 +877,9 @@ int hw_mod_pdb_alloc(struct flow_api_backend_s *be); void hw_mod_pdb_free(struct flow_api_backend_s *be); int hw_mod_pdb_reset(struct flow_api_backend_s *be); int hw_mod_pdb_rcp_flush(struct flow_api_backend_s *be, int start_idx, int count); +int hw_mod_pdb_rcp_set(struct flow_api_backend_s *be, enum hw_pdb_e field, uint32_t index, + uint32_t value); + int hw_mod_pdb_config_flush(struct flow_api_backend_s *be); struct tpe_func_s { @@ -718,24 +954,44 @@ void hw_mod_tpe_free(struct flow_api_backend_s *be); int hw_mod_tpe_reset(struct flow_api_backend_s *be); int hw_mod_tpe_rpp_rcp_flush(struct flow_api_backend_s *be, int start_idx, int count); +int hw_mod_tpe_rpp_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field, int index, + uint32_t value); int hw_mod_tpe_rpp_ifr_rcp_flush(struct flow_api_backend_s *be, int start_idx, int count); +int hw_mod_tpe_rpp_ifr_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field, int index, + uint32_t value); int hw_mod_tpe_ifr_rcp_flush(struct flow_api_backend_s *be, int start_idx, int count); +int hw_mod_tpe_ifr_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field, int index, + uint32_t value); int hw_mod_tpe_ins_rcp_flush(struct flow_api_backend_s *be, int start_idx, int count); +int hw_mod_tpe_ins_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field, int index, + uint32_t value); int hw_mod_tpe_rpl_rcp_flush(struct flow_api_backend_s *be, int start_idx, int count); +int hw_mod_tpe_rpl_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field, int index, + uint32_t value); int hw_mod_tpe_rpl_ext_flush(struct flow_api_backend_s *be, int start_idx, int count); +int hw_mod_tpe_rpl_ext_set(struct flow_api_backend_s *be, enum hw_tpe_e field, int index, + uint32_t value); int hw_mod_tpe_rpl_rpl_flush(struct flow_api_backend_s *be, int start_idx, int count); +int hw_mod_tpe_rpl_rpl_set(struct flow_api_backend_s *be, enum hw_tpe_e field, int index, + uint32_t *value); int hw_mod_tpe_cpy_rcp_flush(struct flow_api_backend_s *be, int start_idx, int count); +int hw_mod_tpe_cpy_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field, int index, + uint32_t value); int hw_mod_tpe_hfu_rcp_flush(struct flow_api_backend_s *be, int start_idx, int count); +int hw_mod_tpe_hfu_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field, int index, + uint32_t value); int hw_mod_tpe_csu_rcp_flush(struct flow_api_backend_s *be, int start_idx, int count); +int hw_mod_tpe_csu_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field, int index, + uint32_t value); enum debug_mode_e { FLOW_BACKEND_DEBUG_MODE_NONE = 0x0000, diff --git a/drivers/net/ntnic/include/nt4ga_adapter.h b/drivers/net/ntnic/include/nt4ga_adapter.h index 809135f130..7396b8ab65 100644 --- a/drivers/net/ntnic/include/nt4ga_adapter.h +++ b/drivers/net/ntnic/include/nt4ga_adapter.h @@ -6,6 +6,7 @@ #ifndef _NT4GA_ADAPTER_H_ #define _NT4GA_ADAPTER_H_ +#include "ntnic_stat.h" #include "nt4ga_link.h" typedef struct hw_info_s { @@ -27,9 +28,9 @@ typedef struct hw_info_s { * Services provided by the adapter module */ #include "nt4ga_filter.h" -#include "ntnic_stat.h" typedef struct adapter_info_s { + struct nt4ga_stat_s nt4ga_stat; struct nt4ga_filter_s nt4ga_filter; struct nt4ga_link_s nt4ga_link; diff --git 
a/drivers/net/ntnic/include/ntdrv_4ga.h b/drivers/net/ntnic/include/ntdrv_4ga.h index 8017aa4fc3..78cf10368a 100644 --- a/drivers/net/ntnic/include/ntdrv_4ga.h +++ b/drivers/net/ntnic/include/ntdrv_4ga.h @@ -7,6 +7,7 @@ #define __NTDRV_4GA_H__ #include "nt4ga_adapter.h" +#include typedef struct ntdrv_4ga_s { uint32_t pciident; @@ -14,6 +15,10 @@ typedef struct ntdrv_4ga_s { char *p_drv_name; volatile bool b_shutdown; + rte_thread_t flm_thread; + rte_spinlock_t stat_lck; + rte_thread_t stat_thread; + rte_thread_t port_event_thread; } ntdrv_4ga_t; #endif /* __NTDRV_4GA_H__ */ diff --git a/drivers/net/ntnic/include/ntnic_stat.h b/drivers/net/ntnic/include/ntnic_stat.h index 148088fe1d..4d4affa3cf 100644 --- a/drivers/net/ntnic/include/ntnic_stat.h +++ b/drivers/net/ntnic/include/ntnic_stat.h @@ -6,6 +6,271 @@ #ifndef NTNIC_STAT_H_ #define NTNIC_STAT_H_ +#include "common_adapter_defs.h" #include "nthw_rmc.h" +#include "nthw_rpf.h" +#include "nthw_fpga_model.h" + +#define NT_MAX_COLOR_FLOW_STATS 0x400 + +struct nthw_stat { + nthw_fpga_t *mp_fpga; + nthw_module_t *mp_mod_stat; + int mn_instance; + + int mn_stat_layout_version; + + bool mb_has_tx_stats; + + int m_nb_phy_ports; + int m_nb_nim_ports; + + int m_nb_rx_ports; + int m_nb_tx_ports; + + int m_nb_rx_host_buffers; + int m_nb_tx_host_buffers; + + int m_dbs_present; + + int m_rx_port_replicate; + + int m_nb_color_counters; + + int m_nb_rx_hb_counters; + int m_nb_tx_hb_counters; + + int m_nb_rx_port_counters; + int m_nb_tx_port_counters; + + int m_nb_counters; + + int m_nb_rpp_per_ps; + + nthw_field_t *mp_fld_dma_ena; + nthw_field_t *mp_fld_cnt_clear; + + nthw_field_t *mp_fld_tx_disable; + + nthw_field_t *mp_fld_cnt_freeze; + + nthw_field_t *mp_fld_stat_toggle_missed; + + nthw_field_t *mp_fld_dma_lsb; + nthw_field_t *mp_fld_dma_msb; + + nthw_field_t *mp_fld_load_bin; + nthw_field_t *mp_fld_load_bps_rx0; + nthw_field_t *mp_fld_load_bps_rx1; + nthw_field_t *mp_fld_load_bps_tx0; + nthw_field_t *mp_fld_load_bps_tx1; + nthw_field_t *mp_fld_load_pps_rx0; + nthw_field_t *mp_fld_load_pps_rx1; + nthw_field_t *mp_fld_load_pps_tx0; + nthw_field_t *mp_fld_load_pps_tx1; + + uint64_t m_stat_dma_physical; + uint32_t *mp_stat_dma_virtual; + + uint64_t *mp_timestamp; +}; + +typedef struct nthw_stat nthw_stat_t; +typedef struct nthw_stat nthw_stat; + +struct color_counters { + uint64_t color_packets; + uint64_t color_bytes; + uint8_t tcp_flags; +}; + +struct host_buffer_counters { + uint64_t flush_packets; + uint64_t drop_packets; + uint64_t fwd_packets; + uint64_t dbs_drop_packets; + uint64_t flush_bytes; + uint64_t drop_bytes; + uint64_t fwd_bytes; + uint64_t dbs_drop_bytes; +}; + +struct port_load_counters { + uint64_t rx_pps; + uint64_t rx_pps_max; + uint64_t tx_pps; + uint64_t tx_pps_max; + uint64_t rx_bps; + uint64_t rx_bps_max; + uint64_t tx_bps; + uint64_t tx_bps_max; +}; + +struct port_counters_v2 { + /* Rx/Tx common port counters */ + uint64_t drop_events; + uint64_t pkts; + /* FPGA counters */ + uint64_t octets; + uint64_t broadcast_pkts; + uint64_t multicast_pkts; + uint64_t unicast_pkts; + uint64_t pkts_alignment; + uint64_t pkts_code_violation; + uint64_t pkts_crc; + uint64_t undersize_pkts; + uint64_t oversize_pkts; + uint64_t fragments; + uint64_t jabbers_not_truncated; + uint64_t jabbers_truncated; + uint64_t pkts_64_octets; + uint64_t pkts_65_to_127_octets; + uint64_t pkts_128_to_255_octets; + uint64_t pkts_256_to_511_octets; + uint64_t pkts_512_to_1023_octets; + uint64_t pkts_1024_to_1518_octets; + uint64_t pkts_1519_to_2047_octets; + 
uint64_t pkts_2048_to_4095_octets; + uint64_t pkts_4096_to_8191_octets; + uint64_t pkts_8192_to_max_octets; + uint64_t mac_drop_events; + uint64_t pkts_lr; + /* Rx only port counters */ + uint64_t duplicate; + uint64_t pkts_ip_chksum_error; + uint64_t pkts_udp_chksum_error; + uint64_t pkts_tcp_chksum_error; + uint64_t pkts_giant_undersize; + uint64_t pkts_baby_giant; + uint64_t pkts_not_isl_vlan_mpls; + uint64_t pkts_isl; + uint64_t pkts_vlan; + uint64_t pkts_isl_vlan; + uint64_t pkts_mpls; + uint64_t pkts_isl_mpls; + uint64_t pkts_vlan_mpls; + uint64_t pkts_isl_vlan_mpls; + uint64_t pkts_no_filter; + uint64_t pkts_dedup_drop; + uint64_t pkts_filter_drop; + uint64_t pkts_overflow; + uint64_t pkts_dbs_drop; + uint64_t octets_no_filter; + uint64_t octets_dedup_drop; + uint64_t octets_filter_drop; + uint64_t octets_overflow; + uint64_t octets_dbs_drop; + uint64_t ipft_first_hit; + uint64_t ipft_first_not_hit; + uint64_t ipft_mid_hit; + uint64_t ipft_mid_not_hit; + uint64_t ipft_last_hit; + uint64_t ipft_last_not_hit; +}; + +struct flm_counters_v1 { + /* FLM 0.17 */ + uint64_t current; + uint64_t learn_done; + uint64_t learn_ignore; + uint64_t learn_fail; + uint64_t unlearn_done; + uint64_t unlearn_ignore; + uint64_t auto_unlearn_done; + uint64_t auto_unlearn_ignore; + uint64_t auto_unlearn_fail; + uint64_t timeout_unlearn_done; + uint64_t rel_done; + uint64_t rel_ignore; + /* FLM 0.20 */ + uint64_t prb_done; + uint64_t prb_ignore; + uint64_t sta_done; + uint64_t inf_done; + uint64_t inf_skip; + uint64_t pck_hit; + uint64_t pck_miss; + uint64_t pck_unh; + uint64_t pck_dis; + uint64_t csh_hit; + uint64_t csh_miss; + uint64_t csh_unh; + uint64_t cuc_start; + uint64_t cuc_move; + /* FLM 0.17 Load */ + uint64_t load_lps; + uint64_t load_aps; + uint64_t max_lps; + uint64_t max_aps; +}; + +struct nt4ga_stat_s { + nthw_stat_t *mp_nthw_stat; + nthw_rmc_t *mp_nthw_rmc; + nthw_rpf_t *mp_nthw_rpf; + struct nt_dma_s *p_stat_dma; + uint32_t *p_stat_dma_virtual; + uint32_t n_stat_size; + + uint64_t last_timestamp; + + int mn_rx_host_buffers; + int mn_tx_host_buffers; + + int mn_rx_ports; + int mn_tx_ports; + + struct color_counters *mp_stat_structs_color; + /* For calculating increments between stats polls */ + struct color_counters a_stat_structs_color_base[NT_MAX_COLOR_FLOW_STATS]; + + /* Port counters for inline */ + struct { + struct port_counters_v2 *mp_stat_structs_port_rx; + struct port_counters_v2 *mp_stat_structs_port_tx; + } cap; + + struct host_buffer_counters *mp_stat_structs_hb; + struct port_load_counters *mp_port_load; + + int flm_stat_ver; + struct flm_counters_v1 *mp_stat_structs_flm; + + /* Rx/Tx totals: */ + uint64_t n_totals_reset_timestamp; /* timestamp for last totals reset */ + + uint64_t a_port_rx_octets_total[NUM_ADAPTER_PORTS_MAX]; + /* Base is for calculating increments between statistics reads */ + uint64_t a_port_rx_octets_base[NUM_ADAPTER_PORTS_MAX]; + + uint64_t a_port_rx_packets_total[NUM_ADAPTER_PORTS_MAX]; + uint64_t a_port_rx_packets_base[NUM_ADAPTER_PORTS_MAX]; + + uint64_t a_port_rx_drops_total[NUM_ADAPTER_PORTS_MAX]; + uint64_t a_port_rx_drops_base[NUM_ADAPTER_PORTS_MAX]; + + uint64_t a_port_tx_octets_total[NUM_ADAPTER_PORTS_MAX]; + uint64_t a_port_tx_octets_base[NUM_ADAPTER_PORTS_MAX]; + + uint64_t a_port_tx_packets_base[NUM_ADAPTER_PORTS_MAX]; + uint64_t a_port_tx_packets_total[NUM_ADAPTER_PORTS_MAX]; + + uint64_t a_port_tx_drops_total[NUM_ADAPTER_PORTS_MAX]; +}; + +typedef struct nt4ga_stat_s nt4ga_stat_t; + +nthw_stat_t *nthw_stat_new(void); +int 
nthw_stat_init(nthw_stat_t *p, nthw_fpga_t *p_fpga, int n_instance); +void nthw_stat_delete(nthw_stat_t *p); + +int nthw_stat_set_dma_address(nthw_stat_t *p, uint64_t stat_dma_physical, + uint32_t *p_stat_dma_virtual); +int nthw_stat_trigger(nthw_stat_t *p); + +int nthw_stat_get_load_bps_rx(nthw_stat_t *p, uint8_t port, uint32_t *val); +int nthw_stat_get_load_bps_tx(nthw_stat_t *p, uint8_t port, uint32_t *val); +int nthw_stat_get_load_pps_rx(nthw_stat_t *p, uint8_t port, uint32_t *val); +int nthw_stat_get_load_pps_tx(nthw_stat_t *p, uint8_t port, uint32_t *val); #endif /* NTNIC_STAT_H_ */ diff --git a/drivers/net/ntnic/include/ntos_drv.h b/drivers/net/ntnic/include/ntos_drv.h index d51d1e3677..f6ce442d17 100644 --- a/drivers/net/ntnic/include/ntos_drv.h +++ b/drivers/net/ntnic/include/ntos_drv.h @@ -12,6 +12,7 @@ #include #include +#include "rte_mtr.h" #include "stream_binary_flow_api.h" #include "nthw_drv.h" @@ -57,6 +58,9 @@ struct __rte_cache_aligned ntnic_rx_queue { struct flow_queue_id_s queue; /* queue info - user id and hw queue index */ struct rte_mempool *mb_pool; /* mbuf memory pool */ uint16_t buf_size; /* Size of data area in mbuf */ + unsigned long rx_pkts; /* Rx packet statistics */ + unsigned long rx_bytes; /* Rx bytes statistics */ + unsigned long err_pkts; /* Rx error packet statistics */ int enabled; /* Enabling/disabling of this queue */ struct hwq_s hwq; @@ -80,12 +84,29 @@ struct __rte_cache_aligned ntnic_tx_queue { int rss_target_id; uint32_t port; /* Tx port for this queue */ + unsigned long tx_pkts; /* Tx packet statistics */ + unsigned long tx_bytes; /* Tx bytes statistics */ + unsigned long err_pkts; /* Tx error packet stat */ int enabled; /* Enabling/disabling of this queue */ enum fpga_info_profile profile; /* Inline / Capture */ }; +struct nt_mtr_profile { + LIST_ENTRY(nt_mtr_profile) next; + uint32_t profile_id; + struct rte_mtr_meter_profile profile; +}; + +struct nt_mtr { + LIST_ENTRY(nt_mtr) next; + uint32_t mtr_id; + int shared; + struct nt_mtr_profile *profile; +}; + struct pmd_internals { const struct rte_pci_device *pci_dev; + struct flow_eth_dev *flw_dev; char name[20]; int n_intf_no; int lpbk_mode; @@ -94,6 +115,7 @@ struct pmd_internals { /* Offset of the VF from the PF */ uint8_t vf_offset; uint32_t port; + uint32_t port_id; nt_meta_port_type_t type; struct flow_queue_id_s vpq[MAX_QUEUES]; unsigned int vpq_nb_vq; @@ -106,6 +128,8 @@ struct pmd_internals { struct rte_ether_addr eth_addrs[NUM_MAC_ADDRS_PER_PORT]; /* Multicast ethernet (MAC) addresses. */ struct rte_ether_addr mc_addrs[NUM_MULTICAST_ADDRS_PER_PORT]; + uint64_t last_stat_rtc; + uint64_t rx_missed; struct pmd_internals *next; }; diff --git a/drivers/net/ntnic/include/stream_binary_flow_api.h b/drivers/net/ntnic/include/stream_binary_flow_api.h index 10529b8843..4ce1561033 100644 --- a/drivers/net/ntnic/include/stream_binary_flow_api.h +++ b/drivers/net/ntnic/include/stream_binary_flow_api.h @@ -6,17 +6,84 @@ #ifndef _STREAM_BINARY_FLOW_API_H_ #define _STREAM_BINARY_FLOW_API_H_ +#include +#include "rte_flow.h" +#include "rte_flow_driver.h" + +/* Max RSS hash key length in bytes */ +#define MAX_RSS_KEY_LEN 40 + +/* NT specific MASKs for RSS configuration */ +/* NOTE: Masks are required for correct RSS configuration, do not modify them! 
*/ +#define NT_ETH_RSS_IPV4_MASK \ + (RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_FRAG_IPV4 | RTE_ETH_RSS_NONFRAG_IPV4_OTHER | \ + RTE_ETH_RSS_NONFRAG_IPV4_SCTP | RTE_ETH_RSS_NONFRAG_IPV4_TCP | \ + RTE_ETH_RSS_NONFRAG_IPV4_UDP) + +#define NT_ETH_RSS_IPV6_MASK \ + (RTE_ETH_RSS_IPV6 | RTE_ETH_RSS_FRAG_IPV6 | RTE_ETH_RSS_IPV6_EX | \ + RTE_ETH_RSS_IPV6_TCP_EX | RTE_ETH_RSS_IPV6_UDP_EX | RTE_ETH_RSS_NONFRAG_IPV6_OTHER | \ + RTE_ETH_RSS_NONFRAG_IPV6_SCTP | RTE_ETH_RSS_NONFRAG_IPV6_TCP | \ + RTE_ETH_RSS_NONFRAG_IPV6_UDP) + +#define NT_ETH_RSS_IP_MASK \ + (NT_ETH_RSS_IPV4_MASK | NT_ETH_RSS_IPV6_MASK | RTE_ETH_RSS_L3_SRC_ONLY | \ + RTE_ETH_RSS_L3_DST_ONLY) + +/* List of all RSS flags supported for RSS calculation offload */ +#define NT_ETH_RSS_OFFLOAD_MASK \ + (RTE_ETH_RSS_ETH | RTE_ETH_RSS_L2_PAYLOAD | RTE_ETH_RSS_IP | RTE_ETH_RSS_TCP | \ + RTE_ETH_RSS_UDP | RTE_ETH_RSS_SCTP | RTE_ETH_RSS_L2_SRC_ONLY | RTE_ETH_RSS_L2_DST_ONLY | \ + RTE_ETH_RSS_L4_SRC_ONLY | RTE_ETH_RSS_L4_DST_ONLY | RTE_ETH_RSS_L3_SRC_ONLY | \ + RTE_ETH_RSS_L3_DST_ONLY | RTE_ETH_RSS_VLAN | RTE_ETH_RSS_LEVEL_MASK | \ + RTE_ETH_RSS_IPV4_CHKSUM | RTE_ETH_RSS_L4_CHKSUM | RTE_ETH_RSS_PORT | RTE_ETH_RSS_GTPU) + /* * Flow frontend for binary programming interface */ #define FLOW_MAX_QUEUES 128 +#define RAW_ENCAP_DECAP_ELEMS_MAX 16 + +extern uint64_t rte_tsc_freq; +extern rte_spinlock_t hwlock; + +/* + * Flow eth dev profile determines how the FPGA module resources are + * managed and what features are available + */ +enum flow_eth_dev_profile { + FLOW_ETH_DEV_PROFILE_INLINE = 0, +}; + struct flow_queue_id_s { int id; int hw_id; }; +/* + * RTE_FLOW_ACTION_TYPE_RAW_ENCAP + */ +struct flow_action_raw_encap { + uint8_t *data; + uint8_t *preserve; + size_t size; + struct rte_flow_item items[RAW_ENCAP_DECAP_ELEMS_MAX]; + int item_count; +}; + +/* + * RTE_FLOW_ACTION_TYPE_RAW_DECAP + */ +struct flow_action_raw_decap { + uint8_t *data; + size_t size; + struct rte_flow_item items[RAW_ENCAP_DECAP_ELEMS_MAX]; + int item_count; +}; + struct flow_eth_dev; /* port device */ +struct flow_handle; #endif /* _STREAM_BINARY_FLOW_API_H_ */ diff --git a/drivers/net/ntnic/link_mgmt/link_100g/nt4ga_link_100g.c b/drivers/net/ntnic/link_mgmt/link_100g/nt4ga_link_100g.c index 8964458b47..d8e0cad7cd 100644 --- a/drivers/net/ntnic/link_mgmt/link_100g/nt4ga_link_100g.c +++ b/drivers/net/ntnic/link_mgmt/link_100g/nt4ga_link_100g.c @@ -404,6 +404,14 @@ static int _port_init(adapter_info_t *drv, nthw_fpga_t *fpga, int port) _enable_tx(drv, mac_pcs); _reset_rx(drv, mac_pcs); + /* 2.2) Nt4gaPort::setup() */ + if (nthw_gmf_init(NULL, fpga, port) == 0) { + nthw_gmf_t gmf; + + if (nthw_gmf_init(&gmf, fpga, port) == 0) + nthw_gmf_set_enable(&gmf, true); + } + /* Phase 3. 
Link state machine steps */ /* 3.1) Create NIM, ::createNim() */ diff --git a/drivers/net/ntnic/meson.build b/drivers/net/ntnic/meson.build index 3d9566a52e..ca46541ef3 100644 --- a/drivers/net/ntnic/meson.build +++ b/drivers/net/ntnic/meson.build @@ -17,17 +17,22 @@ includes = [ include_directories('nthw'), include_directories('nthw/supported'), include_directories('nthw/model'), + include_directories('nthw/ntnic_meter'), include_directories('nthw/flow_filter'), + include_directories('nthw/flow_api'), include_directories('nim/'), ] # all sources sources = files( 'adapter/nt4ga_adapter.c', + 'adapter/nt4ga_stat/nt4ga_stat.c', 'dbsconfig/ntnic_dbsconfig.c', 'link_mgmt/link_100g/nt4ga_link_100g.c', 'link_mgmt/nt4ga_link.c', 'nim/i2c_nim.c', + 'ntnic_filter/ntnic_filter.c', + 'ntnic_xstats/ntnic_xstats.c', 'nthw/dbs/nthw_dbs.c', 'nthw/supported/nthw_fpga_9563_055_049_0000.c', 'nthw/supported/nthw_fpga_instances.c', @@ -37,18 +42,31 @@ sources = files( 'nthw/core/nt200a0x/reset/nthw_fpga_rst9563.c', 'nthw/core/nt200a0x/reset/nthw_fpga_rst_nt200a0x.c', 'nthw/core/nthw_fpga.c', + 'nthw/core/nthw_gmf.c', + 'nthw/core/nthw_tsm.c', 'nthw/core/nthw_gpio_phy.c', 'nthw/core/nthw_hif.c', 'nthw/core/nthw_i2cm.c', 'nthw/core/nthw_iic.c', 'nthw/core/nthw_mac_pcs.c', 'nthw/core/nthw_pcie3.c', + 'nthw/core/nthw_rpf.c', 'nthw/core/nthw_rmc.c', 'nthw/core/nthw_sdc.c', 'nthw/core/nthw_si5340.c', + 'nthw/stat/nthw_stat.c', 'nthw/flow_api/flow_api.c', + 'nthw/flow_api/flow_group.c', + 'nthw/flow_api/flow_id_table.c', + 'nthw/flow_api/hw_mod/hw_mod_backend.c', + 'nthw/flow_api/profile_inline/flm_age_queue.c', + 'nthw/flow_api/profile_inline/flm_lrn_queue.c', + 'nthw/flow_api/profile_inline/flm_evt_queue.c', + 'nthw/flow_api/profile_inline/flow_api_profile_inline.c', + 'nthw/flow_api/profile_inline/flow_api_hw_db_inline.c', 'nthw/flow_api/flow_backend/flow_backend.c', 'nthw/flow_api/flow_filter.c', + 'nthw/flow_api/flow_hasher.c', 'nthw/flow_api/flow_kcc.c', 'nthw/flow_api/flow_km.c', 'nthw/flow_api/hw_mod/hw_mod_backend.c', @@ -75,10 +93,12 @@ sources = files( 'nthw/flow_filter/flow_nthw_tx_cpy.c', 'nthw/flow_filter/flow_nthw_tx_ins.c', 'nthw/flow_filter/flow_nthw_tx_rpl.c', + 'nthw/ntnic_meter/ntnic_meter.c', 'nthw/model/nthw_fpga_model.c', 'nthw/nthw_platform.c', 'nthw/nthw_rac.c', 'ntlog/ntlog.c', + 'ntnic_filter/ntnic_filter.c', 'ntutil/nt_util.c', 'ntnic_mod_reg.c', 'ntnic_vfio.c', diff --git a/drivers/net/ntnic/nthw/core/include/nthw_core.h b/drivers/net/ntnic/nthw/core/include/nthw_core.h index fe32891712..4073f9632c 100644 --- a/drivers/net/ntnic/nthw/core/include/nthw_core.h +++ b/drivers/net/ntnic/nthw/core/include/nthw_core.h @@ -17,6 +17,7 @@ #include "nthw_iic.h" #include "nthw_i2cm.h" +#include "nthw_gmf.h" #include "nthw_gpio_phy.h" #include "nthw_mac_pcs.h" #include "nthw_sdc.h" diff --git a/drivers/net/ntnic/nthw/core/include/nthw_gmf.h b/drivers/net/ntnic/nthw/core/include/nthw_gmf.h new file mode 100644 index 0000000000..cc5be85154 --- /dev/null +++ b/drivers/net/ntnic/nthw/core/include/nthw_gmf.h @@ -0,0 +1,64 @@ +/* + * SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2023 Napatech A/S + */ + +#ifndef __NTHW_GMF_H__ +#define __NTHW_GMF_H__ + +struct nthw_gmf { + nthw_fpga_t *mp_fpga; + nthw_module_t *mp_mod_gmf; + int mn_instance; + + nthw_register_t *mp_ctrl; + nthw_field_t *mp_ctrl_enable; + nthw_field_t *mp_ctrl_ifg_enable; + nthw_field_t *mp_ctrl_ifg_tx_now_always; + nthw_field_t *mp_ctrl_ifg_tx_on_ts_always; + nthw_field_t *mp_ctrl_ifg_tx_on_ts_adjust_on_set_clock; + nthw_field_t 
*mp_ctrl_ifg_auto_adjust_enable; + nthw_field_t *mp_ctrl_ts_inject_always; + nthw_field_t *mp_ctrl_fcs_always; + + nthw_register_t *mp_speed; + nthw_field_t *mp_speed_ifg_speed; + + nthw_register_t *mp_ifg_clock_delta; + nthw_field_t *mp_ifg_clock_delta_delta; + + nthw_register_t *mp_ifg_clock_delta_adjust; + nthw_field_t *mp_ifg_clock_delta_adjust_delta; + + nthw_register_t *mp_ifg_max_adjust_slack; + nthw_field_t *mp_ifg_max_adjust_slack_slack; + + nthw_register_t *mp_debug_lane_marker; + nthw_field_t *mp_debug_lane_marker_compensation; + + nthw_register_t *mp_stat_sticky; + nthw_field_t *mp_stat_sticky_data_underflowed; + nthw_field_t *mp_stat_sticky_ifg_adjusted; + + nthw_register_t *mp_stat_next_pkt; + nthw_field_t *mp_stat_next_pkt_ns; + + nthw_register_t *mp_stat_max_delayed_pkt; + nthw_field_t *mp_stat_max_delayed_pkt_ns; + + nthw_register_t *mp_ts_inject; + nthw_field_t *mp_ts_inject_offset; + nthw_field_t *mp_ts_inject_pos; + int mn_param_gmf_ifg_speed_mul; + int mn_param_gmf_ifg_speed_div; + + bool m_administrative_block; /* Used to enforce license expiry */ +}; + +typedef struct nthw_gmf nthw_gmf_t; + +int nthw_gmf_init(nthw_gmf_t *p, nthw_fpga_t *p_fpga, int n_instance); + +void nthw_gmf_set_enable(nthw_gmf_t *p, bool enable); + +#endif /* __NTHW_GMF_H__ */ diff --git a/drivers/net/ntnic/nthw/core/include/nthw_i2cm.h b/drivers/net/ntnic/nthw/core/include/nthw_i2cm.h index 6e0ec4cf5e..eeb4dffe25 100644 --- a/drivers/net/ntnic/nthw/core/include/nthw_i2cm.h +++ b/drivers/net/ntnic/nthw/core/include/nthw_i2cm.h @@ -7,7 +7,7 @@ #define __NTHW_II2CM_H__ #include "nthw_fpga_model.h" -#include "pthread.h" +#include "rte_spinlock.h" struct nt_i2cm { nthw_fpga_t *mp_fpga; @@ -39,7 +39,7 @@ struct nt_i2cm { nthw_field_t *mp_fld_io_exp_rst; nthw_field_t *mp_fld_io_exp_int_b; - pthread_mutex_t i2cmmutex; + rte_spinlock_t i2cmmutex; }; typedef struct nt_i2cm nthw_i2cm_t; diff --git a/drivers/net/ntnic/nthw/core/include/nthw_rmc.h b/drivers/net/ntnic/nthw/core/include/nthw_rmc.h index 2345820bdc..9c40804cd9 100644 --- a/drivers/net/ntnic/nthw/core/include/nthw_rmc.h +++ b/drivers/net/ntnic/nthw/core/include/nthw_rmc.h @@ -44,6 +44,12 @@ typedef struct nthw_rmc nthw_rmc; nthw_rmc_t *nthw_rmc_new(void); int nthw_rmc_init(nthw_rmc_t *p, nthw_fpga_t *p_fpga, int n_instance); +void nthw_rmc_block(nthw_rmc_t *p); void nthw_rmc_unblock(nthw_rmc_t *p, bool b_is_secondary); +uint32_t nthw_rmc_get_status_sf_ram_of(nthw_rmc_t *p); +uint32_t nthw_rmc_get_status_descr_fifo_of(nthw_rmc_t *p); +uint32_t nthw_rmc_get_dbg_merge(nthw_rmc_t *p); +uint32_t nthw_rmc_get_mac_if_err(nthw_rmc_t *p); + #endif /* NTHW_RMC_H_ */ diff --git a/drivers/net/ntnic/nthw/core/include/nthw_rpf.h b/drivers/net/ntnic/nthw/core/include/nthw_rpf.h new file mode 100644 index 0000000000..f893ac1c47 --- /dev/null +++ b/drivers/net/ntnic/nthw/core/include/nthw_rpf.h @@ -0,0 +1,50 @@ +/* + * SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2023 Napatech A/S + */ + +#ifndef NTHW_RPF_HPP_ +#define NTHW_RPF_HPP_ + +#include + +#include "nthw_fpga_model.h" + +struct nthw_rpf { + nthw_fpga_t *mp_fpga; + + nthw_module_t *m_mod_rpf; + + int mn_instance; + + nthw_register_t *mp_reg_control; + nthw_field_t *mp_fld_control_pen; + nthw_field_t *mp_fld_control_rpp_en; + nthw_field_t *mp_fld_control_st_tgl_en; + nthw_field_t *mp_fld_control_keep_alive_en; + + nthw_register_t *mp_ts_sort_prg; + nthw_field_t *mp_fld_ts_sort_prg_maturing_delay; + nthw_field_t *mp_fld_ts_sort_prg_ts_at_eof; + + int m_default_maturing_delay; + bool 
m_administrative_block; /* used to enforce license expiry */ + + rte_spinlock_t rpf_mutex; +}; + +typedef struct nthw_rpf nthw_rpf_t; +typedef struct nthw_rpf nt_rpf; + +nthw_rpf_t *nthw_rpf_new(void); +void nthw_rpf_delete(nthw_rpf_t *p); +int nthw_rpf_init(nthw_rpf_t *p, nthw_fpga_t *p_fpga, int n_instance); +void nthw_rpf_administrative_block(nthw_rpf_t *p); +void nthw_rpf_block(nthw_rpf_t *p); +void nthw_rpf_unblock(nthw_rpf_t *p); +void nthw_rpf_set_maturing_delay(nthw_rpf_t *p, int32_t delay); +int32_t nthw_rpf_get_maturing_delay(nthw_rpf_t *p); +void nthw_rpf_set_ts_at_eof(nthw_rpf_t *p, bool enable); +bool nthw_rpf_get_ts_at_eof(nthw_rpf_t *p); + +#endif diff --git a/drivers/net/ntnic/nthw/core/include/nthw_tsm.h b/drivers/net/ntnic/nthw/core/include/nthw_tsm.h new file mode 100644 index 0000000000..0a3bcdcaf5 --- /dev/null +++ b/drivers/net/ntnic/nthw/core/include/nthw_tsm.h @@ -0,0 +1,56 @@ +/* + * SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2023 Napatech A/S + */ + +#ifndef __NTHW_TSM_H__ +#define __NTHW_TSM_H__ + +#include "stdint.h" + +#include "nthw_fpga_model.h" + +struct nthw_tsm { + nthw_fpga_t *mp_fpga; + nthw_module_t *mp_mod_tsm; + int mn_instance; + + nthw_field_t *mp_fld_config_ts_format; + + nthw_field_t *mp_fld_timer_ctrl_timer_en_t0; + nthw_field_t *mp_fld_timer_ctrl_timer_en_t1; + + nthw_field_t *mp_fld_timer_timer_t0_max_count; + + nthw_field_t *mp_fld_timer_timer_t1_max_count; + + nthw_register_t *mp_reg_ts_lo; + nthw_field_t *mp_fld_ts_lo; + + nthw_register_t *mp_reg_ts_hi; + nthw_field_t *mp_fld_ts_hi; + + nthw_register_t *mp_reg_time_lo; + nthw_field_t *mp_fld_time_lo; + + nthw_register_t *mp_reg_time_hi; + nthw_field_t *mp_fld_time_hi; +}; + +typedef struct nthw_tsm nthw_tsm_t; +typedef struct nthw_tsm nthw_tsm; + +nthw_tsm_t *nthw_tsm_new(void); +int nthw_tsm_init(nthw_tsm_t *p, nthw_fpga_t *p_fpga, int n_instance); + +int nthw_tsm_get_ts(nthw_tsm_t *p, uint64_t *p_ts); +int nthw_tsm_get_time(nthw_tsm_t *p, uint64_t *p_time); + +int nthw_tsm_set_timer_t0_enable(nthw_tsm_t *p, bool b_enable); +int nthw_tsm_set_timer_t0_max_count(nthw_tsm_t *p, uint32_t n_timer_val); +int nthw_tsm_set_timer_t1_enable(nthw_tsm_t *p, bool b_enable); +int nthw_tsm_set_timer_t1_max_count(nthw_tsm_t *p, uint32_t n_timer_val); + +int nthw_tsm_set_config_ts_format(nthw_tsm_t *p, uint32_t n_val); + +#endif /* __NTHW_TSM_H__ */ diff --git a/drivers/net/ntnic/nthw/core/nthw_fpga.c b/drivers/net/ntnic/nthw/core/nthw_fpga.c index 9448c29de1..ca69a9d5b1 100644 --- a/drivers/net/ntnic/nthw/core/nthw_fpga.c +++ b/drivers/net/ntnic/nthw/core/nthw_fpga.c @@ -13,6 +13,8 @@ #include "nthw_fpga_instances.h" #include "nthw_fpga_mod_str_map.h" +#include "nthw_tsm.h" + #include int nthw_fpga_get_param_info(struct fpga_info_s *p_fpga_info, nthw_fpga_t *p_fpga) @@ -179,6 +181,7 @@ int nthw_fpga_init(struct fpga_info_s *p_fpga_info) nthw_hif_t *p_nthw_hif = NULL; nthw_pcie3_t *p_nthw_pcie3 = NULL; nthw_rac_t *p_nthw_rac = NULL; + nthw_tsm_t *p_nthw_tsm = NULL; mcu_info_t *p_mcu_info = &p_fpga_info->mcu_info; uint64_t n_fpga_ident = 0; @@ -331,6 +334,50 @@ int nthw_fpga_init(struct fpga_info_s *p_fpga_info) p_fpga_info->mp_nthw_hif = p_nthw_hif; + p_nthw_tsm = nthw_tsm_new(); + + if (p_nthw_tsm) { + nthw_tsm_init(p_nthw_tsm, p_fpga, 0); + + nthw_tsm_set_config_ts_format(p_nthw_tsm, 1); /* 1 = TSM: TS format native */ + + /* Timer T0 - stat toggle timer */ + nthw_tsm_set_timer_t0_enable(p_nthw_tsm, false); + nthw_tsm_set_timer_t0_max_count(p_nthw_tsm, 50 * 1000 * 1000); /* ns */ + 
nthw_tsm_set_timer_t0_enable(p_nthw_tsm, true); + + /* Timer T1 - keep alive timer */ + nthw_tsm_set_timer_t1_enable(p_nthw_tsm, false); + nthw_tsm_set_timer_t1_max_count(p_nthw_tsm, 100 * 1000 * 1000); /* ns */ + nthw_tsm_set_timer_t1_enable(p_nthw_tsm, true); + } + + p_fpga_info->mp_nthw_tsm = p_nthw_tsm; + + /* TSM sample triggering: test validation... */ +#if defined(DEBUG) && (1) + { + uint64_t n_time, n_ts; + int i; + + for (i = 0; i < 4; i++) { + if (p_nthw_hif) + nthw_hif_trigger_sample_time(p_nthw_hif); + + else if (p_nthw_pcie3) + nthw_pcie3_trigger_sample_time(p_nthw_pcie3); + + nthw_tsm_get_time(p_nthw_tsm, &n_time); + nthw_tsm_get_ts(p_nthw_tsm, &n_ts); + + NT_LOG(DBG, NTHW, "%s: TSM time: %016" PRIX64 " %016" PRIX64 "\n", + p_adapter_id_str, n_time, n_ts); + + nt_os_wait_usec(1000); + } + } +#endif + return res; } diff --git a/drivers/net/ntnic/nthw/core/nthw_gmf.c b/drivers/net/ntnic/nthw/core/nthw_gmf.c new file mode 100644 index 0000000000..16a4c288bd --- /dev/null +++ b/drivers/net/ntnic/nthw/core/nthw_gmf.c @@ -0,0 +1,133 @@ +/* + * SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2023 Napatech A/S + */ + +#include +#include +#include "ntlog.h" + +#include "nthw_drv.h" +#include "nthw_register.h" + +#include "nthw_gmf.h" + +int nthw_gmf_init(nthw_gmf_t *p, nthw_fpga_t *p_fpga, int n_instance) +{ + nthw_module_t *mod = nthw_fpga_query_module(p_fpga, MOD_GMF, n_instance); + + if (p == NULL) + return mod == NULL ? -1 : 0; + + if (mod == NULL) { + NT_LOG(ERR, NTHW, "%s: GMF %d: no such instance", + p_fpga->p_fpga_info->mp_adapter_id_str, n_instance); + return -1; + } + + p->mp_fpga = p_fpga; + p->mn_instance = n_instance; + p->mp_mod_gmf = mod; + + p->mp_ctrl = nthw_module_get_register(p->mp_mod_gmf, GMF_CTRL); + p->mp_ctrl_enable = nthw_register_get_field(p->mp_ctrl, GMF_CTRL_ENABLE); + p->mp_ctrl_ifg_enable = nthw_register_get_field(p->mp_ctrl, GMF_CTRL_IFG_ENABLE); + p->mp_ctrl_ifg_auto_adjust_enable = + nthw_register_get_field(p->mp_ctrl, GMF_CTRL_IFG_AUTO_ADJUST_ENABLE); + p->mp_ctrl_ts_inject_always = + nthw_register_query_field(p->mp_ctrl, GMF_CTRL_TS_INJECT_ALWAYS); + p->mp_ctrl_fcs_always = nthw_register_query_field(p->mp_ctrl, GMF_CTRL_FCS_ALWAYS); + + p->mp_speed = nthw_module_get_register(p->mp_mod_gmf, GMF_SPEED); + p->mp_speed_ifg_speed = nthw_register_get_field(p->mp_speed, GMF_SPEED_IFG_SPEED); + + p->mp_ifg_clock_delta = nthw_module_get_register(p->mp_mod_gmf, GMF_IFG_SET_CLOCK_DELTA); + p->mp_ifg_clock_delta_delta = + nthw_register_get_field(p->mp_ifg_clock_delta, GMF_IFG_SET_CLOCK_DELTA_DELTA); + + p->mp_ifg_max_adjust_slack = + nthw_module_get_register(p->mp_mod_gmf, GMF_IFG_MAX_ADJUST_SLACK); + p->mp_ifg_max_adjust_slack_slack = nthw_register_get_field(p->mp_ifg_max_adjust_slack, + GMF_IFG_MAX_ADJUST_SLACK_SLACK); + + p->mp_debug_lane_marker = nthw_module_get_register(p->mp_mod_gmf, GMF_DEBUG_LANE_MARKER); + p->mp_debug_lane_marker_compensation = + nthw_register_get_field(p->mp_debug_lane_marker, + GMF_DEBUG_LANE_MARKER_COMPENSATION); + + p->mp_stat_sticky = nthw_module_get_register(p->mp_mod_gmf, GMF_STAT_STICKY); + p->mp_stat_sticky_data_underflowed = + nthw_register_get_field(p->mp_stat_sticky, GMF_STAT_STICKY_DATA_UNDERFLOWED); + p->mp_stat_sticky_ifg_adjusted = + nthw_register_get_field(p->mp_stat_sticky, GMF_STAT_STICKY_IFG_ADJUSTED); + + p->mn_param_gmf_ifg_speed_mul = + nthw_fpga_get_product_param(p_fpga, NT_GMF_IFG_SPEED_MUL, 1); + p->mn_param_gmf_ifg_speed_div = + nthw_fpga_get_product_param(p_fpga, NT_GMF_IFG_SPEED_DIV, 1); + + 
p->m_administrative_block = false; + + p->mp_stat_next_pkt = nthw_module_query_register(p->mp_mod_gmf, GMF_STAT_NEXT_PKT); + + if (p->mp_stat_next_pkt) { + p->mp_stat_next_pkt_ns = + nthw_register_query_field(p->mp_stat_next_pkt, GMF_STAT_NEXT_PKT_NS); + + } else { + p->mp_stat_next_pkt_ns = NULL; + } + + p->mp_stat_max_delayed_pkt = + nthw_module_query_register(p->mp_mod_gmf, GMF_STAT_MAX_DELAYED_PKT); + + if (p->mp_stat_max_delayed_pkt) { + p->mp_stat_max_delayed_pkt_ns = + nthw_register_query_field(p->mp_stat_max_delayed_pkt, + GMF_STAT_MAX_DELAYED_PKT_NS); + + } else { + p->mp_stat_max_delayed_pkt_ns = NULL; + } + + p->mp_ctrl_ifg_tx_now_always = + nthw_register_query_field(p->mp_ctrl, GMF_CTRL_IFG_TX_NOW_ALWAYS); + p->mp_ctrl_ifg_tx_on_ts_always = + nthw_register_query_field(p->mp_ctrl, GMF_CTRL_IFG_TX_ON_TS_ALWAYS); + + p->mp_ctrl_ifg_tx_on_ts_adjust_on_set_clock = + nthw_register_query_field(p->mp_ctrl, GMF_CTRL_IFG_TX_ON_TS_ADJUST_ON_SET_CLOCK); + + p->mp_ifg_clock_delta_adjust = + nthw_module_query_register(p->mp_mod_gmf, GMF_IFG_SET_CLOCK_DELTA_ADJUST); + + if (p->mp_ifg_clock_delta_adjust) { + p->mp_ifg_clock_delta_adjust_delta = + nthw_register_query_field(p->mp_ifg_clock_delta_adjust, + GMF_IFG_SET_CLOCK_DELTA_ADJUST_DELTA); + + } else { + p->mp_ifg_clock_delta_adjust_delta = NULL; + } + + p->mp_ts_inject = nthw_module_query_register(p->mp_mod_gmf, GMF_TS_INJECT); + + if (p->mp_ts_inject) { + p->mp_ts_inject_offset = + nthw_register_query_field(p->mp_ts_inject, GMF_TS_INJECT_OFFSET); + p->mp_ts_inject_pos = + nthw_register_query_field(p->mp_ts_inject, GMF_TS_INJECT_POS); + + } else { + p->mp_ts_inject_offset = NULL; + p->mp_ts_inject_pos = NULL; + } + + return 0; +} + +void nthw_gmf_set_enable(nthw_gmf_t *p, bool enable) +{ + if (!p->m_administrative_block) + nthw_field_set_val_flush32(p->mp_ctrl_enable, enable ? 1 : 0); +} diff --git a/drivers/net/ntnic/nthw/core/nthw_rmc.c b/drivers/net/ntnic/nthw/core/nthw_rmc.c index 4a01424c24..570a179fc8 100644 --- a/drivers/net/ntnic/nthw/core/nthw_rmc.c +++ b/drivers/net/ntnic/nthw/core/nthw_rmc.c @@ -77,6 +77,36 @@ int nthw_rmc_init(nthw_rmc_t *p, nthw_fpga_t *p_fpga, int n_instance) return 0; } +uint32_t nthw_rmc_get_status_sf_ram_of(nthw_rmc_t *p) +{ + return (p->mp_reg_status) ? nthw_field_get_updated(p->mp_fld_sf_ram_of) : 0xffffffff; +} + +uint32_t nthw_rmc_get_status_descr_fifo_of(nthw_rmc_t *p) +{ + return (p->mp_reg_status) ? nthw_field_get_updated(p->mp_fld_descr_fifo_of) : 0xffffffff; +} + +uint32_t nthw_rmc_get_dbg_merge(nthw_rmc_t *p) +{ + return (p->mp_reg_dbg) ? nthw_field_get_updated(p->mp_fld_dbg_merge) : 0xffffffff; +} + +uint32_t nthw_rmc_get_mac_if_err(nthw_rmc_t *p) +{ + return (p->mp_reg_mac_if) ? nthw_field_get_updated(p->mp_fld_mac_if_err) : 0xffffffff; +} + +void nthw_rmc_block(nthw_rmc_t *p) +{ + /* BLOCK_STATT(0)=1 BLOCK_KEEPA(1)=1 BLOCK_MAC_PORT(8:11)=~0 */ + if (!p->mb_administrative_block) { + nthw_field_set_flush(p->mp_fld_ctrl_block_stat_drop); + nthw_field_set_flush(p->mp_fld_ctrl_block_keep_alive); + nthw_field_set_flush(p->mp_fld_ctrl_block_mac_port); + } +} + void nthw_rmc_unblock(nthw_rmc_t *p, bool b_is_secondary) { uint32_t n_block_mask = ~0U << (b_is_secondary ? 
p->mn_nims : p->mn_ports); diff --git a/drivers/net/ntnic/nthw/core/nthw_rpf.c b/drivers/net/ntnic/nthw/core/nthw_rpf.c new file mode 100644 index 0000000000..1ed4d7b4e0 --- /dev/null +++ b/drivers/net/ntnic/nthw/core/nthw_rpf.c @@ -0,0 +1,120 @@ +/* + * SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2023 Napatech A/S + */ + +#include "ntlog.h" + +#include "nthw_drv.h" +#include "nthw_register.h" +#include "nthw_rpf.h" +#include "rte_spinlock.h" + +nthw_rpf_t *nthw_rpf_new(void) +{ + nthw_rpf_t *p = malloc(sizeof(nthw_rpf_t)); + + if (p) + memset(p, 0, sizeof(nthw_rpf_t)); + + return p; +} + +void nthw_rpf_delete(nthw_rpf_t *p) +{ + if (p) { + memset(p, 0, sizeof(nthw_rpf_t)); + free(p); + } +} + +int nthw_rpf_init(nthw_rpf_t *p, nthw_fpga_t *p_fpga, int n_instance) +{ + nthw_module_t *p_mod = nthw_fpga_query_module(p_fpga, MOD_RPF, n_instance); + + if (p == NULL) + return p_mod == NULL ? -1 : 0; + + if (p_mod == NULL) { + NT_LOG(ERR, NTHW, "%s: MOD_RPF %d: no such instance", + p->mp_fpga->p_fpga_info->mp_adapter_id_str, p->mn_instance); + return -1; + } + + p->m_mod_rpf = p_mod; + + p->mp_fpga = p_fpga; + + p->m_administrative_block = false; + + /* CONTROL */ + p->mp_reg_control = nthw_module_get_register(p->m_mod_rpf, RPF_CONTROL); + p->mp_fld_control_pen = nthw_register_get_field(p->mp_reg_control, RPF_CONTROL_PEN); + p->mp_fld_control_rpp_en = nthw_register_get_field(p->mp_reg_control, RPF_CONTROL_RPP_EN); + p->mp_fld_control_st_tgl_en = + nthw_register_get_field(p->mp_reg_control, RPF_CONTROL_ST_TGL_EN); + p->mp_fld_control_keep_alive_en = + nthw_register_get_field(p->mp_reg_control, RPF_CONTROL_KEEP_ALIVE_EN); + + /* TS_SORT_PRG */ + p->mp_ts_sort_prg = nthw_module_get_register(p->m_mod_rpf, RPF_TS_SORT_PRG); + p->mp_fld_ts_sort_prg_maturing_delay = + nthw_register_get_field(p->mp_ts_sort_prg, RPF_TS_SORT_PRG_MATURING_DELAY); + p->mp_fld_ts_sort_prg_ts_at_eof = + nthw_register_get_field(p->mp_ts_sort_prg, RPF_TS_SORT_PRG_TS_AT_EOF); + p->m_default_maturing_delay = + nthw_fpga_get_product_param(p_fpga, NT_RPF_MATURING_DEL_DEFAULT, 0); + + /* Initialize mutex */ + rte_spinlock_init(&p->rpf_mutex); + return 0; +} + +void nthw_rpf_administrative_block(nthw_rpf_t *p) +{ + /* block all MAC ports */ + nthw_register_update(p->mp_reg_control); + nthw_field_set_val_flush32(p->mp_fld_control_pen, 0); + + p->m_administrative_block = true; +} + +void nthw_rpf_block(nthw_rpf_t *p) +{ + nthw_register_update(p->mp_reg_control); + nthw_field_set_val_flush32(p->mp_fld_control_pen, 0); +} + +void nthw_rpf_unblock(nthw_rpf_t *p) +{ + nthw_register_update(p->mp_reg_control); + + nthw_field_set_val32(p->mp_fld_control_pen, ~0U); + nthw_field_set_val32(p->mp_fld_control_rpp_en, ~0U); + nthw_field_set_val32(p->mp_fld_control_st_tgl_en, 1); + nthw_field_set_val_flush32(p->mp_fld_control_keep_alive_en, 1); +} + +void nthw_rpf_set_maturing_delay(nthw_rpf_t *p, int32_t delay) +{ + nthw_register_update(p->mp_ts_sort_prg); + nthw_field_set_val_flush32(p->mp_fld_ts_sort_prg_maturing_delay, (uint32_t)delay); +} + +int32_t nthw_rpf_get_maturing_delay(nthw_rpf_t *p) +{ + nthw_register_update(p->mp_ts_sort_prg); + /* Maturing delay is a two's complement 18 bit value, so we retrieve it as signed */ + return nthw_field_get_signed(p->mp_fld_ts_sort_prg_maturing_delay); +} + +void nthw_rpf_set_ts_at_eof(nthw_rpf_t *p, bool enable) +{ + nthw_register_update(p->mp_ts_sort_prg); + nthw_field_set_val_flush32(p->mp_fld_ts_sort_prg_ts_at_eof, enable); +} + +bool nthw_rpf_get_ts_at_eof(nthw_rpf_t *p) +{ + return 
nthw_field_get_updated(p->mp_fld_ts_sort_prg_ts_at_eof); +} diff --git a/drivers/net/ntnic/nthw/core/nthw_tsm.c b/drivers/net/ntnic/nthw/core/nthw_tsm.c new file mode 100644 index 0000000000..b88dcb9b0b --- /dev/null +++ b/drivers/net/ntnic/nthw/core/nthw_tsm.c @@ -0,0 +1,167 @@ +/* + * SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2023 Napatech A/S + */ + +#include "ntlog.h" + +#include "nthw_drv.h" +#include "nthw_register.h" + +#include "nthw_tsm.h" + +nthw_tsm_t *nthw_tsm_new(void) +{ + nthw_tsm_t *p = malloc(sizeof(nthw_tsm_t)); + + if (p) + memset(p, 0, sizeof(nthw_tsm_t)); + + return p; +} + +int nthw_tsm_init(nthw_tsm_t *p, nthw_fpga_t *p_fpga, int n_instance) +{ + const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str; + nthw_module_t *mod = nthw_fpga_query_module(p_fpga, MOD_TSM, n_instance); + + if (p == NULL) + return mod == NULL ? -1 : 0; + + if (mod == NULL) { + NT_LOG(ERR, NTHW, "%s: TSM %d: no such instance", p_adapter_id_str, n_instance); + return -1; + } + + p->mp_fpga = p_fpga; + p->mn_instance = n_instance; + p->mp_mod_tsm = mod; + + { + nthw_register_t *p_reg; + + p_reg = nthw_module_get_register(p->mp_mod_tsm, TSM_CONFIG); + p->mp_fld_config_ts_format = nthw_register_get_field(p_reg, TSM_CONFIG_TS_FORMAT); + + p_reg = nthw_module_get_register(p->mp_mod_tsm, TSM_TIMER_CTRL); + p->mp_fld_timer_ctrl_timer_en_t0 = + nthw_register_get_field(p_reg, TSM_TIMER_CTRL_TIMER_EN_T0); + p->mp_fld_timer_ctrl_timer_en_t1 = + nthw_register_get_field(p_reg, TSM_TIMER_CTRL_TIMER_EN_T1); + + p_reg = nthw_module_get_register(p->mp_mod_tsm, TSM_TIMER_T0); + p->mp_fld_timer_timer_t0_max_count = + nthw_register_get_field(p_reg, TSM_TIMER_T0_MAX_COUNT); + + p_reg = nthw_module_get_register(p->mp_mod_tsm, TSM_TIMER_T1); + p->mp_fld_timer_timer_t1_max_count = + nthw_register_get_field(p_reg, TSM_TIMER_T1_MAX_COUNT); + + p->mp_reg_time_lo = nthw_module_get_register(p->mp_mod_tsm, TSM_TIME_LO); + p_reg = p->mp_reg_time_lo; + p->mp_fld_time_lo = nthw_register_get_field(p_reg, TSM_TIME_LO_NS); + + p->mp_reg_time_hi = nthw_module_get_register(p->mp_mod_tsm, TSM_TIME_HI); + p_reg = p->mp_reg_time_hi; + p->mp_fld_time_hi = nthw_register_get_field(p_reg, TSM_TIME_HI_SEC); + + p->mp_reg_ts_lo = nthw_module_get_register(p->mp_mod_tsm, TSM_TS_LO); + p_reg = p->mp_reg_ts_lo; + p->mp_fld_ts_lo = nthw_register_get_field(p_reg, TSM_TS_LO_TIME); + + p->mp_reg_ts_hi = nthw_module_get_register(p->mp_mod_tsm, TSM_TS_HI); + p_reg = p->mp_reg_ts_hi; + p->mp_fld_ts_hi = nthw_register_get_field(p_reg, TSM_TS_HI_TIME); + } + return 0; +} + +int nthw_tsm_get_ts(nthw_tsm_t *p, uint64_t *p_ts) +{ + uint32_t n_ts_lo, n_ts_hi; + uint64_t val; + + if (!p_ts) + return -1; + + n_ts_lo = nthw_field_get_updated(p->mp_fld_ts_lo); + n_ts_hi = nthw_field_get_updated(p->mp_fld_ts_hi); + + val = ((((uint64_t)n_ts_hi) << 32UL) | n_ts_lo); + + if (p_ts) + *p_ts = val; + + return 0; +} + +int nthw_tsm_get_time(nthw_tsm_t *p, uint64_t *p_time) +{ + uint32_t n_time_lo, n_time_hi; + uint64_t val; + + if (!p_time) + return -1; + + n_time_lo = nthw_field_get_updated(p->mp_fld_time_lo); + n_time_hi = nthw_field_get_updated(p->mp_fld_time_hi); + + val = ((((uint64_t)n_time_hi) << 32UL) | n_time_lo); + + if (p_time) + *p_time = val; + + return 0; +} + +int nthw_tsm_set_timer_t0_enable(nthw_tsm_t *p, bool b_enable) +{ + nthw_field_update_register(p->mp_fld_timer_ctrl_timer_en_t0); + + if (b_enable) + nthw_field_set_flush(p->mp_fld_timer_ctrl_timer_en_t0); + + else + 
nthw_field_clr_flush(p->mp_fld_timer_ctrl_timer_en_t0); + + return 0; +} + +int nthw_tsm_set_timer_t0_max_count(nthw_tsm_t *p, uint32_t n_timer_val) +{ + /* Timer T0 - stat toggle timer */ + nthw_field_update_register(p->mp_fld_timer_timer_t0_max_count); + nthw_field_set_val_flush32(p->mp_fld_timer_timer_t0_max_count, + n_timer_val); /* ns (50*1000*1000) */ + return 0; +} + +int nthw_tsm_set_timer_t1_enable(nthw_tsm_t *p, bool b_enable) +{ + nthw_field_update_register(p->mp_fld_timer_ctrl_timer_en_t1); + + if (b_enable) + nthw_field_set_flush(p->mp_fld_timer_ctrl_timer_en_t1); + + else + nthw_field_clr_flush(p->mp_fld_timer_ctrl_timer_en_t1); + + return 0; +} + +int nthw_tsm_set_timer_t1_max_count(nthw_tsm_t *p, uint32_t n_timer_val) +{ + /* Timer T1 - keep alive timer */ + nthw_field_update_register(p->mp_fld_timer_timer_t1_max_count); + nthw_field_set_val_flush32(p->mp_fld_timer_timer_t1_max_count, + n_timer_val); /* ns (100*1000*1000) */ + return 0; +} + +int nthw_tsm_set_config_ts_format(nthw_tsm_t *p, uint32_t n_val) +{ + nthw_field_update_register(p->mp_fld_config_ts_format); + /* 0x1: Native - 10ns units, start date: 1970-01-01. */ + nthw_field_set_val_flush32(p->mp_fld_config_ts_format, n_val); + return 0; +} diff --git a/drivers/net/ntnic/nthw/flow_api/flow_api.c b/drivers/net/ntnic/nthw/flow_api/flow_api.c index 34e84559eb..5aaf3c2f23 100644 --- a/drivers/net/ntnic/nthw/flow_api/flow_api.c +++ b/drivers/net/ntnic/nthw/flow_api/flow_api.c @@ -2,13 +2,22 @@ * SPDX-License-Identifier: BSD-3-Clause * Copyright(c) 2023 Napatech A/S */ +#include "rte_spinlock.h" +#include "ntlog.h" +#include "nt_util.h" #include "flow_api_engine.h" #include "flow_api_nic_setup.h" #include "ntnic_mod_reg.h" +#include "flow_api.h" #include "flow_filter.h" +#define RSS_TO_STRING(name) \ + { \ + name, #name \ + } + const char *dbg_res_descr[] = { /* RES_QUEUE */ "RES_QUEUE", /* RES_CAT_CFN */ "RES_CAT_CFN", @@ -33,13 +42,122 @@ const char *dbg_res_descr[] = { }; static struct flow_nic_dev *dev_base; -static pthread_mutex_t base_mtx = PTHREAD_MUTEX_INITIALIZER; +static rte_spinlock_t base_mtx = RTE_SPINLOCK_INITIALIZER; + +/* + * Error handling + */ + +static const struct { + const char *message; +} err_msg[] = { + /* 00 */ { "Operation successfully completed" }, + /* 01 */ { "Operation failed" }, + /* 02 */ { "Memory allocation failed" }, + /* 03 */ { "Too many output destinations" }, + /* 04 */ { "Too many output queues for RSS" }, + /* 05 */ { "The VLAN TPID specified is not supported" }, + /* 06 */ { "The VxLan Push header specified is not accepted" }, + /* 07 */ { "While interpreting VxLan Pop action, could not find a destination port" }, + /* 08 */ { "Failed in creating a HW-internal VTEP port" }, + /* 09 */ { "Too many VLAN tag matches" }, + /* 10 */ { "IPv6 invalid header specified" }, + /* 11 */ { "Too many tunnel ports. HW limit reached" }, + /* 12 */ { "Unknown or unsupported flow match element received" }, + /* 13 */ { "Match failed because of HW limitations" }, + /* 14 */ { "Match failed because of HW resource limitations" }, + /* 15 */ { "Match failed because of too complex element definitions" }, + /* 16 */ { "Action failed. 
To too many output destinations" }, + /* 17 */ { "Action Output failed, due to HW resource exhaustion" }, + /* 18 */ { "Push Tunnel Header action cannot output to multiple destination queues" }, + /* 19 */ { "Inline action HW resource exhaustion" }, + /* 20 */ { "Action retransmit/recirculate HW resource exhaustion" }, + /* 21 */ { "Flow counter HW resource exhaustion" }, + /* 22 */ { "Internal HW resource exhaustion to handle Actions" }, + /* 23 */ { "Internal HW QSL compare failed" }, + /* 24 */ { "Internal CAT CFN reuse failed" }, + /* 25 */ { "Match variations too complex" }, + /* 26 */ { "Match failed because of CAM/TCAM full" }, + /* 27 */ { "Internal creation of a tunnel end point port failed" }, + /* 28 */ { "Unknown or unsupported flow action received" }, + /* 29 */ { "Removing flow failed" }, +}; + +void flow_nic_set_error(enum flow_nic_err_msg_e msg, struct rte_flow_error *error) +{ + assert(msg < ERR_MSG_NO_MSG); + + if (error) { + error->message = err_msg[msg].message; + error->type = (msg == ERR_SUCCESS) ? RTE_FLOW_ERROR_TYPE_NONE : + RTE_FLOW_ERROR_TYPE_UNSPECIFIED; + } +} + +/* + * Resources + */ + +int flow_nic_alloc_resource(struct flow_nic_dev *ndev, enum res_type_e res_type, + uint32_t alignment) +{ + for (unsigned int i = 0; i < ndev->res[res_type].resource_count; i += alignment) { + if (!flow_nic_is_resource_used(ndev, res_type, i)) { + flow_nic_mark_resource_used(ndev, res_type, i); + ndev->res[res_type].ref[i] = 1; + return i; + } + } + + return -1; +} + +int flow_nic_alloc_resource_config(struct flow_nic_dev *ndev, enum res_type_e res_type, + unsigned int num, uint32_t alignment) +{ + unsigned int idx_offs; + + for (unsigned int res_idx = 0; res_idx < ndev->res[res_type].resource_count - (num - 1); + res_idx += alignment) { + if (!flow_nic_is_resource_used(ndev, res_type, res_idx)) { + for (idx_offs = 1; idx_offs < num; idx_offs++) + if (flow_nic_is_resource_used(ndev, res_type, res_idx + idx_offs)) + break; + + if (idx_offs < num) + continue; + + /* found a contiguous number of "num" res_type elements - allocate them */ + for (idx_offs = 0; idx_offs < num; idx_offs++) { + flow_nic_mark_resource_used(ndev, res_type, res_idx + idx_offs); + ndev->res[res_type].ref[res_idx + idx_offs] = 1; + } + + return res_idx; + } + } + + return -1; +} void flow_nic_free_resource(struct flow_nic_dev *ndev, enum res_type_e res_type, int idx) { flow_nic_mark_resource_unused(ndev, res_type, idx); } +int flow_nic_ref_resource(struct flow_nic_dev *ndev, enum res_type_e res_type, int index) +{ + NT_LOG(DBG, FILTER, "Reference resource %s idx %i (before ref cnt %i)", + dbg_res_descr[res_type], index, ndev->res[res_type].ref[index]); + assert(flow_nic_is_resource_used(ndev, res_type, index)); + + if (ndev->res[res_type].ref[index] == (uint32_t)-1) + return -1; + + ndev->res[res_type].ref[index]++; + return 0; +} + int flow_nic_deref_resource(struct flow_nic_dev *ndev, enum res_type_e res_type, int index) { NT_LOG(DBG, FILTER, "De-reference resource %s idx %i (before ref cnt %i)", @@ -55,10 +173,122 @@ int flow_nic_deref_resource(struct flow_nic_dev *ndev, enum res_type_e res_type, return !!ndev->res[res_type].ref[index];/* if 0 resource has been freed */ } +/* + * Nic port/adapter lookup + */ + +static struct flow_eth_dev *nic_and_port_to_eth_dev(uint8_t adapter_no, uint8_t port) +{ + struct flow_nic_dev *nic_dev = dev_base; + + while (nic_dev) { + if (nic_dev->adapter_no == adapter_no) + break; + + nic_dev = nic_dev->next; + } + + if (!nic_dev) + return NULL; + + struct flow_eth_dev *dev 
= nic_dev->eth_base; + + while (dev) { + if (port == dev->port) + return dev; + + dev = dev->next; + } + + return NULL; +} + +static struct flow_nic_dev *get_nic_dev_from_adapter_no(uint8_t adapter_no) +{ + struct flow_nic_dev *ndev = dev_base; + + while (ndev) { + if (adapter_no == ndev->adapter_no) + break; + + ndev = ndev->next; + } + + return ndev; +} +/* + * Flow API + */ + +static struct flow_handle *flow_create(struct flow_eth_dev *dev __rte_unused, + const struct rte_flow_attr *attr __rte_unused, + uint16_t forced_vlan_vid __rte_unused, + uint16_t caller_id __rte_unused, + const struct rte_flow_item item[] __rte_unused, + const struct rte_flow_action action[] __rte_unused, + struct rte_flow_error *error __rte_unused) +{ + const struct profile_inline_ops *profile_inline_ops = get_profile_inline_ops(); + + if (profile_inline_ops == NULL) { + NT_LOG(ERR, FILTER, "%s: profile_inline module uninitialized", __func__); + return NULL; + } + + return profile_inline_ops->flow_create_profile_inline(dev, attr, + forced_vlan_vid, caller_id, item, action, error); +} + +static int flow_destroy(struct flow_eth_dev *dev __rte_unused, + struct flow_handle *flow __rte_unused, struct rte_flow_error *error __rte_unused) +{ + const struct profile_inline_ops *profile_inline_ops = get_profile_inline_ops(); + + if (profile_inline_ops == NULL) { + NT_LOG(ERR, FILTER, "%s: profile_inline module uninitialized", __func__); + return -1; + } + + return profile_inline_ops->flow_destroy_profile_inline(dev, flow, error); +} + +static int flow_flush(struct flow_eth_dev *dev, uint16_t caller_id, struct rte_flow_error *error) +{ + const struct profile_inline_ops *profile_inline_ops = get_profile_inline_ops(); + + if (profile_inline_ops == NULL) { + NT_LOG_DBGX(ERR, FILTER, "profile_inline module uninitialized"); + return -1; + } + + return profile_inline_ops->flow_flush_profile_inline(dev, caller_id, error); +} + +static int flow_actions_update(struct flow_eth_dev *dev, + struct flow_handle *flow, + const struct rte_flow_action action[], + struct rte_flow_error *error) +{ + const struct profile_inline_ops *profile_inline_ops = get_profile_inline_ops(); + + if (profile_inline_ops == NULL) { + NT_LOG_DBGX(ERR, FILTER, "profile_inline module uninitialized"); + return -1; + } + + return profile_inline_ops->flow_actions_update_profile_inline(dev, flow, action, error); +} + /* * Device Management API */ +static void nic_insert_eth_port_dev(struct flow_nic_dev *ndev, struct flow_eth_dev *dev) +{ + dev->next = ndev->eth_base; + ndev->eth_base = dev; +} + static int nic_remove_eth_port_dev(struct flow_nic_dev *ndev, struct flow_eth_dev *eth_dev) { struct flow_eth_dev *dev = ndev->eth_base, *prev = NULL; @@ -83,16 +313,34 @@ static int nic_remove_eth_port_dev(struct flow_nic_dev *ndev, struct flow_eth_de static void flow_ndev_reset(struct flow_nic_dev *ndev) { + const struct profile_inline_ops *profile_inline_ops = get_profile_inline_ops(); + + if (profile_inline_ops == NULL) { + NT_LOG(ERR, FILTER, "%s: profile_inline module uninitialized", __func__); + return; + } + /* Delete all eth-port devices created on this NIC device */ while (ndev->eth_base) flow_delete_eth_dev(ndev->eth_base); + /* Error check */ + while (ndev->flow_base) { + NT_LOG(ERR, FILTER, + "ERROR : Flows still defined but all eth-ports deleted. 
Flow %p", + ndev->flow_base); + + profile_inline_ops->flow_destroy_profile_inline(ndev->flow_base->dev, + ndev->flow_base, NULL); + } + + profile_inline_ops->done_flow_management_of_ndev_profile_inline(ndev); + km_free_ndev_resource_management(&ndev->km_res_handle); kcc_free_ndev_resource_management(&ndev->kcc_res_handle); ndev->flow_unique_id_counter = 0; -#ifdef FLOW_DEBUG /* * free all resources default allocated, initially for this NIC DEV * Is not really needed since the bitmap will be freed in a sec. Therefore @@ -104,9 +352,7 @@ static void flow_ndev_reset(struct flow_nic_dev *ndev) for (unsigned int i = 0; i < RES_COUNT; i++) { int err = 0; -#if defined(FLOW_DEBUG) NT_LOG(DBG, FILTER, "RES state for: %s", dbg_res_descr[i]); -#endif for (unsigned int ii = 0; ii < ndev->res[i].resource_count; ii++) { int ref = ndev->res[i].ref[ii]; @@ -123,11 +369,17 @@ static void flow_ndev_reset(struct flow_nic_dev *ndev) NT_LOG(DBG, FILTER, "ERROR - some resources not freed"); } -#endif } int flow_delete_eth_dev(struct flow_eth_dev *eth_dev) { + const struct profile_inline_ops *profile_inline_ops = get_profile_inline_ops(); + + if (profile_inline_ops == NULL) { + NT_LOG(ERR, FILTER, "%s: profile_inline module uninitialized", __func__); + return -1; + } + struct flow_nic_dev *ndev = eth_dev->ndev; if (!ndev) { @@ -142,7 +394,21 @@ int flow_delete_eth_dev(struct flow_eth_dev *eth_dev) #endif /* delete all created flows from this device */ - pthread_mutex_lock(&ndev->mtx); + rte_spinlock_lock(&ndev->mtx); + + struct flow_handle *flow = ndev->flow_base; + + while (flow) { + if (flow->dev == eth_dev) { + struct flow_handle *flow_next = flow->next; + profile_inline_ops->flow_destroy_locked_profile_inline(eth_dev, flow, + NULL); + flow = flow_next; + + } else { + flow = flow->next; + } + } /* * remove unmatched queue if setup in QSL @@ -152,25 +418,27 @@ int flow_delete_eth_dev(struct flow_eth_dev *eth_dev) hw_mod_qsl_unmq_set(&ndev->be, HW_QSL_UNMQ_EN, eth_dev->port, 0); hw_mod_qsl_unmq_flush(&ndev->be, eth_dev->port, 1); -#ifdef FLOW_DEBUG - ndev->be.iface->set_debug_mode(ndev->be.be_dev, FLOW_BACKEND_DEBUG_MODE_NONE); -#endif - -#ifndef SCATTER_GATHER + if (ndev->flow_profile == FLOW_ETH_DEV_PROFILE_INLINE) { + for (int i = 0; i < eth_dev->num_queues; ++i) { + uint32_t qen_value = 0; + uint32_t queue_id = (uint32_t)eth_dev->rx_queue[i].hw_id; - /* free rx queues */ - for (int i = 0; i < eth_dev->num_queues; i++) { - ndev->be.iface->free_rx_queue(ndev->be.be_dev, eth_dev->rx_queue[i].hw_id); - flow_nic_deref_resource(ndev, RES_QUEUE, eth_dev->rx_queue[i].id); + hw_mod_qsl_qen_get(&ndev->be, HW_QSL_QEN_EN, queue_id / 4, &qen_value); + hw_mod_qsl_qen_set(&ndev->be, HW_QSL_QEN_EN, queue_id / 4, + qen_value & ~(1U << (queue_id % 4))); + hw_mod_qsl_qen_flush(&ndev->be, queue_id / 4, 1); + } } +#ifdef FLOW_DEBUG + ndev->be.iface->set_debug_mode(ndev->be.be_dev, FLOW_BACKEND_DEBUG_MODE_NONE); #endif /* take eth_dev out of ndev list */ if (nic_remove_eth_port_dev(ndev, eth_dev) != 0) NT_LOG(ERR, FILTER, "ERROR : eth_dev %p not found", eth_dev); - pthread_mutex_unlock(&ndev->mtx); + rte_spinlock_unlock(&ndev->mtx); /* free eth_dev */ free(eth_dev); @@ -211,15 +479,15 @@ static void done_resource_elements(struct flow_nic_dev *ndev, enum res_type_e re static void list_insert_flow_nic(struct flow_nic_dev *ndev) { - pthread_mutex_lock(&base_mtx); + rte_spinlock_lock(&base_mtx); ndev->next = dev_base; dev_base = ndev; - pthread_mutex_unlock(&base_mtx); + rte_spinlock_unlock(&base_mtx); } static int 
list_remove_flow_nic(struct flow_nic_dev *ndev) { - pthread_mutex_lock(&base_mtx); + rte_spinlock_lock(&base_mtx); struct flow_nic_dev *nic_dev = dev_base, *prev = NULL; while (nic_dev) { @@ -230,7 +498,7 @@ static int list_remove_flow_nic(struct flow_nic_dev *ndev) else dev_base = nic_dev->next; - pthread_mutex_unlock(&base_mtx); + rte_spinlock_unlock(&base_mtx); return 0; } @@ -238,10 +506,164 @@ static int list_remove_flow_nic(struct flow_nic_dev *ndev) nic_dev = nic_dev->next; } - pthread_mutex_unlock(&base_mtx); + rte_spinlock_unlock(&base_mtx); return -1; } +/* + * adapter_no physical adapter no + * port_no local port no + * alloc_rx_queues number of rx-queues to allocate for this eth_dev + */ +static struct flow_eth_dev *flow_get_eth_dev(uint8_t adapter_no, uint8_t port_no, uint32_t port_id, + int alloc_rx_queues, struct flow_queue_id_s queue_ids[], + int *rss_target_id, enum flow_eth_dev_profile flow_profile, + uint32_t exception_path) +{ + const struct profile_inline_ops *profile_inline_ops = get_profile_inline_ops(); + + if (profile_inline_ops == NULL) + NT_LOG(ERR, FILTER, "%s: profile_inline module uninitialized", __func__); + + int i; + struct flow_eth_dev *eth_dev = NULL; + + NT_LOG(DBG, FILTER, + "Get eth-port adapter %i, port %i, port_id %u, rx queues %i, profile %i", + adapter_no, port_no, port_id, alloc_rx_queues, flow_profile); + + if (MAX_OUTPUT_DEST < FLOW_MAX_QUEUES) { + assert(0); + NT_LOG(ERR, FILTER, + "ERROR: Internal array for multiple queues too small for API"); + } + + rte_spinlock_lock(&base_mtx); + struct flow_nic_dev *ndev = get_nic_dev_from_adapter_no(adapter_no); + + if (!ndev) { + /* Error - no flow api found on specified adapter */ + NT_LOG(ERR, FILTER, "ERROR: no flow interface registered for adapter %d", + adapter_no); + rte_spinlock_unlock(&base_mtx); + return NULL; + } + + if (ndev->ports < ((uint16_t)port_no + 1)) { + NT_LOG(ERR, FILTER, "ERROR: port exceeds supported port range for adapter"); + rte_spinlock_unlock(&base_mtx); + return NULL; + } + + if ((alloc_rx_queues - 1) > FLOW_MAX_QUEUES) { /* 0th is exception so +1 */ + NT_LOG(ERR, FILTER, + "ERROR: Exceeds supported number of rx queues per eth device"); + rte_spinlock_unlock(&base_mtx); + return NULL; + } + + /* don't accept multiple eth_dev's on same NIC and same port */ + eth_dev = nic_and_port_to_eth_dev(adapter_no, port_no); + + if (eth_dev) { + NT_LOG(DBG, FILTER, "Re-opening existing NIC port device: NIC DEV: %i Port %i", + adapter_no, port_no); + flow_delete_eth_dev(eth_dev); + eth_dev = NULL; + } + + rte_spinlock_lock(&ndev->mtx); + + eth_dev = calloc(1, sizeof(struct flow_eth_dev)); + + if (!eth_dev) { + NT_LOG(ERR, FILTER, "ERROR: calloc failed"); + goto err_exit0; + } + + eth_dev->ndev = ndev; + eth_dev->port = port_no; + eth_dev->port_id = port_id; + + /* First time then NIC is initialized */ + if (!ndev->flow_mgnt_prepared) { + ndev->flow_profile = flow_profile; + + /* Initialize modules if needed - recipe 0 is used as no-match and must be setup */ + if (profile_inline_ops != NULL && + profile_inline_ops->initialize_flow_management_of_ndev_profile_inline(ndev)) + goto err_exit0; + + } else { + /* check if same flow type is requested, otherwise fail */ + if (ndev->flow_profile != flow_profile) { + NT_LOG(ERR, FILTER, + "ERROR: Different flow types requested on same NIC device. 
Not supported."); + goto err_exit0; + } + } + + /* Allocate the requested queues in HW for this dev */ + + for (i = 0; i < alloc_rx_queues; i++) { + eth_dev->rx_queue[i] = queue_ids[i]; + + if (i == 0 && (flow_profile == FLOW_ETH_DEV_PROFILE_INLINE && exception_path)) { + /* + * Init QSL UNM - unmatched - redirects otherwise discarded + * packets in QSL + */ + if (hw_mod_qsl_unmq_set(&ndev->be, HW_QSL_UNMQ_DEST_QUEUE, eth_dev->port, + eth_dev->rx_queue[0].hw_id) < 0) + goto err_exit0; + + if (hw_mod_qsl_unmq_set(&ndev->be, HW_QSL_UNMQ_EN, eth_dev->port, 1) < 0) + goto err_exit0; + + if (hw_mod_qsl_unmq_flush(&ndev->be, eth_dev->port, 1) < 0) + goto err_exit0; + } + + eth_dev->num_queues++; + } + + eth_dev->rss_target_id = -1; + + if (flow_profile == FLOW_ETH_DEV_PROFILE_INLINE) { + for (i = 0; i < eth_dev->num_queues; i++) { + uint32_t qen_value = 0; + uint32_t queue_id = (uint32_t)eth_dev->rx_queue[i].hw_id; + + hw_mod_qsl_qen_get(&ndev->be, HW_QSL_QEN_EN, queue_id / 4, &qen_value); + hw_mod_qsl_qen_set(&ndev->be, HW_QSL_QEN_EN, queue_id / 4, + qen_value | (1 << (queue_id % 4))); + hw_mod_qsl_qen_flush(&ndev->be, queue_id / 4, 1); + } + } + + *rss_target_id = eth_dev->rss_target_id; + + nic_insert_eth_port_dev(ndev, eth_dev); + + rte_spinlock_unlock(&ndev->mtx); + rte_spinlock_unlock(&base_mtx); + return eth_dev; + +err_exit0: + rte_spinlock_unlock(&ndev->mtx); + rte_spinlock_unlock(&base_mtx); + + if (eth_dev) + free(eth_dev); + +#ifdef FLOW_DEBUG + ndev->be.iface->set_debug_mode(ndev->be.be_dev, FLOW_BACKEND_DEBUG_MODE_NONE); +#endif + + NT_LOG(DBG, FILTER, "ERR in %s", __func__); + return NULL; /* Error exit */ +} + struct flow_nic_dev *flow_api_create(uint8_t adapter_no, const struct flow_api_backend_ops *be_if, void *be_dev) { @@ -337,7 +759,7 @@ struct flow_nic_dev *flow_api_create(uint8_t adapter_no, const struct flow_api_b for (int i = 0; i < RES_COUNT; i++) assert(ndev->res[i].alloc_bm); - pthread_mutex_init(&ndev->mtx, NULL); + rte_spinlock_init(&ndev->mtx); list_insert_flow_nic(ndev); return ndev; @@ -380,9 +802,465 @@ void *flow_api_get_be_dev(struct flow_nic_dev *ndev) return ndev->be.be_dev; } +/* Information for a given RSS type. */ +struct rss_type_info { + uint64_t rss_type; + const char *str; +}; + +static struct rss_type_info rss_to_string[] = { + /* RTE_BIT64(2) IPv4 dst + IPv4 src */ + RSS_TO_STRING(RTE_ETH_RSS_IPV4), + /* RTE_BIT64(3) IPv4 dst + IPv4 src + Identification of group of fragments */ + RSS_TO_STRING(RTE_ETH_RSS_FRAG_IPV4), + /* RTE_BIT64(4) IPv4 dst + IPv4 src + L4 protocol */ + RSS_TO_STRING(RTE_ETH_RSS_NONFRAG_IPV4_TCP), + /* RTE_BIT64(5) IPv4 dst + IPv4 src + L4 protocol */ + RSS_TO_STRING(RTE_ETH_RSS_NONFRAG_IPV4_UDP), + /* RTE_BIT64(6) IPv4 dst + IPv4 src + L4 protocol */ + RSS_TO_STRING(RTE_ETH_RSS_NONFRAG_IPV4_SCTP), + /* RTE_BIT64(7) IPv4 dst + IPv4 src + L4 protocol */ + RSS_TO_STRING(RTE_ETH_RSS_NONFRAG_IPV4_OTHER), + /* + * RTE_BIT64(14) 128-bits of L2 payload starting after src MAC, i.e. including optional + * VLAN tag and ethertype. Overrides all L3 and L4 flags at the same level, but inner + * L2 payload can be combined with outer S-VLAN and GTPU TEID flags. 
+ */ + RSS_TO_STRING(RTE_ETH_RSS_L2_PAYLOAD), + /* RTE_BIT64(18) L4 dst + L4 src + L4 protocol - see comment of RTE_ETH_RSS_L4_CHKSUM */ + RSS_TO_STRING(RTE_ETH_RSS_PORT), + /* RTE_BIT64(19) Not supported */ + RSS_TO_STRING(RTE_ETH_RSS_VXLAN), + /* RTE_BIT64(20) Not supported */ + RSS_TO_STRING(RTE_ETH_RSS_GENEVE), + /* RTE_BIT64(21) Not supported */ + RSS_TO_STRING(RTE_ETH_RSS_NVGRE), + /* RTE_BIT64(23) GTP TEID - always from outer GTPU header */ + RSS_TO_STRING(RTE_ETH_RSS_GTPU), + /* RTE_BIT64(24) MAC dst + MAC src */ + RSS_TO_STRING(RTE_ETH_RSS_ETH), + /* RTE_BIT64(25) outermost VLAN ID + L4 protocol */ + RSS_TO_STRING(RTE_ETH_RSS_S_VLAN), + /* RTE_BIT64(26) innermost VLAN ID + L4 protocol */ + RSS_TO_STRING(RTE_ETH_RSS_C_VLAN), + /* RTE_BIT64(27) Not supported */ + RSS_TO_STRING(RTE_ETH_RSS_ESP), + /* RTE_BIT64(28) Not supported */ + RSS_TO_STRING(RTE_ETH_RSS_AH), + /* RTE_BIT64(29) Not supported */ + RSS_TO_STRING(RTE_ETH_RSS_L2TPV3), + /* RTE_BIT64(30) Not supported */ + RSS_TO_STRING(RTE_ETH_RSS_PFCP), + /* RTE_BIT64(31) Not supported */ + RSS_TO_STRING(RTE_ETH_RSS_PPPOE), + /* RTE_BIT64(32) Not supported */ + RSS_TO_STRING(RTE_ETH_RSS_ECPRI), + /* RTE_BIT64(33) Not supported */ + RSS_TO_STRING(RTE_ETH_RSS_MPLS), + /* RTE_BIT64(34) IPv4 Header checksum + L4 protocol */ + RSS_TO_STRING(RTE_ETH_RSS_IPV4_CHKSUM), + + /* + * if combined with RTE_ETH_RSS_NONFRAG_IPV4_[TCP|UDP|SCTP] then + * L4 protocol + chosen protocol header Checksum + * else + * error + */ + /* RTE_BIT64(35) */ + RSS_TO_STRING(RTE_ETH_RSS_L4_CHKSUM), +#ifndef ANDROMEDA_DPDK_21_11 + /* RTE_BIT64(36) Not supported */ + RSS_TO_STRING(RTE_ETH_RSS_L2TPV2), +#endif + + { RTE_BIT64(37), "unknown_RTE_BIT64(37)" }, + { RTE_BIT64(38), "unknown_RTE_BIT64(38)" }, + { RTE_BIT64(39), "unknown_RTE_BIT64(39)" }, + { RTE_BIT64(40), "unknown_RTE_BIT64(40)" }, + { RTE_BIT64(41), "unknown_RTE_BIT64(41)" }, + { RTE_BIT64(42), "unknown_RTE_BIT64(42)" }, + { RTE_BIT64(43), "unknown_RTE_BIT64(43)" }, + { RTE_BIT64(44), "unknown_RTE_BIT64(44)" }, + { RTE_BIT64(45), "unknown_RTE_BIT64(45)" }, + { RTE_BIT64(46), "unknown_RTE_BIT64(46)" }, + { RTE_BIT64(47), "unknown_RTE_BIT64(47)" }, + { RTE_BIT64(48), "unknown_RTE_BIT64(48)" }, + { RTE_BIT64(49), "unknown_RTE_BIT64(49)" }, + + /* RTE_BIT64(50) outermost encapsulation */ + RSS_TO_STRING(RTE_ETH_RSS_LEVEL_OUTERMOST), + /* RTE_BIT64(51) innermost encapsulation */ + RSS_TO_STRING(RTE_ETH_RSS_LEVEL_INNERMOST), + + /* RTE_BIT64(52) Not supported */ + RSS_TO_STRING(RTE_ETH_RSS_L3_PRE96), + /* RTE_BIT64(53) Not supported */ + RSS_TO_STRING(RTE_ETH_RSS_L3_PRE64), + /* RTE_BIT64(54) Not supported */ + RSS_TO_STRING(RTE_ETH_RSS_L3_PRE56), + /* RTE_BIT64(55) Not supported */ + RSS_TO_STRING(RTE_ETH_RSS_L3_PRE48), + /* RTE_BIT64(56) Not supported */ + RSS_TO_STRING(RTE_ETH_RSS_L3_PRE40), + /* RTE_BIT64(57) Not supported */ + RSS_TO_STRING(RTE_ETH_RSS_L3_PRE32), + + /* RTE_BIT64(58) */ + RSS_TO_STRING(RTE_ETH_RSS_L2_DST_ONLY), + /* RTE_BIT64(59) */ + RSS_TO_STRING(RTE_ETH_RSS_L2_SRC_ONLY), + /* RTE_BIT64(60) */ + RSS_TO_STRING(RTE_ETH_RSS_L4_DST_ONLY), + /* RTE_BIT64(61) */ + RSS_TO_STRING(RTE_ETH_RSS_L4_SRC_ONLY), + /* RTE_BIT64(62) */ + RSS_TO_STRING(RTE_ETH_RSS_L3_DST_ONLY), + /* RTE_BIT64(63) */ + RSS_TO_STRING(RTE_ETH_RSS_L3_SRC_ONLY), +}; + +int sprint_nt_rss_mask(char *str, uint16_t str_len, const char *prefix, uint64_t hash_mask) +{ + if (str == NULL || str_len == 0) + return -1; + + memset(str, 0x0, str_len); + uint16_t str_end = 0; + const struct rss_type_info *start = rss_to_string; + + for 
(const struct rss_type_info *p = start; p != start + ARRAY_SIZE(rss_to_string); ++p) { + if (p->rss_type & hash_mask) { + if (strlen(prefix) + strlen(p->str) < (size_t)(str_len - str_end)) { + snprintf(str + str_end, str_len - str_end, "%s", prefix); + str_end += strlen(prefix); + snprintf(str + str_end, str_len - str_end, "%s", p->str); + str_end += strlen(p->str); + + } else { + return -1; + } + } + } + + return 0; +} + +/* + * Hash + */ + +int flow_nic_set_hasher(struct flow_nic_dev *ndev, int hsh_idx, enum flow_nic_hash_e algorithm) +{ + hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_PRESET_ALL, hsh_idx, 0, 0); + + switch (algorithm) { + case HASH_ALGO_5TUPLE: + /* need to create an IPv6 hashing and enable the adaptive ip mask bit */ + hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_LOAD_DIST_TYPE, hsh_idx, 0, 2); + hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_QW0_PE, hsh_idx, 0, DYN_FINAL_IP_DST); + hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_QW0_OFS, hsh_idx, 0, -16); + hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_QW4_PE, hsh_idx, 0, DYN_FINAL_IP_DST); + hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_QW4_OFS, hsh_idx, 0, 0); + hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_W8_PE, hsh_idx, 0, DYN_L4); + hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_W8_OFS, hsh_idx, 0, 0); + hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_W9_PE, hsh_idx, 0, 0); + hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_W9_OFS, hsh_idx, 0, 0); + hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_W9_P, hsh_idx, 0, 0); + hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_P_MASK, hsh_idx, 0, 1); + hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 0, 0xffffffff); + hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 1, 0xffffffff); + hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 2, 0xffffffff); + hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 3, 0xffffffff); + hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 4, 0xffffffff); + hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 5, 0xffffffff); + hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 6, 0xffffffff); + hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 7, 0xffffffff); + hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 8, 0xffffffff); + hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 9, 0); + hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_SEED, hsh_idx, 0, 0xffffffff); + hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_HSH_VALID, hsh_idx, 0, 1); + hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_HSH_TYPE, hsh_idx, 0, HASH_5TUPLE); + hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_AUTO_IPV4_MASK, hsh_idx, 0, 1); + + NT_LOG(DBG, FILTER, "Set IPv6 5-tuple hasher with adaptive IPv4 hashing"); + break; + + default: + case HASH_ALGO_ROUND_ROBIN: + /* zero is round-robin */ + break; + } + + return 0; +} + +static int flow_dev_dump(struct flow_eth_dev *dev, + struct flow_handle *flow, + uint16_t caller_id, + FILE *file, + struct rte_flow_error *error) +{ + const struct profile_inline_ops *profile_inline_ops = get_profile_inline_ops(); + + if (profile_inline_ops == NULL) { + NT_LOG(ERR, FILTER, "%s: profile_inline module uninitialized", __func__); + return -1; + } + + return profile_inline_ops->flow_dev_dump_profile_inline(dev, flow, caller_id, file, error); +} + +int flow_nic_set_hasher_fields(struct flow_nic_dev *ndev, int hsh_idx, + struct nt_eth_rss_conf rss_conf) +{ + const struct profile_inline_ops *profile_inline_ops = get_profile_inline_ops(); + + if (profile_inline_ops == NULL) { + NT_LOG(ERR, FILTER, "%s: profile_inline module uninitialized", 
__func__); + return -1; + } + + return profile_inline_ops->flow_nic_set_hasher_fields_inline(ndev, hsh_idx, rss_conf); +} + +static int flow_get_aged_flows(struct flow_eth_dev *dev, + uint16_t caller_id, + void **context, + uint32_t nb_contexts, + struct rte_flow_error *error) +{ + const struct profile_inline_ops *profile_inline_ops = get_profile_inline_ops(); + + if (profile_inline_ops == NULL) { + NT_LOG_DBGX(ERR, FILTER, "profile_inline_ops uninitialized"); + return -1; + } + + if (nb_contexts > 0 && !context) { + error->type = RTE_FLOW_ERROR_TYPE_UNSPECIFIED; + error->message = "rte_flow_get_aged_flows - empty context"; + return -1; + } + + return profile_inline_ops->flow_get_aged_flows_profile_inline(dev, caller_id, context, + nb_contexts, error); +} + +static int flow_info_get(struct flow_eth_dev *dev, uint8_t caller_id, + struct rte_flow_port_info *port_info, struct rte_flow_queue_info *queue_info, + struct rte_flow_error *error) +{ + const struct profile_inline_ops *profile_inline_ops = get_profile_inline_ops(); + + if (profile_inline_ops == NULL) { + NT_LOG_DBGX(ERR, FILTER, "profile_inline module uninitialized"); + return -1; + } + + return profile_inline_ops->flow_info_get_profile_inline(dev, caller_id, port_info, + queue_info, error); +} + +static int flow_configure(struct flow_eth_dev *dev, uint8_t caller_id, + const struct rte_flow_port_attr *port_attr, uint16_t nb_queue, + const struct rte_flow_queue_attr *queue_attr[], struct rte_flow_error *error) +{ + const struct profile_inline_ops *profile_inline_ops = get_profile_inline_ops(); + + if (profile_inline_ops == NULL) { + NT_LOG_DBGX(ERR, FILTER, "profile_inline module uninitialized"); + return -1; + } + + return profile_inline_ops->flow_configure_profile_inline(dev, caller_id, port_attr, + nb_queue, queue_attr, error); +} + +/* + * Flow Asynchronous operation API + */ + +static struct flow_pattern_template * +flow_pattern_template_create(struct flow_eth_dev *dev, + const struct rte_flow_pattern_template_attr *template_attr, uint16_t caller_id, + const struct rte_flow_item pattern[], struct rte_flow_error *error) +{ + const struct profile_inline_ops *profile_inline_ops = get_profile_inline_ops(); + + if (profile_inline_ops == NULL) { + NT_LOG_DBGX(ERR, FILTER, "profile_inline module uninitialized"); + return NULL; + } + + return profile_inline_ops->flow_pattern_template_create_profile_inline(dev, template_attr, + caller_id, pattern, error); +} + +static int flow_pattern_template_destroy(struct flow_eth_dev *dev, + struct flow_pattern_template *pattern_template, + struct rte_flow_error *error) +{ + const struct profile_inline_ops *profile_inline_ops = get_profile_inline_ops(); + + if (profile_inline_ops == NULL) { + NT_LOG_DBGX(ERR, FILTER, "profile_inline module uninitialized"); + return -1; + } + + return profile_inline_ops->flow_pattern_template_destroy_profile_inline(dev, + pattern_template, + error); +} + +static struct flow_actions_template * +flow_actions_template_create(struct flow_eth_dev *dev, + const struct rte_flow_actions_template_attr *template_attr, uint16_t caller_id, + const struct rte_flow_action actions[], const struct rte_flow_action masks[], + struct rte_flow_error *error) +{ + const struct profile_inline_ops *profile_inline_ops = get_profile_inline_ops(); + + if (profile_inline_ops == NULL) { + NT_LOG_DBGX(ERR, FILTER, "profile_inline module uninitialized"); + return NULL; + } + + return profile_inline_ops->flow_actions_template_create_profile_inline(dev, template_attr, + caller_id, actions, masks, 
error); +} + +static int flow_actions_template_destroy(struct flow_eth_dev *dev, + struct flow_actions_template *actions_template, + struct rte_flow_error *error) +{ + const struct profile_inline_ops *profile_inline_ops = get_profile_inline_ops(); + + if (profile_inline_ops == NULL) { + NT_LOG_DBGX(ERR, FILTER, "profile_inline module uninitialized"); + return -1; + } + + return profile_inline_ops->flow_actions_template_destroy_profile_inline(dev, + actions_template, + error); +} + +static struct flow_template_table *flow_template_table_create(struct flow_eth_dev *dev, + const struct rte_flow_template_table_attr *table_attr, uint16_t forced_vlan_vid, + uint16_t caller_id, struct flow_pattern_template *pattern_templates[], + uint8_t nb_pattern_templates, struct flow_actions_template *actions_templates[], + uint8_t nb_actions_templates, struct rte_flow_error *error) +{ + const struct profile_inline_ops *profile_inline_ops = get_profile_inline_ops(); + + if (profile_inline_ops == NULL) { + NT_LOG_DBGX(ERR, FILTER, "profile_inline module uninitialized"); + return NULL; + } + + return profile_inline_ops->flow_template_table_create_profile_inline(dev, table_attr, + forced_vlan_vid, caller_id, pattern_templates, nb_pattern_templates, + actions_templates, nb_actions_templates, error); +} + +static int flow_template_table_destroy(struct flow_eth_dev *dev, + struct flow_template_table *template_table, + struct rte_flow_error *error) +{ + const struct profile_inline_ops *profile_inline_ops = get_profile_inline_ops(); + + if (profile_inline_ops == NULL) { + NT_LOG_DBGX(ERR, FILTER, "profile_inline module uninitialized"); + return -1; + } + + return profile_inline_ops->flow_template_table_destroy_profile_inline(dev, template_table, + error); +} + +static struct flow_handle * +flow_async_create(struct flow_eth_dev *dev, uint32_t queue_id, + const struct rte_flow_op_attr *op_attr, struct flow_template_table *template_table, + const struct rte_flow_item pattern[], uint8_t pattern_template_index, + const struct rte_flow_action actions[], uint8_t actions_template_index, void *user_data, + struct rte_flow_error *error) +{ + const struct profile_inline_ops *profile_inline_ops = get_profile_inline_ops(); + + if (profile_inline_ops == NULL) { + NT_LOG_DBGX(ERR, FILTER, "profile_inline module uninitialized"); + return NULL; + } + + return profile_inline_ops->flow_async_create_profile_inline(dev, queue_id, op_attr, + template_table, pattern, pattern_template_index, actions, + actions_template_index, user_data, error); +} + +static int flow_async_destroy(struct flow_eth_dev *dev, uint32_t queue_id, + const struct rte_flow_op_attr *op_attr, struct flow_handle *flow, + void *user_data, struct rte_flow_error *error) +{ + const struct profile_inline_ops *profile_inline_ops = get_profile_inline_ops(); + + if (profile_inline_ops == NULL) { + NT_LOG_DBGX(ERR, FILTER, "profile_inline module uninitialized"); + return -1; + } + + return profile_inline_ops->flow_async_destroy_profile_inline(dev, queue_id, op_attr, flow, + user_data, error); +} +int flow_get_flm_stats(struct flow_nic_dev *ndev, uint64_t *data, uint64_t size) +{ + const struct profile_inline_ops *profile_inline_ops = get_profile_inline_ops(); + + if (profile_inline_ops == NULL) + return -1; + + if (ndev->flow_profile == FLOW_ETH_DEV_PROFILE_INLINE) + return profile_inline_ops->flow_get_flm_stats_profile_inline(ndev, data, size); + + return -1; +} + static const struct flow_filter_ops ops = { .flow_filter_init = flow_filter_init, .flow_filter_done = 
flow_filter_done, + /* + * Device Management API + */ + .flow_get_eth_dev = flow_get_eth_dev, + /* + * NT Flow API + */ + .flow_create = flow_create, + .flow_destroy = flow_destroy, + .flow_flush = flow_flush, + .flow_actions_update = flow_actions_update, + .flow_dev_dump = flow_dev_dump, + .flow_get_flm_stats = flow_get_flm_stats, + .flow_get_aged_flows = flow_get_aged_flows, + + /* + * NT Flow asynchronous operations API + */ + .flow_info_get = flow_info_get, + .flow_configure = flow_configure, + .flow_pattern_template_create = flow_pattern_template_create, + .flow_pattern_template_destroy = flow_pattern_template_destroy, + .flow_actions_template_create = flow_actions_template_create, + .flow_actions_template_destroy = flow_actions_template_destroy, + .flow_template_table_create = flow_template_table_create, + .flow_template_table_destroy = flow_template_table_destroy, + .flow_async_create = flow_async_create, + .flow_async_destroy = flow_async_destroy, + + /* + * Other + */ + .hw_mod_hsh_rcp_flush = hw_mod_hsh_rcp_flush, + .flow_nic_set_hasher_fields = flow_nic_set_hasher_fields, }; void init_flow_filter(void) diff --git a/drivers/net/ntnic/nthw/flow_api/flow_group.c b/drivers/net/ntnic/nthw/flow_api/flow_group.c new file mode 100644 index 0000000000..f76986b178 --- /dev/null +++ b/drivers/net/ntnic/nthw/flow_api/flow_group.c @@ -0,0 +1,99 @@ +/* + * SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2023 Napatech A/S + */ + +#include <stdint.h> +#include <stdlib.h> + +#include "flow_api_engine.h" + +#define OWNER_ID_COUNT 256 +#define PORT_COUNT 8 + +struct group_lookup_entry_s { + uint64_t ref_counter; + uint32_t *reverse_lookup; +}; + +struct group_handle_s { + uint32_t group_count; + + uint32_t *translation_table; + + struct group_lookup_entry_s *lookup_entries; +}; + +int flow_group_handle_create(void **handle, uint32_t group_count) +{ + struct group_handle_s *group_handle; + + *handle = calloc(1, sizeof(struct group_handle_s)); + group_handle = *handle; + + group_handle->group_count = group_count; + group_handle->translation_table = + calloc((uint32_t)(group_count * PORT_COUNT * OWNER_ID_COUNT), sizeof(uint32_t)); + group_handle->lookup_entries = calloc(group_count, sizeof(struct group_lookup_entry_s)); + + return *handle != NULL ?
0 : -1; +} + +int flow_group_handle_destroy(void **handle) +{ + if (*handle) { + struct group_handle_s *group_handle = (struct group_handle_s *)*handle; + + free(group_handle->translation_table); + free(group_handle->lookup_entries); + + free(*handle); + *handle = NULL; + } + + return 0; +} + +int flow_group_translate_get(void *handle, uint8_t owner_id, uint8_t port_id, uint32_t group_in, + uint32_t *group_out) +{ + struct group_handle_s *group_handle = (struct group_handle_s *)handle; + uint32_t *table_ptr; + uint32_t lookup; + + if (group_handle == NULL || group_in >= group_handle->group_count || port_id >= PORT_COUNT) + return -1; + + /* Don't translate group 0 */ + if (group_in == 0) { + *group_out = 0; + return 0; + } + + table_ptr = &group_handle->translation_table[port_id * OWNER_ID_COUNT * PORT_COUNT + + owner_id * OWNER_ID_COUNT + group_in]; + lookup = *table_ptr; + + if (lookup == 0) { + for (lookup = 1; lookup < group_handle->group_count && + group_handle->lookup_entries[lookup].ref_counter > 0; + ++lookup) + ; + + if (lookup < group_handle->group_count) { + group_handle->lookup_entries[lookup].reverse_lookup = table_ptr; + group_handle->lookup_entries[lookup].ref_counter += 1; + + *table_ptr = lookup; + + } else { + return -1; + } + + } else { + group_handle->lookup_entries[lookup].ref_counter += 1; + } + + *group_out = lookup; + return 0; +} diff --git a/drivers/net/ntnic/nthw/flow_api/flow_hasher.c b/drivers/net/ntnic/nthw/flow_api/flow_hasher.c new file mode 100644 index 0000000000..86dfc16e79 --- /dev/null +++ b/drivers/net/ntnic/nthw/flow_api/flow_hasher.c @@ -0,0 +1,156 @@ +/* + * SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2023 Napatech A/S + */ + +#include <math.h> + +#include "flow_hasher.h" + +static uint32_t shuffle(uint32_t x) +{ + return ((x & 0x00000002) << 29) | ((x & 0xAAAAAAA8) >> 3) | ((x & 0x15555555) << 3) | + ((x & 0x40000000) >> 29); +} + +static uint32_t ror_inv(uint32_t x, const int s) +{ + return (x >> s) | ((~x) << (32 - s)); +} + +static uint32_t combine(uint32_t x, uint32_t y) +{ + uint32_t x1 = ror_inv(x, 15); + uint32_t x2 = ror_inv(x, 13); + uint32_t y1 = ror_inv(y, 3); + uint32_t y2 = ror_inv(y, 27); + + return x ^ y ^ + ((x1 & y1 & ~x2 & ~y2) | (x1 & ~y1 & x2 & ~y2) | (x1 & ~y1 & ~x2 & y2) | + (~x1 & y1 & x2 & ~y2) | (~x1 & y1 & ~x2 & y2) | (~x1 & ~y1 & x2 & y2)); +} + +static uint32_t mix(uint32_t x, uint32_t y) +{ + return shuffle(combine(x, y)); +} + +static uint64_t ror_inv3(uint64_t x) +{ + const uint64_t m = 0xE0000000E0000000ULL; + + return ((x >> 3) | m) ^ ((x << 29) & m); +} + +static uint64_t ror_inv13(uint64_t x) +{ + const uint64_t m = 0xFFF80000FFF80000ULL; + + return ((x >> 13) | m) ^ ((x << 19) & m); +} + +static uint64_t ror_inv15(uint64_t x) +{ + const uint64_t m = 0xFFFE0000FFFE0000ULL; + + return ((x >> 15) | m) ^ ((x << 17) & m); +} + +static uint64_t ror_inv27(uint64_t x) +{ + const uint64_t m = 0xFFFFFFE0FFFFFFE0ULL; + + return ((x >> 27) | m) ^ ((x << 5) & m); +} + +static uint64_t shuffle64(uint64_t x) +{ + return ((x & 0x0000000200000002) << 29) | ((x & 0xAAAAAAA8AAAAAAA8) >> 3) | + ((x & 0x1555555515555555) << 3) | ((x & 0x4000000040000000) >> 29); +} + +static uint64_t pair(uint32_t x, uint32_t y) +{ + return ((uint64_t)x << 32) | y; +} + +static uint64_t combine64(uint64_t x, uint64_t y) +{ + uint64_t x1 = ror_inv15(x); + uint64_t x2 = ror_inv13(x); + uint64_t y1 = ror_inv3(y); + uint64_t y2 = ror_inv27(y); + + return x ^ y ^ + ((x1 & y1 & ~x2 & ~y2) | (x1 & ~y1 & x2 & ~y2) | (x1 & ~y1 & ~x2 & y2) | + (~x1 & y1 & x2
& ~y2) | (~x1 & y1 & ~x2 & y2) | (~x1 & ~y1 & x2 & y2)); +} + +static uint64_t mix64(uint64_t x, uint64_t y) +{ + return shuffle64(combine64(x, y)); +} + +static uint32_t calc16(const uint32_t key[16]) +{ + /* + * 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 Layer 0 + * \./ \./ \./ \./ \./ \./ \./ \./ + * 0 1 2 3 4 5 6 7 Layer 1 + * \__.__/ \__.__/ \__.__/ \__.__/ + * 0 1 2 3 Layer 2 + * \______.______/ \______.______/ + * 0 1 Layer 3 + * \______________.______________/ + * 0 Layer 4 + * / \ + * \./ + * 0 Layer 5 + * / \ + * \./ Layer 6 + * value + */ + + uint64_t z; + uint32_t x; + + z = mix64(mix64(mix64(pair(key[0], key[8]), pair(key[1], key[9])), + mix64(pair(key[2], key[10]), pair(key[3], key[11]))), + mix64(mix64(pair(key[4], key[12]), pair(key[5], key[13])), + mix64(pair(key[6], key[14]), pair(key[7], key[15])))); + + x = mix((uint32_t)(z >> 32), (uint32_t)z); + x = mix(x, ror_inv(x, 17)); + x = combine(x, ror_inv(x, 17)); + + return x; +} + +uint32_t gethash(struct hasher_s *hsh, const uint32_t key[16], int *result) +{ + uint64_t val; + uint32_t res; + + val = calc16(key); + res = (uint32_t)val; + + if (hsh->cam_bw > 32) + val = (val << (hsh->cam_bw - 32)) ^ val; + + for (int i = 0; i < hsh->banks; i++) { + result[i] = (unsigned int)(val & hsh->cam_records_bw_mask); + val = val >> hsh->cam_records_bw; + } + + return res; +} + +int init_hasher(struct hasher_s *hsh, int banks, int nb_records) +{ + hsh->banks = banks; + hsh->cam_records_bw = (int)(log2(nb_records - 1) + 1); + hsh->cam_records_bw_mask = (1U << hsh->cam_records_bw) - 1; + hsh->cam_bw = hsh->banks * hsh->cam_records_bw; + + return 0; +} diff --git a/drivers/net/ntnic/nthw/flow_api/flow_hasher.h b/drivers/net/ntnic/nthw/flow_api/flow_hasher.h new file mode 100644 index 0000000000..15de8e9933 --- /dev/null +++ b/drivers/net/ntnic/nthw/flow_api/flow_hasher.h @@ -0,0 +1,21 @@ +/* + * SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2023 Napatech A/S + */ + +#ifndef _FLOW_HASHER_H_ +#define _FLOW_HASHER_H_ + +#include + +struct hasher_s { + int banks; + int cam_records_bw; + uint32_t cam_records_bw_mask; + int cam_bw; +}; + +int init_hasher(struct hasher_s *hsh, int _banks, int nb_records); +uint32_t gethash(struct hasher_s *hsh, const uint32_t key[16], int *result); + +#endif /* _FLOW_HASHER_H_ */ diff --git a/drivers/net/ntnic/nthw/flow_api/flow_id_table.c b/drivers/net/ntnic/nthw/flow_api/flow_id_table.c new file mode 100644 index 0000000000..a63f5542d1 --- /dev/null +++ b/drivers/net/ntnic/nthw/flow_api/flow_id_table.c @@ -0,0 +1,145 @@ +/* + * SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2024 Napatech A/S + */ + +#include +#include +#include + +#include "flow_id_table.h" +#include "rte_spinlock.h" + +#define NTNIC_ARRAY_BITS 14 +#define NTNIC_ARRAY_SIZE (1 << NTNIC_ARRAY_BITS) +#define NTNIC_ARRAY_MASK (NTNIC_ARRAY_SIZE - 1) +#define NTNIC_MAX_ID (NTNIC_ARRAY_SIZE * NTNIC_ARRAY_SIZE) +#define NTNIC_MAX_ID_MASK (NTNIC_MAX_ID - 1) +#define NTNIC_MIN_FREE 1000 + +struct ntnic_id_table_element { + union flm_handles handle; + uint8_t caller_id; + uint8_t type; +}; + +struct ntnic_id_table_data { + struct ntnic_id_table_element *arrays[NTNIC_ARRAY_SIZE]; + rte_spinlock_t mtx; + + uint32_t next_id; + + uint32_t free_head; + uint32_t free_tail; + uint32_t free_count; +}; + +static inline struct ntnic_id_table_element * +ntnic_id_table_array_find_element(struct ntnic_id_table_data *handle, uint32_t id) +{ + uint32_t idx_d1 = id & NTNIC_ARRAY_MASK; + uint32_t idx_d2 = (id >> NTNIC_ARRAY_BITS) & NTNIC_ARRAY_MASK; + + if 
(handle->arrays[idx_d2] == NULL) { + handle->arrays[idx_d2] = + calloc(NTNIC_ARRAY_SIZE, sizeof(struct ntnic_id_table_element)); + } + + return &handle->arrays[idx_d2][idx_d1]; +} + +static inline uint32_t ntnic_id_table_array_pop_free_id(struct ntnic_id_table_data *handle) +{ + uint32_t id = 0; + + if (handle->free_count > NTNIC_MIN_FREE) { + struct ntnic_id_table_element *element = + ntnic_id_table_array_find_element(handle, handle->free_tail); + id = handle->free_tail; + + handle->free_tail = element->handle.idx & NTNIC_MAX_ID_MASK; + handle->free_count -= 1; + } + + return id; +} + +void *ntnic_id_table_create(void) +{ + struct ntnic_id_table_data *handle = calloc(1, sizeof(struct ntnic_id_table_data)); + + rte_spinlock_init(&handle->mtx); + handle->next_id = 1; + + return handle; +} + +void ntnic_id_table_destroy(void *id_table) +{ + struct ntnic_id_table_data *handle = id_table; + + for (uint32_t i = 0; i < NTNIC_ARRAY_SIZE; ++i) + free(handle->arrays[i]); + + free(id_table); +} + +uint32_t ntnic_id_table_get_id(void *id_table, union flm_handles flm_h, uint8_t caller_id, + uint8_t type) +{ + struct ntnic_id_table_data *handle = id_table; + + rte_spinlock_lock(&handle->mtx); + + uint32_t new_id = ntnic_id_table_array_pop_free_id(handle); + + if (new_id == 0) + new_id = handle->next_id++; + + struct ntnic_id_table_element *element = ntnic_id_table_array_find_element(handle, new_id); + element->caller_id = caller_id; + element->type = type; + memcpy(&element->handle, &flm_h, sizeof(union flm_handles)); + + rte_spinlock_unlock(&handle->mtx); + + return new_id; +} + +void ntnic_id_table_free_id(void *id_table, uint32_t id) +{ + struct ntnic_id_table_data *handle = id_table; + + rte_spinlock_lock(&handle->mtx); + + struct ntnic_id_table_element *current_element = + ntnic_id_table_array_find_element(handle, id); + memset(current_element, 0, sizeof(struct ntnic_id_table_element)); + + struct ntnic_id_table_element *element = + ntnic_id_table_array_find_element(handle, handle->free_head); + element->handle.idx = id; + handle->free_head = id; + handle->free_count += 1; + + if (handle->free_tail == 0) + handle->free_tail = handle->free_head; + + rte_spinlock_unlock(&handle->mtx); +} + +void ntnic_id_table_find(void *id_table, uint32_t id, union flm_handles *flm_h, uint8_t *caller_id, + uint8_t *type) +{ + struct ntnic_id_table_data *handle = id_table; + + rte_spinlock_lock(&handle->mtx); + + struct ntnic_id_table_element *element = ntnic_id_table_array_find_element(handle, id); + + *caller_id = element->caller_id; + *type = element->type; + memcpy(flm_h, &element->handle, sizeof(union flm_handles)); + + rte_spinlock_unlock(&handle->mtx); +} diff --git a/drivers/net/ntnic/nthw/flow_api/flow_id_table.h b/drivers/net/ntnic/nthw/flow_api/flow_id_table.h new file mode 100644 index 0000000000..edb4f42729 --- /dev/null +++ b/drivers/net/ntnic/nthw/flow_api/flow_id_table.h @@ -0,0 +1,26 @@ +/* + * SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2024 Napatech A/S + */ + +#ifndef _FLOW_ID_TABLE_H_ +#define _FLOW_ID_TABLE_H_ + +#include + +union flm_handles { + uint64_t idx; + void *p; +}; + +void *ntnic_id_table_create(void); +void ntnic_id_table_destroy(void *id_table); + +uint32_t ntnic_id_table_get_id(void *id_table, union flm_handles flm_h, uint8_t caller_id, + uint8_t type); +void ntnic_id_table_free_id(void *id_table, uint32_t id); + +void ntnic_id_table_find(void *id_table, uint32_t id, union flm_handles *flm_h, uint8_t *caller_id, + uint8_t *type); + +#endif /* FLOW_ID_TABLE_H_ */ diff 
--git a/drivers/net/ntnic/nthw/flow_api/flow_km.c b/drivers/net/ntnic/nthw/flow_api/flow_km.c index e04cd5e857..f79919cb81 100644 --- a/drivers/net/ntnic/nthw/flow_api/flow_km.c +++ b/drivers/net/ntnic/nthw/flow_api/flow_km.c @@ -3,10 +3,99 @@ * Copyright(c) 2023 Napatech A/S */ +#include #include #include "hw_mod_backend.h" #include "flow_api_engine.h" +#include "nt_util.h" +#include "flow_hasher.h" + +#define MAX_QWORDS 2 +#define MAX_SWORDS 2 + +#define CUCKOO_MOVE_MAX_DEPTH 8 + +#define NUM_CAM_MASKS (ARRAY_SIZE(cam_masks)) + +#define CAM_DIST_IDX(bnk, rec) ((bnk) * km->be->km.nb_cam_records + (rec)) +#define CAM_KM_DIST_IDX(bnk) \ + ({ \ + int _temp_bnk = (bnk); \ + CAM_DIST_IDX(_temp_bnk, km->record_indexes[_temp_bnk]); \ + }) + +#define TCAM_DIST_IDX(bnk, rec) ((bnk) * km->be->km.nb_tcam_bank_width + (rec)) + +#define CAM_ENTRIES \ + (km->be->km.nb_cam_banks * km->be->km.nb_cam_records * sizeof(struct cam_distrib_s)) +#define TCAM_ENTRIES \ + (km->be->km.nb_tcam_bank_width * km->be->km.nb_tcam_banks * sizeof(struct tcam_distrib_s)) + +/* + * CAM structures and defines + */ +struct cam_distrib_s { + struct km_flow_def_s *km_owner; +}; + +static const struct cam_match_masks_s { + uint32_t word_len; + uint32_t key_mask[4]; +} cam_masks[] = { + { 4, { 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff } }, /* IP6_SRC, IP6_DST */ + { 4, { 0xffffffff, 0xffffffff, 0xffffffff, 0xffff0000 } }, /* DMAC,SMAC,ethtype */ + { 4, { 0xffffffff, 0xffff0000, 0x00000000, 0xffff0000 } }, /* DMAC,ethtype */ + { 4, { 0x00000000, 0x0000ffff, 0xffffffff, 0xffff0000 } }, /* SMAC,ethtype */ + { 4, { 0xffffffff, 0xffffffff, 0xffffffff, 0x00000000 } }, /* ETH_128 */ + { 2, { 0xffffffff, 0xffffffff, 0x00000000, 0x00000000 } }, /* IP4_COMBINED */ + /* + * ETH_TYPE, IP4_TTL_PROTO, IP4_SRC, IP4_DST, IP6_FLOW_TC, + * IP6_NEXT_HDR_HOP, TP_PORT_COMBINED, SIDEBAND_VNI + */ + { 1, { 0xffffffff, 0x00000000, 0x00000000, 0x00000000 } }, + /* IP4_IHL_TOS, TP_PORT_SRC32_OR_ICMP, TCP_CTRL */ + { 1, { 0xffff0000, 0x00000000, 0x00000000, 0x00000000 } }, + { 1, { 0x0000ffff, 0x00000000, 0x00000000, 0x00000000 } }, /* TP_PORT_DST32 */ + /* IPv4 TOS mask bits used often by OVS */ + { 1, { 0x00030000, 0x00000000, 0x00000000, 0x00000000 } }, + /* IPv6 TOS mask bits used often by OVS */ + { 1, { 0x00300000, 0x00000000, 0x00000000, 0x00000000 } }, +}; + +static int cam_addr_reserved_stack[CUCKOO_MOVE_MAX_DEPTH]; + +/* + * TCAM structures and defines + */ +struct tcam_distrib_s { + struct km_flow_def_s *km_owner; +}; + +static int tcam_find_mapping(struct km_flow_def_s *km); + +void km_attach_ndev_resource_management(struct km_flow_def_s *km, void **handle) +{ + /* + * KM entries occupied in CAM - to manage the cuckoo shuffling + * and manage CAM population and usage + * KM entries occupied in TCAM - to manage population and usage + */ + if (!*handle) { + *handle = calloc(1, + (size_t)CAM_ENTRIES + sizeof(uint32_t) + (size_t)TCAM_ENTRIES + + sizeof(struct hasher_s)); + NT_LOG(DBG, FILTER, "Allocate NIC DEV CAM and TCAM record manager"); + } + + km->cam_dist = (struct cam_distrib_s *)*handle; + km->cuckoo_moves = (uint32_t *)((char *)km->cam_dist + CAM_ENTRIES); + km->tcam_dist = + (struct tcam_distrib_s *)((char *)km->cam_dist + CAM_ENTRIES + sizeof(uint32_t)); + + km->hsh = (struct hasher_s *)((char *)km->tcam_dist + TCAM_ENTRIES); + init_hasher(km->hsh, km->be->km.nb_cam_banks, km->be->km.nb_cam_records); +} void km_free_ndev_resource_management(void **handle) { @@ -17,3 +106,1085 @@ void km_free_ndev_resource_management(void **handle) 
*handle = NULL; } + +int km_add_match_elem(struct km_flow_def_s *km, uint32_t e_word[4], uint32_t e_mask[4], + uint32_t word_len, enum frame_offs_e start_id, int8_t offset) +{ + /* valid word_len 1,2,4 */ + if (word_len == 3) { + word_len = 4; + e_word[3] = 0; + e_mask[3] = 0; + } + + if (word_len < 1 || word_len > 4) { + assert(0); + return -1; + } + + for (unsigned int i = 0; i < word_len; i++) { + km->match[km->num_ftype_elem].e_word[i] = e_word[i]; + km->match[km->num_ftype_elem].e_mask[i] = e_mask[i]; + } + + km->match[km->num_ftype_elem].word_len = word_len; + km->match[km->num_ftype_elem].rel_offs = offset; + km->match[km->num_ftype_elem].extr_start_offs_id = start_id; + + /* + * Determine here if this flow may better be put into TCAM + * Otherwise it will go into CAM + * This is dependent on a cam_masks list defined above + */ + km->match[km->num_ftype_elem].masked_for_tcam = 1; + + for (unsigned int msk = 0; msk < NUM_CAM_MASKS; msk++) { + if (word_len == cam_masks[msk].word_len) { + int match = 1; + + for (unsigned int wd = 0; wd < word_len; wd++) { + if (e_mask[wd] != cam_masks[msk].key_mask[wd]) { + match = 0; + break; + } + } + + if (match) { + /* Can go into CAM */ + km->match[km->num_ftype_elem].masked_for_tcam = 0; + } + } + } + + km->num_ftype_elem++; + return 0; +} + +static int get_word(struct km_flow_def_s *km, uint32_t size, int marked[]) +{ + for (int i = 0; i < km->num_ftype_elem; i++) + if (!marked[i] && !(km->match[i].extr_start_offs_id & SWX_INFO) && + km->match[i].word_len == size) + return i; + + return -1; +} + +int km_key_create(struct km_flow_def_s *km, uint32_t port_id) +{ + /* + * Create combined extractor mappings + * if key fields may be changed to cover un-mappable otherwise? + * split into cam and tcam and use synergy mode when available + */ + int match_marked[MAX_MATCH_FIELDS]; + int idx = 0; + int next = 0; + int m_idx; + int size; + + memset(match_marked, 0, sizeof(match_marked)); + + /* build QWords */ + for (int qwords = 0; qwords < MAX_QWORDS; qwords++) { + size = 4; + m_idx = get_word(km, size, match_marked); + + if (m_idx < 0) { + size = 2; + m_idx = get_word(km, size, match_marked); + + if (m_idx < 0) { + size = 1; + m_idx = get_word(km, 1, match_marked); + } + } + + if (m_idx < 0) { + /* no more defined */ + break; + } + + match_marked[m_idx] = 1; + + /* build match map list and set final extractor to use */ + km->match_map[next] = &km->match[m_idx]; + km->match[m_idx].extr = KM_USE_EXTRACTOR_QWORD; + + /* build final entry words and mask array */ + for (int i = 0; i < size; i++) { + km->entry_word[idx + i] = km->match[m_idx].e_word[i]; + km->entry_mask[idx + i] = km->match[m_idx].e_mask[i]; + } + + idx += size; + next++; + } + + m_idx = get_word(km, 4, match_marked); + + if (m_idx >= 0) { + /* cannot match more QWords */ + return -1; + } + + /* + * On km v6+ we have DWORDs here instead. However, we only use them as SWORDs for now + * No match would be able to exploit these as DWORDs because of maximum length of 12 words + * in CAM The last 2 words are taken by KCC-ID/SWX and Color. 
You could have one or none + * QWORDs where then both these DWORDs were possible in 10 words, but we don't have such + * use case built in yet + */ + /* build SWords */ + for (int swords = 0; swords < MAX_SWORDS; swords++) { + m_idx = get_word(km, 1, match_marked); + + if (m_idx < 0) { + /* no more defined */ + break; + } + + match_marked[m_idx] = 1; + /* build match map list and set final extractor to use */ + km->match_map[next] = &km->match[m_idx]; + km->match[m_idx].extr = KM_USE_EXTRACTOR_SWORD; + + /* build final entry words and mask array */ + km->entry_word[idx] = km->match[m_idx].e_word[0]; + km->entry_mask[idx] = km->match[m_idx].e_mask[0]; + idx++; + next++; + } + + /* + * Make sure we took them all + */ + m_idx = get_word(km, 1, match_marked); + + if (m_idx >= 0) { + /* cannot match more SWords */ + return -1; + } + + /* + * Handle SWX words specially + */ + int swx_found = 0; + + for (int i = 0; i < km->num_ftype_elem; i++) { + if (km->match[i].extr_start_offs_id & SWX_INFO) { + km->match_map[next] = &km->match[i]; + km->match[i].extr = KM_USE_EXTRACTOR_SWORD; + /* build final entry words and mask array */ + km->entry_word[idx] = km->match[i].e_word[0]; + km->entry_mask[idx] = km->match[i].e_mask[0]; + idx++; + next++; + swx_found = 1; + } + } + + assert(next == km->num_ftype_elem); + + km->key_word_size = idx; + km->port_id = port_id; + + km->target = KM_CAM; + + /* + * Finally decide if we want to put this match->action into the TCAM + * When SWX word used we need to put it into CAM always, no matter what mask pattern + * Later, when synergy mode is applied, we can do a split + */ + if (!swx_found && km->key_word_size <= 6) { + for (int i = 0; i < km->num_ftype_elem; i++) { + if (km->match_map[i]->masked_for_tcam) { + /* At least one */ + km->target = KM_TCAM; + } + } + } + + NT_LOG(DBG, FILTER, "This flow goes into %s", (km->target == KM_TCAM) ? "TCAM" : "CAM"); + + if (km->target == KM_TCAM) { + if (km->key_word_size > 10) { + /* do not support SWX in TCAM */ + return -1; + } + + /* + * adjust for unsupported key word size in TCAM + */ + if ((km->key_word_size == 5 || km->key_word_size == 7 || km->key_word_size == 9)) { + km->entry_mask[km->key_word_size] = 0; + km->key_word_size++; + } + + /* + * 1. 
the fact that the length of a key cannot change among the same used banks + * + * calculate possible start indexes + * unfortunately restrictions in TCAM lookup + * makes it hard to handle key lengths larger than 6 + * when other sizes should be possible too + */ + switch (km->key_word_size) { + case 1: + for (int i = 0; i < 4; i++) + km->start_offsets[0] = 8 + i; + + km->num_start_offsets = 4; + break; + + case 2: + km->start_offsets[0] = 6; + km->num_start_offsets = 1; + break; + + case 3: + km->start_offsets[0] = 0; + km->num_start_offsets = 1; + /* enlarge to 6 */ + km->entry_mask[km->key_word_size++] = 0; + km->entry_mask[km->key_word_size++] = 0; + km->entry_mask[km->key_word_size++] = 0; + break; + + case 4: + km->start_offsets[0] = 0; + km->num_start_offsets = 1; + /* enlarge to 6 */ + km->entry_mask[km->key_word_size++] = 0; + km->entry_mask[km->key_word_size++] = 0; + break; + + case 6: + km->start_offsets[0] = 0; + km->num_start_offsets = 1; + break; + + default: + NT_LOG(DBG, FILTER, "Final Key word size too large: %i", + km->key_word_size); + return -1; + } + } + + return 0; +} + +int km_key_compare(struct km_flow_def_s *km, struct km_flow_def_s *km1) +{ + if (km->target != km1->target || km->num_ftype_elem != km1->num_ftype_elem || + km->key_word_size != km1->key_word_size || km->info_set != km1->info_set) + return 0; + + /* + * before KCC-CAM: + * if port is added to match, then we can have different ports in CAT + * that reuses this flow type + */ + int port_match_included = 0, kcc_swx_used = 0; + + for (int i = 0; i < km->num_ftype_elem; i++) { + if (km->match[i].extr_start_offs_id == SB_MAC_PORT) { + port_match_included = 1; + break; + } + + if (km->match_map[i]->extr_start_offs_id == SB_KCC_ID) { + kcc_swx_used = 1; + break; + } + } + + /* + * If not using KCC and if port match is not included in CAM, + * we need to have same port_id to reuse + */ + if (!kcc_swx_used && !port_match_included && km->port_id != km1->port_id) + return 0; + + for (int i = 0; i < km->num_ftype_elem; i++) { + /* using same extractor types in same sequence */ + if (km->match_map[i]->extr_start_offs_id != + km1->match_map[i]->extr_start_offs_id || + km->match_map[i]->rel_offs != km1->match_map[i]->rel_offs || + km->match_map[i]->extr != km1->match_map[i]->extr || + km->match_map[i]->word_len != km1->match_map[i]->word_len) { + return 0; + } + } + + if (km->target == KM_CAM) { + /* in CAM must exactly match on all masks */ + for (int i = 0; i < km->key_word_size; i++) + if (km->entry_mask[i] != km1->entry_mask[i]) + return 0; + + /* Would be set later if not reusing from km1 */ + km->cam_paired = km1->cam_paired; + + } else if (km->target == KM_TCAM) { + /* + * If TCAM, we must make sure Recipe Key Mask does not + * mask out enable bits in masks + * Note: it is important that km1 is the original creator + * of the KM Recipe, since it contains its true masks + */ + for (int i = 0; i < km->key_word_size; i++) + if ((km->entry_mask[i] & km1->entry_mask[i]) != km->entry_mask[i]) + return 0; + + km->tcam_start_bank = km1->tcam_start_bank; + km->tcam_record = -1; /* needs to be found later */ + + } else { + NT_LOG(DBG, FILTER, "ERROR - KM target not defined or supported"); + return 0; + } + + /* + * Check for a flow clash. 
If already programmed return with -1 + */ + int double_match = 1; + + for (int i = 0; i < km->key_word_size; i++) { + if ((km->entry_word[i] & km->entry_mask[i]) != + (km1->entry_word[i] & km1->entry_mask[i])) { + double_match = 0; + break; + } + } + + if (double_match) + return -1; + + /* + * Note that TCAM and CAM may reuse same RCP and flow type + * when this happens, CAM entry wins on overlap + */ + + /* Use same KM Recipe and same flow type - return flow type */ + return km1->flow_type; +} + +int km_rcp_set(struct km_flow_def_s *km, int index) +{ + int qw = 0; + int sw = 0; + int swx = 0; + + hw_mod_km_rcp_set(km->be, HW_KM_RCP_PRESET_ALL, index, 0, 0); + + /* set extractor words, offs, contrib */ + for (int i = 0; i < km->num_ftype_elem; i++) { + switch (km->match_map[i]->extr) { + case KM_USE_EXTRACTOR_SWORD: + if (km->match_map[i]->extr_start_offs_id & SWX_INFO) { + if (km->target == KM_CAM && swx == 0) { + /* SWX */ + if (km->match_map[i]->extr_start_offs_id == SB_VNI) { + NT_LOG(DBG, FILTER, "Set KM SWX sel A - VNI"); + hw_mod_km_rcp_set(km->be, HW_KM_RCP_SWX_CCH, index, + 0, 1); + hw_mod_km_rcp_set(km->be, HW_KM_RCP_SWX_SEL_A, + index, 0, SWX_SEL_ALL32); + + } else if (km->match_map[i]->extr_start_offs_id == + SB_MAC_PORT) { + NT_LOG(DBG, FILTER, + "Set KM SWX sel A - PTC + MAC"); + hw_mod_km_rcp_set(km->be, HW_KM_RCP_SWX_SEL_A, + index, 0, SWX_SEL_ALL32); + + } else if (km->match_map[i]->extr_start_offs_id == + SB_KCC_ID) { + NT_LOG(DBG, FILTER, "Set KM SWX sel A - KCC ID"); + hw_mod_km_rcp_set(km->be, HW_KM_RCP_SWX_CCH, index, + 0, 1); + hw_mod_km_rcp_set(km->be, HW_KM_RCP_SWX_SEL_A, + index, 0, SWX_SEL_ALL32); + + } else { + return -1; + } + + } else { + return -1; + } + + swx++; + + } else { + if (sw == 0) { + /* DW8 */ + hw_mod_km_rcp_set(km->be, HW_KM_RCP_DW8_DYN, index, 0, + km->match_map[i]->extr_start_offs_id); + hw_mod_km_rcp_set(km->be, HW_KM_RCP_DW8_OFS, index, 0, + km->match_map[i]->rel_offs); + hw_mod_km_rcp_set(km->be, HW_KM_RCP_DW8_SEL_A, index, 0, + DW8_SEL_FIRST32); + NT_LOG(DBG, FILTER, + "Set KM DW8 sel A: dyn: %i, offs: %i", + km->match_map[i]->extr_start_offs_id, + km->match_map[i]->rel_offs); + + } else if (sw == 1) { + /* DW10 */ + hw_mod_km_rcp_set(km->be, HW_KM_RCP_DW10_DYN, index, 0, + km->match_map[i]->extr_start_offs_id); + hw_mod_km_rcp_set(km->be, HW_KM_RCP_DW10_OFS, index, 0, + km->match_map[i]->rel_offs); + hw_mod_km_rcp_set(km->be, HW_KM_RCP_DW10_SEL_A, index, 0, + DW10_SEL_FIRST32); + NT_LOG(DBG, FILTER, + "Set KM DW10 sel A: dyn: %i, offs: %i", + km->match_map[i]->extr_start_offs_id, + km->match_map[i]->rel_offs); + + } else { + return -1; + } + + sw++; + } + + break; + + case KM_USE_EXTRACTOR_QWORD: + if (qw == 0) { + hw_mod_km_rcp_set(km->be, HW_KM_RCP_QW0_DYN, index, 0, + km->match_map[i]->extr_start_offs_id); + hw_mod_km_rcp_set(km->be, HW_KM_RCP_QW0_OFS, index, 0, + km->match_map[i]->rel_offs); + + switch (km->match_map[i]->word_len) { + case 1: + hw_mod_km_rcp_set(km->be, HW_KM_RCP_QW0_SEL_A, index, 0, + QW0_SEL_FIRST32); + break; + + case 2: + hw_mod_km_rcp_set(km->be, HW_KM_RCP_QW0_SEL_A, index, 0, + QW0_SEL_FIRST64); + break; + + case 4: + hw_mod_km_rcp_set(km->be, HW_KM_RCP_QW0_SEL_A, index, 0, + QW0_SEL_ALL128); + break; + + default: + return -1; + } + + NT_LOG(DBG, FILTER, + "Set KM QW0 sel A: dyn: %i, offs: %i, size: %i", + km->match_map[i]->extr_start_offs_id, + km->match_map[i]->rel_offs, km->match_map[i]->word_len); + + } else if (qw == 1) { + hw_mod_km_rcp_set(km->be, HW_KM_RCP_QW4_DYN, index, 0, + 
km->match_map[i]->extr_start_offs_id); + hw_mod_km_rcp_set(km->be, HW_KM_RCP_QW4_OFS, index, 0, + km->match_map[i]->rel_offs); + + switch (km->match_map[i]->word_len) { + case 1: + hw_mod_km_rcp_set(km->be, HW_KM_RCP_QW4_SEL_A, index, 0, + QW4_SEL_FIRST32); + break; + + case 2: + hw_mod_km_rcp_set(km->be, HW_KM_RCP_QW4_SEL_A, index, 0, + QW4_SEL_FIRST64); + break; + + case 4: + hw_mod_km_rcp_set(km->be, HW_KM_RCP_QW4_SEL_A, index, 0, + QW4_SEL_ALL128); + break; + + default: + return -1; + } + + NT_LOG(DBG, FILTER, + "Set KM QW4 sel A: dyn: %i, offs: %i, size: %i", + km->match_map[i]->extr_start_offs_id, + km->match_map[i]->rel_offs, km->match_map[i]->word_len); + + } else { + return -1; + } + + qw++; + break; + + default: + return -1; + } + } + + /* set mask A */ + for (int i = 0; i < km->key_word_size; i++) { + hw_mod_km_rcp_set(km->be, HW_KM_RCP_MASK_A, index, + (km->be->km.nb_km_rcp_mask_a_word_size - 1) - i, + km->entry_mask[i]); + NT_LOG(DBG, FILTER, "Set KM mask A: %08x", km->entry_mask[i]); + } + + if (km->target == KM_CAM) { + /* set info - Color */ + if (km->info_set) { + hw_mod_km_rcp_set(km->be, HW_KM_RCP_INFO_A, index, 0, 1); + NT_LOG(DBG, FILTER, "Set KM info A"); + } + + /* set key length A */ + hw_mod_km_rcp_set(km->be, HW_KM_RCP_EL_A, index, 0, + km->key_word_size + !!km->info_set - 1); /* select id is -1 */ + /* set Flow Type for Key A */ + NT_LOG(DBG, FILTER, "Set KM EL A: %i", km->key_word_size + !!km->info_set - 1); + + hw_mod_km_rcp_set(km->be, HW_KM_RCP_FTM_A, index, 0, 1 << km->flow_type); + + NT_LOG(DBG, FILTER, "Set KM FTM A - ft: %i", km->flow_type); + + /* Set Paired - only on the CAM part though... TODO split CAM and TCAM */ + if ((uint32_t)(km->key_word_size + !!km->info_set) > + km->be->km.nb_cam_record_words) { + hw_mod_km_rcp_set(km->be, HW_KM_RCP_PAIRED, index, 0, 1); + NT_LOG(DBG, FILTER, "Set KM CAM Paired"); + km->cam_paired = 1; + } + + } else if (km->target == KM_TCAM) { + uint32_t bank_bm = 0; + + if (tcam_find_mapping(km) < 0) { + /* failed mapping into TCAM */ + NT_LOG(DBG, FILTER, "INFO: TCAM mapping flow failed"); + return -1; + } + + assert((uint32_t)(km->tcam_start_bank + km->key_word_size) <= + km->be->km.nb_tcam_banks); + + for (int i = 0; i < km->key_word_size; i++) { + bank_bm |= + (1 << (km->be->km.nb_tcam_banks - 1 - (km->tcam_start_bank + i))); + } + + /* Set BANK_A */ + hw_mod_km_rcp_set(km->be, HW_KM_RCP_BANK_A, index, 0, bank_bm); + /* Set Kl_A */ + hw_mod_km_rcp_set(km->be, HW_KM_RCP_KL_A, index, 0, km->key_word_size - 1); + + } else { + return -1; + } + + return 0; +} + +static int cam_populate(struct km_flow_def_s *km, int bank) +{ + int res = 0; + int cnt = km->key_word_size + !!km->info_set; + + for (uint32_t i = 0; i < km->be->km.nb_cam_record_words && cnt; i++, cnt--) { + res |= hw_mod_km_cam_set(km->be, HW_KM_CAM_W0 + i, bank, km->record_indexes[bank], + km->entry_word[i]); + res |= hw_mod_km_cam_set(km->be, HW_KM_CAM_FT0 + i, bank, km->record_indexes[bank], + km->flow_type); + } + + km->cam_dist[CAM_KM_DIST_IDX(bank)].km_owner = km; + + if (cnt) { + assert(km->cam_paired); + + for (uint32_t i = 0; i < km->be->km.nb_cam_record_words && cnt; i++, cnt--) { + res |= hw_mod_km_cam_set(km->be, HW_KM_CAM_W0 + i, bank, + km->record_indexes[bank] + 1, + km->entry_word[km->be->km.nb_cam_record_words + i]); + res |= hw_mod_km_cam_set(km->be, HW_KM_CAM_FT0 + i, bank, + km->record_indexes[bank] + 1, km->flow_type); + } + + km->cam_dist[CAM_KM_DIST_IDX(bank) + 1].km_owner = km; + } + + res |= hw_mod_km_cam_flush(km->be, bank, 
km->record_indexes[bank], km->cam_paired ? 2 : 1); + + return res; +} + +static int cam_reset_entry(struct km_flow_def_s *km, int bank) +{ + int res = 0; + int cnt = km->key_word_size + !!km->info_set; + + for (uint32_t i = 0; i < km->be->km.nb_cam_record_words && cnt; i++, cnt--) { + res |= hw_mod_km_cam_set(km->be, HW_KM_CAM_W0 + i, bank, km->record_indexes[bank], + 0); + res |= hw_mod_km_cam_set(km->be, HW_KM_CAM_FT0 + i, bank, km->record_indexes[bank], + 0); + } + + km->cam_dist[CAM_KM_DIST_IDX(bank)].km_owner = NULL; + + if (cnt) { + assert(km->cam_paired); + + for (uint32_t i = 0; i < km->be->km.nb_cam_record_words && cnt; i++, cnt--) { + res |= hw_mod_km_cam_set(km->be, HW_KM_CAM_W0 + i, bank, + km->record_indexes[bank] + 1, 0); + res |= hw_mod_km_cam_set(km->be, HW_KM_CAM_FT0 + i, bank, + km->record_indexes[bank] + 1, 0); + } + + km->cam_dist[CAM_KM_DIST_IDX(bank) + 1].km_owner = NULL; + } + + res |= hw_mod_km_cam_flush(km->be, bank, km->record_indexes[bank], km->cam_paired ? 2 : 1); + return res; +} + +static int move_cuckoo_index(struct km_flow_def_s *km) +{ + assert(km->cam_dist[CAM_KM_DIST_IDX(km->bank_used)].km_owner); + + for (uint32_t bank = 0; bank < km->be->km.nb_cam_banks; bank++) { + /* It will not select itself */ + if (km->cam_dist[CAM_KM_DIST_IDX(bank)].km_owner == NULL) { + if (km->cam_paired) { + if (km->cam_dist[CAM_KM_DIST_IDX(bank) + 1].km_owner != NULL) + continue; + } + + /* + * Populate in new position + */ + int res = cam_populate(km, bank); + + if (res) { + NT_LOG(DBG, FILTER, + "Error: failed to write to KM CAM in cuckoo move"); + return 0; + } + + /* + * Reset/free entry in old bank + * HW flushes are really not needed, the old addresses are always taken + * over by the caller If you change this code in future updates, this may + * no longer be true then! 
+ */ + km->cam_dist[CAM_KM_DIST_IDX(km->bank_used)].km_owner = NULL; + + if (km->cam_paired) + km->cam_dist[CAM_KM_DIST_IDX(km->bank_used) + 1].km_owner = NULL; + + NT_LOG(DBG, FILTER, + "KM Cuckoo hash moved from bank %i to bank %i (%04X => %04X)", + km->bank_used, bank, CAM_KM_DIST_IDX(km->bank_used), + CAM_KM_DIST_IDX(bank)); + km->bank_used = bank; + (*km->cuckoo_moves)++; + return 1; + } + } + + return 0; +} + +static int move_cuckoo_index_level(struct km_flow_def_s *km_parent, int bank_idx, int levels, + int cam_adr_list_len) +{ + struct km_flow_def_s *km = km_parent->cam_dist[bank_idx].km_owner; + + assert(levels <= CUCKOO_MOVE_MAX_DEPTH); + + /* + * Only move if same pairness + * Can be extended later to handle both move of paired and single entries + */ + if (!km || km_parent->cam_paired != km->cam_paired) + return 0; + + if (move_cuckoo_index(km)) + return 1; + + if (levels <= 1) + return 0; + + assert(cam_adr_list_len < CUCKOO_MOVE_MAX_DEPTH); + + cam_addr_reserved_stack[cam_adr_list_len++] = bank_idx; + + for (uint32_t i = 0; i < km->be->km.nb_cam_banks; i++) { + int reserved = 0; + int new_idx = CAM_KM_DIST_IDX(i); + + for (int i_reserved = 0; i_reserved < cam_adr_list_len; i_reserved++) { + if (cam_addr_reserved_stack[i_reserved] == new_idx) { + reserved = 1; + break; + } + } + + if (reserved) + continue; + + int res = move_cuckoo_index_level(km, new_idx, levels - 1, cam_adr_list_len); + + if (res) { + if (move_cuckoo_index(km)) + return 1; + + assert(0); + } + } + + return 0; +} + +static int km_write_data_to_cam(struct km_flow_def_s *km) +{ + int res = 0; + int val[MAX_BANKS]; + assert(km->be->km.nb_cam_banks <= MAX_BANKS); + assert(km->cam_dist); + + /* word list without info set */ + gethash(km->hsh, km->entry_word, val); + + for (uint32_t i = 0; i < km->be->km.nb_cam_banks; i++) { + /* if paired we start always on an even address - reset bit 0 */ + km->record_indexes[i] = (km->cam_paired) ? 
val[i] & ~1 : val[i]; + } + + NT_LOG(DBG, FILTER, "KM HASH [%03X, %03X, %03X]", km->record_indexes[0], + km->record_indexes[1], km->record_indexes[2]); + + if (km->info_set) + km->entry_word[km->key_word_size] = km->info; /* finally set info */ + + int bank = -1; + + /* + * first step, see if any of the banks are free + */ + for (uint32_t i_bank = 0; i_bank < km->be->km.nb_cam_banks; i_bank++) { + if (km->cam_dist[CAM_KM_DIST_IDX(i_bank)].km_owner == NULL) { + if (km->cam_paired == 0 || + km->cam_dist[CAM_KM_DIST_IDX(i_bank) + 1].km_owner == NULL) { + bank = i_bank; + break; + } + } + } + + if (bank < 0) { + /* + * Second step - cuckoo move existing flows if possible + */ + for (uint32_t i_bank = 0; i_bank < km->be->km.nb_cam_banks; i_bank++) { + if (move_cuckoo_index_level(km, CAM_KM_DIST_IDX(i_bank), 4, 0)) { + bank = i_bank; + break; + } + } + } + + if (bank < 0) + return -1; + + /* populate CAM */ + NT_LOG(DBG, FILTER, "KM Bank = %i (addr %04X)", bank, CAM_KM_DIST_IDX(bank)); + res = cam_populate(km, bank); + + if (res == 0) { + km->flushed_to_target = 1; + km->bank_used = bank; + } + + return res; +} + +/* + * TCAM + */ +static int tcam_find_free_record(struct km_flow_def_s *km, int start_bank) +{ + for (uint32_t rec = 0; rec < km->be->km.nb_tcam_bank_width; rec++) { + if (km->tcam_dist[TCAM_DIST_IDX(start_bank, rec)].km_owner == NULL) { + int pass = 1; + + for (int ii = 1; ii < km->key_word_size; ii++) { + if (km->tcam_dist[TCAM_DIST_IDX(start_bank + ii, rec)].km_owner != + NULL) { + pass = 0; + break; + } + } + + if (pass) { + km->tcam_record = rec; + return 1; + } + } + } + + return 0; +} + +static int tcam_find_mapping(struct km_flow_def_s *km) +{ + /* Search record and start index for this flow */ + for (int bs_idx = 0; bs_idx < km->num_start_offsets; bs_idx++) { + if (tcam_find_free_record(km, km->start_offsets[bs_idx])) { + km->tcam_start_bank = km->start_offsets[bs_idx]; + NT_LOG(DBG, FILTER, "Found space in TCAM start bank %i, record %i", + km->tcam_start_bank, km->tcam_record); + return 0; + } + } + + return -1; +} + +static int tcam_write_word(struct km_flow_def_s *km, int bank, int record, uint32_t word, + uint32_t mask) +{ + int err = 0; + uint32_t all_recs[3]; + + int rec_val = record / 32; + int rec_bit_shft = record % 32; + uint32_t rec_bit = (1 << rec_bit_shft); + + assert((km->be->km.nb_tcam_bank_width + 31) / 32 <= 3); + + for (int byte = 0; byte < 4; byte++) { + uint8_t a = (uint8_t)((word >> (24 - (byte * 8))) & 0xff); + uint8_t a_m = (uint8_t)((mask >> (24 - (byte * 8))) & 0xff); + /* calculate important value bits */ + a = a & a_m; + + for (int val = 0; val < 256; val++) { + err |= hw_mod_km_tcam_get(km->be, HW_KM_TCAM_T, bank, byte, val, all_recs); + + if ((val & a_m) == a) + all_recs[rec_val] |= rec_bit; + else + all_recs[rec_val] &= ~rec_bit; + + err |= hw_mod_km_tcam_set(km->be, HW_KM_TCAM_T, bank, byte, val, all_recs); + + if (err) + break; + } + } + + /* flush bank */ + err |= hw_mod_km_tcam_flush(km->be, bank, ALL_BANK_ENTRIES); + + if (err == 0) { + assert(km->tcam_dist[TCAM_DIST_IDX(bank, record)].km_owner == NULL); + km->tcam_dist[TCAM_DIST_IDX(bank, record)].km_owner = km; + } + + return err; +} + +static int km_write_data_to_tcam(struct km_flow_def_s *km) +{ + int err = 0; + + if (km->tcam_record < 0) { + tcam_find_free_record(km, km->tcam_start_bank); + + if (km->tcam_record < 0) { + NT_LOG(DBG, FILTER, "FAILED to find space in TCAM for flow"); + return -1; + } + + NT_LOG(DBG, FILTER, "Reused RCP: Found space in TCAM start bank %i, record %i", + 
km->tcam_start_bank, km->tcam_record); + } + + /* Write KM_TCI */ + err |= hw_mod_km_tci_set(km->be, HW_KM_TCI_COLOR, km->tcam_start_bank, km->tcam_record, + km->info); + err |= hw_mod_km_tci_set(km->be, HW_KM_TCI_FT, km->tcam_start_bank, km->tcam_record, + km->flow_type); + err |= hw_mod_km_tci_flush(km->be, km->tcam_start_bank, km->tcam_record, 1); + + for (int i = 0; i < km->key_word_size && !err; i++) { + err = tcam_write_word(km, km->tcam_start_bank + i, km->tcam_record, + km->entry_word[i], km->entry_mask[i]); + } + + if (err == 0) + km->flushed_to_target = 1; + + return err; +} + +static int tcam_reset_bank(struct km_flow_def_s *km, int bank, int record) +{ + int err = 0; + uint32_t all_recs[3]; + + int rec_val = record / 32; + int rec_bit_shft = record % 32; + uint32_t rec_bit = (1 << rec_bit_shft); + + assert((km->be->km.nb_tcam_bank_width + 31) / 32 <= 3); + + for (int byte = 0; byte < 4; byte++) { + for (int val = 0; val < 256; val++) { + err = hw_mod_km_tcam_get(km->be, HW_KM_TCAM_T, bank, byte, val, all_recs); + + if (err) + break; + + all_recs[rec_val] &= ~rec_bit; + err = hw_mod_km_tcam_set(km->be, HW_KM_TCAM_T, bank, byte, val, all_recs); + + if (err) + break; + } + } + + if (err) + return err; + + /* flush bank */ + err = hw_mod_km_tcam_flush(km->be, bank, ALL_BANK_ENTRIES); + km->tcam_dist[TCAM_DIST_IDX(bank, record)].km_owner = NULL; + + NT_LOG(DBG, FILTER, "Reset TCAM bank %i, rec_val %i rec bit %08x", bank, rec_val, + rec_bit); + + return err; +} + +static int tcam_reset_entry(struct km_flow_def_s *km) +{ + int err = 0; + + if (km->tcam_start_bank < 0 || km->tcam_record < 0) { + NT_LOG(DBG, FILTER, "FAILED to find space in TCAM for flow"); + return -1; + } + + /* Write KM_TCI */ + hw_mod_km_tci_set(km->be, HW_KM_TCI_COLOR, km->tcam_start_bank, km->tcam_record, 0); + hw_mod_km_tci_set(km->be, HW_KM_TCI_FT, km->tcam_start_bank, km->tcam_record, 0); + hw_mod_km_tci_flush(km->be, km->tcam_start_bank, km->tcam_record, 1); + + for (int i = 0; i < km->key_word_size && !err; i++) + err = tcam_reset_bank(km, km->tcam_start_bank + i, km->tcam_record); + + return err; +} + +int km_write_data_match_entry(struct km_flow_def_s *km, uint32_t color) +{ + int res = -1; + + km->info = color; + NT_LOG(DBG, FILTER, "Write Data entry Color: %08x", color); + + switch (km->target) { + case KM_CAM: + res = km_write_data_to_cam(km); + break; + + case KM_TCAM: + res = km_write_data_to_tcam(km); + break; + + case KM_SYNERGY: + default: + break; + } + + return res; +} + +int km_clear_data_match_entry(struct km_flow_def_s *km) +{ + int res = 0; + + if (km->root) { + struct km_flow_def_s *km1 = km->root; + + while (km1->reference != km) + km1 = km1->reference; + + km1->reference = km->reference; + + km->flushed_to_target = 0; + km->bank_used = 0; + + } else if (km->reference) { + km->reference->root = NULL; + + switch (km->target) { + case KM_CAM: + km->cam_dist[CAM_KM_DIST_IDX(km->bank_used)].km_owner = km->reference; + + if (km->key_word_size + !!km->info_set > 1) { + assert(km->cam_paired); + km->cam_dist[CAM_KM_DIST_IDX(km->bank_used) + 1].km_owner = + km->reference; + } + + break; + + case KM_TCAM: + for (int i = 0; i < km->key_word_size; i++) { + km->tcam_dist[TCAM_DIST_IDX(km->tcam_start_bank + i, + km->tcam_record)] + .km_owner = km->reference; + } + + break; + + case KM_SYNERGY: + default: + res = -1; + break; + } + + km->flushed_to_target = 0; + km->bank_used = 0; + + } else if (km->flushed_to_target) { + switch (km->target) { + case KM_CAM: + res = cam_reset_entry(km, km->bank_used); + 
break; + + case KM_TCAM: + res = tcam_reset_entry(km); + break; + + case KM_SYNERGY: + default: + res = -1; + break; + } + + km->flushed_to_target = 0; + km->bank_used = 0; + } + + return res; +} diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat.c b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat.c index d266760123..985c821312 100644 --- a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat.c +++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_cat.c @@ -902,6 +902,95 @@ static int hw_mod_cat_kce_flush(struct flow_api_backend_s *be, enum km_flm_if_se return be->iface->cat_kce_flush(be->be_dev, &be->cat, km_if_idx, start_idx, count); } +int hw_mod_cat_kce_km_flush(struct flow_api_backend_s *be, enum km_flm_if_select_e if_num, + int start_idx, int count) +{ + return hw_mod_cat_kce_flush(be, if_num, 0, start_idx, count); +} + +int hw_mod_cat_kce_flm_flush(struct flow_api_backend_s *be, enum km_flm_if_select_e if_num, + int start_idx, int count) +{ + return hw_mod_cat_kce_flush(be, if_num, 1, start_idx, count); +} + +static int hw_mod_cat_kce_mod(struct flow_api_backend_s *be, enum hw_cat_e field, + enum km_flm_if_select_e if_num, int km_if_id, int index, + uint32_t *value, int get) +{ + if ((unsigned int)index >= (be->cat.nb_cat_funcs / 8)) { + INDEX_TOO_LARGE_LOG; + return INDEX_TOO_LARGE; + } + + /* find KM module */ + int km_if_idx = find_km_flm_module_interface_index(be, if_num, km_if_id); + + if (km_if_idx < 0) + return km_if_idx; + + switch (_VER_) { + case 18: + switch (field) { + case HW_CAT_KCE_ENABLE_BM: + GET_SET(be->cat.v18.kce[index].enable_bm, value); + break; + + default: + UNSUP_FIELD_LOG; + return UNSUP_FIELD; + } + + break; + + /* end case 18 */ + case 21: + switch (field) { + case HW_CAT_KCE_ENABLE_BM: + GET_SET(be->cat.v21.kce[index].enable_bm[km_if_idx], value); + break; + + default: + UNSUP_FIELD_LOG; + return UNSUP_FIELD; + } + + break; + + /* end case 21 */ + + default: + UNSUP_VER_LOG; + return UNSUP_VER; + } + + return 0; +} + +int hw_mod_cat_kce_km_set(struct flow_api_backend_s *be, enum hw_cat_e field, + enum km_flm_if_select_e if_num, int index, uint32_t value) +{ + return hw_mod_cat_kce_mod(be, field, if_num, 0, index, &value, 0); +} + +int hw_mod_cat_kce_km_get(struct flow_api_backend_s *be, enum hw_cat_e field, + enum km_flm_if_select_e if_num, int index, uint32_t *value) +{ + return hw_mod_cat_kce_mod(be, field, if_num, 0, index, value, 1); +} + +int hw_mod_cat_kce_flm_set(struct flow_api_backend_s *be, enum hw_cat_e field, + enum km_flm_if_select_e if_num, int index, uint32_t value) +{ + return hw_mod_cat_kce_mod(be, field, if_num, 1, index, &value, 0); +} + +int hw_mod_cat_kce_flm_get(struct flow_api_backend_s *be, enum hw_cat_e field, + enum km_flm_if_select_e if_num, int index, uint32_t *value) +{ + return hw_mod_cat_kce_mod(be, field, if_num, 1, index, value, 1); +} + /* * KCS */ @@ -925,6 +1014,95 @@ static int hw_mod_cat_kcs_flush(struct flow_api_backend_s *be, enum km_flm_if_se return be->iface->cat_kcs_flush(be->be_dev, &be->cat, km_if_idx, start_idx, count); } +int hw_mod_cat_kcs_km_flush(struct flow_api_backend_s *be, enum km_flm_if_select_e if_num, + int start_idx, int count) +{ + return hw_mod_cat_kcs_flush(be, if_num, 0, start_idx, count); +} + +int hw_mod_cat_kcs_flm_flush(struct flow_api_backend_s *be, enum km_flm_if_select_e if_num, + int start_idx, int count) +{ + return hw_mod_cat_kcs_flush(be, if_num, 1, start_idx, count); +} + +static int hw_mod_cat_kcs_mod(struct flow_api_backend_s *be, enum hw_cat_e field, + enum 
km_flm_if_select_e if_num, int km_if_id, int index, + uint32_t *value, int get) +{ + if ((unsigned int)index >= be->cat.nb_cat_funcs) { + INDEX_TOO_LARGE_LOG; + return INDEX_TOO_LARGE; + } + + /* find KM module */ + int km_if_idx = find_km_flm_module_interface_index(be, if_num, km_if_id); + + if (km_if_idx < 0) + return km_if_idx; + + switch (_VER_) { + case 18: + switch (field) { + case HW_CAT_KCS_CATEGORY: + GET_SET(be->cat.v18.kcs[index].category, value); + break; + + default: + UNSUP_FIELD_LOG; + return UNSUP_FIELD; + } + + break; + + /* end case 18 */ + case 21: + switch (field) { + case HW_CAT_KCS_CATEGORY: + GET_SET(be->cat.v21.kcs[index].category[km_if_idx], value); + break; + + default: + UNSUP_FIELD_LOG; + return UNSUP_FIELD; + } + + break; + + /* end case 21 */ + + default: + UNSUP_VER_LOG; + return UNSUP_VER; + } + + return 0; +} + +int hw_mod_cat_kcs_km_set(struct flow_api_backend_s *be, enum hw_cat_e field, + enum km_flm_if_select_e if_num, int index, uint32_t value) +{ + return hw_mod_cat_kcs_mod(be, field, if_num, 0, index, &value, 0); +} + +int hw_mod_cat_kcs_km_get(struct flow_api_backend_s *be, enum hw_cat_e field, + enum km_flm_if_select_e if_num, int index, uint32_t *value) +{ + return hw_mod_cat_kcs_mod(be, field, if_num, 0, index, value, 1); +} + +int hw_mod_cat_kcs_flm_set(struct flow_api_backend_s *be, enum hw_cat_e field, + enum km_flm_if_select_e if_num, int index, uint32_t value) +{ + return hw_mod_cat_kcs_mod(be, field, if_num, 1, index, &value, 0); +} + +int hw_mod_cat_kcs_flm_get(struct flow_api_backend_s *be, enum hw_cat_e field, + enum km_flm_if_select_e if_num, int index, uint32_t *value) +{ + return hw_mod_cat_kcs_mod(be, field, if_num, 1, index, value, 1); +} + /* * FTE */ @@ -951,6 +1129,97 @@ static int hw_mod_cat_fte_flush(struct flow_api_backend_s *be, enum km_flm_if_se return be->iface->cat_fte_flush(be->be_dev, &be->cat, km_if_idx, start_idx, count); } +int hw_mod_cat_fte_km_flush(struct flow_api_backend_s *be, enum km_flm_if_select_e if_num, + int start_idx, int count) +{ + return hw_mod_cat_fte_flush(be, if_num, 0, start_idx, count); +} + +int hw_mod_cat_fte_flm_flush(struct flow_api_backend_s *be, enum km_flm_if_select_e if_num, + int start_idx, int count) +{ + return hw_mod_cat_fte_flush(be, if_num, 1, start_idx, count); +} + +static int hw_mod_cat_fte_mod(struct flow_api_backend_s *be, enum hw_cat_e field, + enum km_flm_if_select_e if_num, int km_if_id, int index, + uint32_t *value, int get) +{ + const uint32_t key_cnt = (_VER_ >= 20) ? 
4 : 2; + + if ((unsigned int)index >= (be->cat.nb_cat_funcs / 8 * be->cat.nb_flow_types * key_cnt)) { + INDEX_TOO_LARGE_LOG; + return INDEX_TOO_LARGE; + } + + /* find KM module */ + int km_if_idx = find_km_flm_module_interface_index(be, if_num, km_if_id); + + if (km_if_idx < 0) + return km_if_idx; + + switch (_VER_) { + case 18: + switch (field) { + case HW_CAT_FTE_ENABLE_BM: + GET_SET(be->cat.v18.fte[index].enable_bm, value); + break; + + default: + UNSUP_FIELD_LOG; + return UNSUP_FIELD; + } + + break; + + /* end case 18 */ + case 21: + switch (field) { + case HW_CAT_FTE_ENABLE_BM: + GET_SET(be->cat.v21.fte[index].enable_bm[km_if_idx], value); + break; + + default: + UNSUP_FIELD_LOG; + return UNSUP_FIELD; + } + + break; + + /* end case 21 */ + + default: + UNSUP_VER_LOG; + return UNSUP_VER; + } + + return 0; +} + +int hw_mod_cat_fte_km_set(struct flow_api_backend_s *be, enum hw_cat_e field, + enum km_flm_if_select_e if_num, int index, uint32_t value) +{ + return hw_mod_cat_fte_mod(be, field, if_num, 0, index, &value, 0); +} + +int hw_mod_cat_fte_km_get(struct flow_api_backend_s *be, enum hw_cat_e field, + enum km_flm_if_select_e if_num, int index, uint32_t *value) +{ + return hw_mod_cat_fte_mod(be, field, if_num, 0, index, value, 1); +} + +int hw_mod_cat_fte_flm_set(struct flow_api_backend_s *be, enum hw_cat_e field, + enum km_flm_if_select_e if_num, int index, uint32_t value) +{ + return hw_mod_cat_fte_mod(be, field, if_num, 1, index, &value, 0); +} + +int hw_mod_cat_fte_flm_get(struct flow_api_backend_s *be, enum hw_cat_e field, + enum km_flm_if_select_e if_num, int index, uint32_t *value) +{ + return hw_mod_cat_fte_mod(be, field, if_num, 1, index, value, 1); +} + int hw_mod_cat_cte_flush(struct flow_api_backend_s *be, int start_idx, int count) { if (count == ALL_ENTRIES) @@ -964,6 +1233,51 @@ int hw_mod_cat_cte_flush(struct flow_api_backend_s *be, int start_idx, int count return be->iface->cat_cte_flush(be->be_dev, &be->cat, start_idx, count); } +static int hw_mod_cat_cte_mod(struct flow_api_backend_s *be, enum hw_cat_e field, int index, + uint32_t *value, int get) +{ + if ((unsigned int)index >= be->cat.nb_cat_funcs) { + INDEX_TOO_LARGE_LOG; + return INDEX_TOO_LARGE; + } + + switch (_VER_) { + case 18: + case 21: + switch (field) { + case HW_CAT_CTE_ENABLE_BM: + GET_SET(be->cat.v18.cte[index].enable_bm, value); + break; + + default: + UNSUP_FIELD_LOG; + return UNSUP_FIELD; + } + + break; + + /* end case 18/21 */ + + default: + UNSUP_VER_LOG; + return UNSUP_VER; + } + + return 0; +} + +int hw_mod_cat_cte_set(struct flow_api_backend_s *be, enum hw_cat_e field, int index, + uint32_t value) +{ + return hw_mod_cat_cte_mod(be, field, index, &value, 0); +} + +int hw_mod_cat_cte_get(struct flow_api_backend_s *be, enum hw_cat_e field, int index, + uint32_t *value) +{ + return hw_mod_cat_cte_mod(be, field, index, value, 1); +} + int hw_mod_cat_cts_flush(struct flow_api_backend_s *be, int start_idx, int count) { int addr_size = (_VER_ < 15) ? 
8 : ((be->cat.cts_num + 1) / 2); @@ -979,6 +1293,57 @@ int hw_mod_cat_cts_flush(struct flow_api_backend_s *be, int start_idx, int count return be->iface->cat_cts_flush(be->be_dev, &be->cat, start_idx, count); } +static int hw_mod_cat_cts_mod(struct flow_api_backend_s *be, enum hw_cat_e field, int index, + uint32_t *value, int get) +{ + int addr_size = (be->cat.cts_num + 1) / 2; + + if ((unsigned int)index >= (be->cat.nb_cat_funcs * addr_size)) { + INDEX_TOO_LARGE_LOG; + return INDEX_TOO_LARGE; + } + + switch (_VER_) { + case 18: + case 21: + switch (field) { + case HW_CAT_CTS_CAT_A: + GET_SET(be->cat.v18.cts[index].cat_a, value); + break; + + case HW_CAT_CTS_CAT_B: + GET_SET(be->cat.v18.cts[index].cat_b, value); + break; + + default: + UNSUP_FIELD_LOG; + return UNSUP_FIELD; + } + + break; + + /* end case 18/21 */ + + default: + UNSUP_VER_LOG; + return UNSUP_VER; + } + + return 0; +} + +int hw_mod_cat_cts_set(struct flow_api_backend_s *be, enum hw_cat_e field, int index, + uint32_t value) +{ + return hw_mod_cat_cts_mod(be, field, index, &value, 0); +} + +int hw_mod_cat_cts_get(struct flow_api_backend_s *be, enum hw_cat_e field, int index, + uint32_t *value) +{ + return hw_mod_cat_cts_mod(be, field, index, value, 1); +} + int hw_mod_cat_cot_flush(struct flow_api_backend_s *be, int start_idx, int count) { if (count == ALL_ENTRIES) @@ -992,6 +1357,98 @@ int hw_mod_cat_cot_flush(struct flow_api_backend_s *be, int start_idx, int count return be->iface->cat_cot_flush(be->be_dev, &be->cat, start_idx, count); } +static int hw_mod_cat_cot_mod(struct flow_api_backend_s *be, enum hw_cat_e field, int index, + uint32_t *value, int get) +{ + if ((unsigned int)index >= be->max_categories) { + INDEX_TOO_LARGE_LOG; + return INDEX_TOO_LARGE; + } + + switch (_VER_) { + case 18: + case 21: + switch (field) { + case HW_CAT_COT_PRESET_ALL: + if (get) { + UNSUP_FIELD_LOG; + return UNSUP_FIELD; + } + + memset(&be->cat.v18.cot[index], (uint8_t)*value, + sizeof(struct cat_v18_cot_s)); + break; + + case HW_CAT_COT_COMPARE: + if (!get) { + UNSUP_FIELD_LOG; + return UNSUP_FIELD; + } + + if (*value >= be->max_categories) { + INDEX_TOO_LARGE_LOG; + return INDEX_TOO_LARGE; + } + + DO_COMPARE_INDEXS(be->cat.v18.cot, struct cat_v18_cot_s, index, *value); + break; + + case HW_CAT_COT_FIND: + if (!get) { + UNSUP_FIELD_LOG; + return UNSUP_FIELD; + } + + if (*value >= be->max_categories) { + INDEX_TOO_LARGE_LOG; + return INDEX_TOO_LARGE; + } + + FIND_EQUAL_INDEX(be->cat.v18.cot, struct cat_v18_cot_s, index, *value, + be->max_categories); + break; + + case HW_CAT_COT_COPY_FROM: + if (get) { + UNSUP_FIELD_LOG; + return UNSUP_FIELD; + } + + memcpy(&be->cat.v18.cot[index], &be->cat.v18.cot[*value], + sizeof(struct cat_v18_cot_s)); + break; + + case HW_CAT_COT_COLOR: + GET_SET(be->cat.v18.cot[index].color, value); + break; + + case HW_CAT_COT_KM: + GET_SET(be->cat.v18.cot[index].km, value); + break; + + default: + UNSUP_FIELD_LOG; + return UNSUP_FIELD; + } + + break; + + /* end case 18/21 */ + + default: + UNSUP_VER_LOG; + return UNSUP_VER; + } + + return 0; +} + +int hw_mod_cat_cot_set(struct flow_api_backend_s *be, enum hw_cat_e field, int index, + uint32_t value) +{ + return hw_mod_cat_cot_mod(be, field, index, &value, 0); +} + int hw_mod_cat_cct_flush(struct flow_api_backend_s *be, int start_idx, int count) { if (count == ALL_ENTRIES) diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_flm.c b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_flm.c index 8c1f3f2d96..14dd95a150 100644 --- 
a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_flm.c +++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_flm.c @@ -293,11 +293,268 @@ int hw_mod_flm_control_set(struct flow_api_backend_s *be, enum hw_flm_e field, u return hw_mod_flm_control_mod(be, field, &value, 0); } +int hw_mod_flm_status_update(struct flow_api_backend_s *be) +{ + return be->iface->flm_status_update(be->be_dev, &be->flm); +} + +static int hw_mod_flm_status_mod(struct flow_api_backend_s *be, enum hw_flm_e field, + uint32_t *value, int get) +{ + switch (_VER_) { + case 25: + switch (field) { + case HW_FLM_STATUS_CALIB_SUCCESS: + GET_SET(be->flm.v25.status->calib_success, value); + break; + + case HW_FLM_STATUS_CALIB_FAIL: + GET_SET(be->flm.v25.status->calib_fail, value); + break; + + case HW_FLM_STATUS_INITDONE: + GET_SET(be->flm.v25.status->initdone, value); + break; + + case HW_FLM_STATUS_IDLE: + GET_SET(be->flm.v25.status->idle, value); + break; + + case HW_FLM_STATUS_CRITICAL: + GET_SET(be->flm.v25.status->critical, value); + break; + + case HW_FLM_STATUS_PANIC: + GET_SET(be->flm.v25.status->panic, value); + break; + + case HW_FLM_STATUS_CRCERR: + GET_SET(be->flm.v25.status->crcerr, value); + break; + + case HW_FLM_STATUS_EFT_BP: + GET_SET(be->flm.v25.status->eft_bp, value); + break; + + case HW_FLM_STATUS_CACHE_BUFFER_CRITICAL: + GET_SET(be->flm.v25.status->cache_buf_critical, value); + break; + + default: + UNSUP_FIELD_LOG; + return UNSUP_FIELD; + } + + break; + + default: + UNSUP_VER_LOG; + return UNSUP_VER; + } + + return 0; +} + +int hw_mod_flm_status_get(struct flow_api_backend_s *be, enum hw_flm_e field, uint32_t *value) +{ + return hw_mod_flm_status_mod(be, field, value, 1); +} + int hw_mod_flm_scan_flush(struct flow_api_backend_s *be) { return be->iface->flm_scan_flush(be->be_dev, &be->flm); } +static int hw_mod_flm_scan_mod(struct flow_api_backend_s *be, enum hw_flm_e field, uint32_t *value, + int get) +{ + switch (_VER_) { + case 25: + switch (field) { + case HW_FLM_SCAN_I: + GET_SET(be->flm.v25.scan->i, value); + break; + + default: + UNSUP_FIELD_LOG; + return UNSUP_FIELD; + } + + break; + + default: + UNSUP_VER_LOG; + return UNSUP_VER; + } + + return 0; +} + +int hw_mod_flm_scan_set(struct flow_api_backend_s *be, enum hw_flm_e field, uint32_t value) +{ + return hw_mod_flm_scan_mod(be, field, &value, 0); +} + +int hw_mod_flm_load_bin_flush(struct flow_api_backend_s *be) +{ + return be->iface->flm_load_bin_flush(be->be_dev, &be->flm); +} + +static int hw_mod_flm_load_bin_mod(struct flow_api_backend_s *be, enum hw_flm_e field, + uint32_t *value, int get) +{ + switch (_VER_) { + case 25: + switch (field) { + case HW_FLM_LOAD_BIN: + GET_SET(be->flm.v25.load_bin->bin, value); + break; + + default: + UNSUP_FIELD_LOG; + return UNSUP_FIELD; + } + + break; + + default: + UNSUP_VER_LOG; + return UNSUP_VER; + } + + return 0; +} + +int hw_mod_flm_load_bin_set(struct flow_api_backend_s *be, enum hw_flm_e field, uint32_t value) +{ + return hw_mod_flm_load_bin_mod(be, field, &value, 0); +} + +int hw_mod_flm_prio_flush(struct flow_api_backend_s *be) +{ + return be->iface->flm_prio_flush(be->be_dev, &be->flm); +} + +static int hw_mod_flm_prio_mod(struct flow_api_backend_s *be, enum hw_flm_e field, uint32_t *value, + int get) +{ + switch (_VER_) { + case 25: + switch (field) { + case HW_FLM_PRIO_LIMIT0: + GET_SET(be->flm.v25.prio->limit0, value); + break; + + case HW_FLM_PRIO_FT0: + GET_SET(be->flm.v25.prio->ft0, value); + break; + + case HW_FLM_PRIO_LIMIT1: + GET_SET(be->flm.v25.prio->limit1, value); + break; + + case 
HW_FLM_PRIO_FT1: + GET_SET(be->flm.v25.prio->ft1, value); + break; + + case HW_FLM_PRIO_LIMIT2: + GET_SET(be->flm.v25.prio->limit2, value); + break; + + case HW_FLM_PRIO_FT2: + GET_SET(be->flm.v25.prio->ft2, value); + break; + + case HW_FLM_PRIO_LIMIT3: + GET_SET(be->flm.v25.prio->limit3, value); + break; + + case HW_FLM_PRIO_FT3: + GET_SET(be->flm.v25.prio->ft3, value); + break; + + default: + UNSUP_FIELD_LOG; + return UNSUP_FIELD; + } + + break; + + default: + UNSUP_VER_LOG; + return UNSUP_VER; + } + + return 0; +} + +int hw_mod_flm_prio_set(struct flow_api_backend_s *be, enum hw_flm_e field, uint32_t value) +{ + return hw_mod_flm_prio_mod(be, field, &value, 0); +} + +int hw_mod_flm_pst_flush(struct flow_api_backend_s *be, int start_idx, int count) +{ + if (count == ALL_ENTRIES) + count = be->flm.nb_pst_profiles; + + if ((unsigned int)(start_idx + count) > be->flm.nb_pst_profiles) { + INDEX_TOO_LARGE_LOG; + return INDEX_TOO_LARGE; + } + + return be->iface->flm_pst_flush(be->be_dev, &be->flm, start_idx, count); +} + +static int hw_mod_flm_pst_mod(struct flow_api_backend_s *be, enum hw_flm_e field, int index, + uint32_t *value, int get) +{ + switch (_VER_) { + case 25: + switch (field) { + case HW_FLM_PST_PRESET_ALL: + if (get) { + UNSUP_FIELD_LOG; + return UNSUP_FIELD; + } + + memset(&be->flm.v25.pst[index], (uint8_t)*value, + sizeof(struct flm_v25_pst_s)); + break; + + case HW_FLM_PST_BP: + GET_SET(be->flm.v25.pst[index].bp, value); + break; + + case HW_FLM_PST_PP: + GET_SET(be->flm.v25.pst[index].pp, value); + break; + + case HW_FLM_PST_TP: + GET_SET(be->flm.v25.pst[index].tp, value); + break; + + default: + UNSUP_FIELD_LOG; + return UNSUP_FIELD; + } + + break; + + default: + UNSUP_VER_LOG; + return UNSUP_VER; + } + + return 0; +} + +int hw_mod_flm_pst_set(struct flow_api_backend_s *be, enum hw_flm_e field, int index, + uint32_t value) +{ + return hw_mod_flm_pst_mod(be, field, index, &value, 0); +} + int hw_mod_flm_rcp_flush(struct flow_api_backend_s *be, int start_idx, int count) { if (count == ALL_ENTRIES) @@ -322,3 +579,469 @@ int hw_mod_flm_scrub_flush(struct flow_api_backend_s *be, int start_idx, int cou } return be->iface->flm_scrub_flush(be->be_dev, &be->flm, start_idx, count); } + +static int hw_mod_flm_rcp_mod(struct flow_api_backend_s *be, enum hw_flm_e field, int index, + uint32_t *value, int get) +{ + switch (_VER_) { + case 25: + switch (field) { + case HW_FLM_RCP_PRESET_ALL: + if (get) { + UNSUP_FIELD_LOG; + return UNSUP_FIELD; + } + + memset(&be->flm.v25.rcp[index], (uint8_t)*value, + sizeof(struct flm_v25_rcp_s)); + break; + + case HW_FLM_RCP_LOOKUP: + GET_SET(be->flm.v25.rcp[index].lookup, value); + break; + + case HW_FLM_RCP_QW0_DYN: + GET_SET(be->flm.v25.rcp[index].qw0_dyn, value); + break; + + case HW_FLM_RCP_QW0_OFS: + GET_SET(be->flm.v25.rcp[index].qw0_ofs, value); + break; + + case HW_FLM_RCP_QW0_SEL: + GET_SET(be->flm.v25.rcp[index].qw0_sel, value); + break; + + case HW_FLM_RCP_QW4_DYN: + GET_SET(be->flm.v25.rcp[index].qw4_dyn, value); + break; + + case HW_FLM_RCP_QW4_OFS: + GET_SET(be->flm.v25.rcp[index].qw4_ofs, value); + break; + + case HW_FLM_RCP_SW8_DYN: + GET_SET(be->flm.v25.rcp[index].sw8_dyn, value); + break; + + case HW_FLM_RCP_SW8_OFS: + GET_SET(be->flm.v25.rcp[index].sw8_ofs, value); + break; + + case HW_FLM_RCP_SW8_SEL: + GET_SET(be->flm.v25.rcp[index].sw8_sel, value); + break; + + case HW_FLM_RCP_SW9_DYN: + GET_SET(be->flm.v25.rcp[index].sw9_dyn, value); + break; + + case HW_FLM_RCP_SW9_OFS: + GET_SET(be->flm.v25.rcp[index].sw9_ofs, value); + break; 
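+		/*
+		 * The HW_FLM_RCP_MASK case that follows differs from the scalar
+		 * fields above: both get and set transfer the recipe's whole
+		 * mask[] array with memcpy, because the caller supplies a word
+		 * array rather than a single 32-bit value.
+		 */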
+ + case HW_FLM_RCP_MASK: + if (get) { + memcpy(value, be->flm.v25.rcp[index].mask, + sizeof(((struct flm_v25_rcp_s *)0)->mask)); + + } else { + memcpy(be->flm.v25.rcp[index].mask, value, + sizeof(((struct flm_v25_rcp_s *)0)->mask)); + } + + break; + + case HW_FLM_RCP_KID: + GET_SET(be->flm.v25.rcp[index].kid, value); + break; + + case HW_FLM_RCP_OPN: + GET_SET(be->flm.v25.rcp[index].opn, value); + break; + + case HW_FLM_RCP_IPN: + GET_SET(be->flm.v25.rcp[index].ipn, value); + break; + + case HW_FLM_RCP_BYT_DYN: + GET_SET(be->flm.v25.rcp[index].byt_dyn, value); + break; + + case HW_FLM_RCP_BYT_OFS: + GET_SET(be->flm.v25.rcp[index].byt_ofs, value); + break; + + case HW_FLM_RCP_TXPLM: + GET_SET(be->flm.v25.rcp[index].txplm, value); + break; + + case HW_FLM_RCP_AUTO_IPV4_MASK: + GET_SET(be->flm.v25.rcp[index].auto_ipv4_mask, value); + break; + + default: + UNSUP_FIELD_LOG; + return UNSUP_FIELD; + } + + break; + + default: + UNSUP_VER_LOG; + return UNSUP_VER; + } + + return 0; +} + +int hw_mod_flm_rcp_set_mask(struct flow_api_backend_s *be, enum hw_flm_e field, int index, + uint32_t *value) +{ + if (field != HW_FLM_RCP_MASK) + return UNSUP_VER; + + return hw_mod_flm_rcp_mod(be, field, index, value, 0); +} + +int hw_mod_flm_rcp_set(struct flow_api_backend_s *be, enum hw_flm_e field, int index, + uint32_t value) +{ + if (field == HW_FLM_RCP_MASK) + return UNSUP_VER; + + return hw_mod_flm_rcp_mod(be, field, index, &value, 0); +} + +int hw_mod_flm_buf_ctrl_update(struct flow_api_backend_s *be) +{ + return be->iface->flm_buf_ctrl_update(be->be_dev, &be->flm); +} + +static int hw_mod_flm_buf_ctrl_mod_get(struct flow_api_backend_s *be, enum hw_flm_e field, + uint32_t *value) +{ + int get = 1; /* Only get supported */ + + switch (_VER_) { + case 25: + switch (field) { + case HW_FLM_BUF_CTRL_LRN_FREE: + GET_SET(be->flm.v25.buf_ctrl->lrn_free, value); + break; + + case HW_FLM_BUF_CTRL_INF_AVAIL: + GET_SET(be->flm.v25.buf_ctrl->inf_avail, value); + break; + + case HW_FLM_BUF_CTRL_STA_AVAIL: + GET_SET(be->flm.v25.buf_ctrl->sta_avail, value); + break; + + default: + UNSUP_FIELD_LOG; + return UNSUP_FIELD; + } + + break; + + default: + UNSUP_VER_LOG; + return UNSUP_VER; + } + + return 0; +} + +int hw_mod_flm_buf_ctrl_get(struct flow_api_backend_s *be, enum hw_flm_e field, uint32_t *value) +{ + return hw_mod_flm_buf_ctrl_mod_get(be, field, value); +} + +int hw_mod_flm_stat_update(struct flow_api_backend_s *be) +{ + return be->iface->flm_stat_update(be->be_dev, &be->flm); +} + +int hw_mod_flm_stat_get(struct flow_api_backend_s *be, enum hw_flm_e field, uint32_t *value) +{ + switch (_VER_) { + case 25: + switch (field) { + case HW_FLM_STAT_LRN_DONE: + *value = be->flm.v25.lrn_done->cnt; + break; + + case HW_FLM_STAT_LRN_IGNORE: + *value = be->flm.v25.lrn_ignore->cnt; + break; + + case HW_FLM_STAT_LRN_FAIL: + *value = be->flm.v25.lrn_fail->cnt; + break; + + case HW_FLM_STAT_UNL_DONE: + *value = be->flm.v25.unl_done->cnt; + break; + + case HW_FLM_STAT_UNL_IGNORE: + *value = be->flm.v25.unl_ignore->cnt; + break; + + case HW_FLM_STAT_REL_DONE: + *value = be->flm.v25.rel_done->cnt; + break; + + case HW_FLM_STAT_REL_IGNORE: + *value = be->flm.v25.rel_ignore->cnt; + break; + + case HW_FLM_STAT_PRB_DONE: + *value = be->flm.v25.prb_done->cnt; + break; + + case HW_FLM_STAT_PRB_IGNORE: + *value = be->flm.v25.prb_ignore->cnt; + break; + + case HW_FLM_STAT_AUL_DONE: + *value = be->flm.v25.aul_done->cnt; + break; + + case HW_FLM_STAT_AUL_IGNORE: + *value = be->flm.v25.aul_ignore->cnt; + break; + + case HW_FLM_STAT_AUL_FAIL: 
+ *value = be->flm.v25.aul_fail->cnt; + break; + + case HW_FLM_STAT_TUL_DONE: + *value = be->flm.v25.tul_done->cnt; + break; + + case HW_FLM_STAT_FLOWS: + *value = be->flm.v25.flows->cnt; + break; + + case HW_FLM_LOAD_LPS: + *value = be->flm.v25.load_lps->lps; + break; + + case HW_FLM_LOAD_APS: + *value = be->flm.v25.load_aps->aps; + break; + + default: { + if (_VER_ < 18) + return UNSUP_FIELD; + + switch (field) { + case HW_FLM_STAT_STA_DONE: + *value = be->flm.v25.sta_done->cnt; + break; + + case HW_FLM_STAT_INF_DONE: + *value = be->flm.v25.inf_done->cnt; + break; + + case HW_FLM_STAT_INF_SKIP: + *value = be->flm.v25.inf_skip->cnt; + break; + + case HW_FLM_STAT_PCK_HIT: + *value = be->flm.v25.pck_hit->cnt; + break; + + case HW_FLM_STAT_PCK_MISS: + *value = be->flm.v25.pck_miss->cnt; + break; + + case HW_FLM_STAT_PCK_UNH: + *value = be->flm.v25.pck_unh->cnt; + break; + + case HW_FLM_STAT_PCK_DIS: + *value = be->flm.v25.pck_dis->cnt; + break; + + case HW_FLM_STAT_CSH_HIT: + *value = be->flm.v25.csh_hit->cnt; + break; + + case HW_FLM_STAT_CSH_MISS: + *value = be->flm.v25.csh_miss->cnt; + break; + + case HW_FLM_STAT_CSH_UNH: + *value = be->flm.v25.csh_unh->cnt; + break; + + case HW_FLM_STAT_CUC_START: + *value = be->flm.v25.cuc_start->cnt; + break; + + case HW_FLM_STAT_CUC_MOVE: + *value = be->flm.v25.cuc_move->cnt; + break; + + default: + return UNSUP_FIELD; + } + } + break; + } + + break; + + default: + return UNSUP_VER; + } + + return 0; +} + +int hw_mod_flm_lrn_data_set_flush(struct flow_api_backend_s *be, enum hw_flm_e field, + const uint32_t *value, uint32_t records, + uint32_t *handled_records, uint32_t *inf_word_cnt, + uint32_t *sta_word_cnt) +{ + int ret = 0; + + switch (_VER_) { + case 25: + switch (field) { + case HW_FLM_FLOW_LRN_DATA: + ret = be->iface->flm_lrn_data_flush(be->be_dev, &be->flm, value, records, + handled_records, + (sizeof(struct flm_v25_lrn_data_s) / + sizeof(uint32_t)), + inf_word_cnt, sta_word_cnt); + break; + + default: + UNSUP_FIELD_LOG; + return UNSUP_FIELD; + } + + break; + + default: + UNSUP_VER_LOG; + return UNSUP_VER; + } + + return ret; +} + +int hw_mod_flm_inf_sta_data_update_get(struct flow_api_backend_s *be, enum hw_flm_e field, + uint32_t *inf_value, uint32_t inf_size, + uint32_t *inf_word_cnt, uint32_t *sta_value, + uint32_t sta_size, uint32_t *sta_word_cnt) +{ + switch (_VER_) { + case 25: + switch (field) { + case HW_FLM_FLOW_INF_STA_DATA: + be->iface->flm_inf_sta_data_update(be->be_dev, &be->flm, inf_value, + inf_size, inf_word_cnt, sta_value, + sta_size, sta_word_cnt); + break; + + default: + UNSUP_FIELD_LOG; + return UNSUP_FIELD; + } + + break; + + default: + UNSUP_VER_LOG; + return UNSUP_VER; + } + + return 0; +} + +/* + * SCRUB timeout support functions to encode users' input into FPGA 8-bit time format: + * Timeout in seconds (2^30 nanoseconds); zero means disabled. Value is: + * + * (T[7:3] != 0) ? ((8 + T[2:0]) shift-left (T[7:3] - 1)) : T[2:0] + * + * The maximum allowed value is 0xEF (127 years). + * + * Note that this represents a lower bound on the timeout, depending on the flow + * scanner interval and overall load, the timeout can be substantially longer. + */ +uint32_t hw_mod_flm_scrub_timeout_decode(uint32_t t_enc) +{ + uint8_t t_bits_2_0 = t_enc & 0x07; + uint8_t t_bits_7_3 = (t_enc >> 3) & 0x1F; + return t_bits_7_3 != 0 ? 
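+	/*
+	 * Illustrative example of the encoding described above: t_enc = 0x29
+	 * has T[7:3] = 5 and T[2:0] = 1, so it decodes to
+	 * (8 + 1) << (5 - 1) = 144 time units, while encoded values 0..7
+	 * decode to themselves because T[7:3] == 0.
+	 */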
((8 + t_bits_2_0) << (t_bits_7_3 - 1)) : t_bits_2_0; +} + +uint32_t hw_mod_flm_scrub_timeout_encode(uint32_t t) +{ + uint32_t t_enc = 0; + + if (t > 0) { + uint32_t t_dec = 0; + + do { + t_enc++; + t_dec = hw_mod_flm_scrub_timeout_decode(t_enc); + } while (t_enc <= 0xEF && t_dec < t); + } + + return t_enc; +} + +static int hw_mod_flm_scrub_mod(struct flow_api_backend_s *be, enum hw_flm_e field, int index, + uint32_t *value, int get) +{ + switch (_VER_) { + case 25: + switch (field) { + case HW_FLM_SCRUB_PRESET_ALL: + if (get) + return UNSUP_FIELD; + + memset(&be->flm.v25.scrub[index], (uint8_t)*value, + sizeof(struct flm_v25_scrub_s)); + break; + + case HW_FLM_SCRUB_T: + GET_SET(be->flm.v25.scrub[index].t, value); + break; + + case HW_FLM_SCRUB_R: + GET_SET(be->flm.v25.scrub[index].r, value); + break; + + case HW_FLM_SCRUB_DEL: + GET_SET(be->flm.v25.scrub[index].del, value); + break; + + case HW_FLM_SCRUB_INF: + GET_SET(be->flm.v25.scrub[index].inf, value); + break; + + default: + return UNSUP_FIELD; + } + + break; + + default: + return UNSUP_VER; + } + + return 0; +} + +int hw_mod_flm_scrub_set(struct flow_api_backend_s *be, enum hw_flm_e field, int index, + uint32_t value) +{ + return hw_mod_flm_scrub_mod(be, field, index, &value, 0); +} diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hsh.c b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hsh.c index df5c00ac42..1750d09afb 100644 --- a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hsh.c +++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_hsh.c @@ -89,3 +89,182 @@ int hw_mod_hsh_rcp_flush(struct flow_api_backend_s *be, int start_idx, int count return be->iface->hsh_rcp_flush(be->be_dev, &be->hsh, start_idx, count); } + +static int hw_mod_hsh_rcp_mod(struct flow_api_backend_s *be, enum hw_hsh_e field, uint32_t index, + uint32_t word_off, uint32_t *value, int get) +{ + if (index >= be->hsh.nb_rcp) { + INDEX_TOO_LARGE_LOG; + return INDEX_TOO_LARGE; + } + + switch (_VER_) { + case 5: + switch (field) { + case HW_HSH_RCP_PRESET_ALL: + if (get) { + UNSUP_FIELD_LOG; + return UNSUP_FIELD; + } + + memset(&be->hsh.v5.rcp[index], (uint8_t)*value, + sizeof(struct hsh_v5_rcp_s)); + break; + + case HW_HSH_RCP_COMPARE: + if (!get) { + UNSUP_FIELD_LOG; + return UNSUP_FIELD; + } + + if ((unsigned int)word_off >= be->hsh.nb_rcp) { + INDEX_TOO_LARGE_LOG; + return INDEX_TOO_LARGE; + } + + DO_COMPARE_INDEXS(be->hsh.v5.rcp, struct hsh_v5_rcp_s, index, word_off); + break; + + case HW_HSH_RCP_FIND: + if (!get) { + UNSUP_FIELD_LOG; + return UNSUP_FIELD; + } + + if ((unsigned int)word_off >= be->hsh.nb_rcp) { + INDEX_TOO_LARGE_LOG; + return INDEX_TOO_LARGE; + } + + FIND_EQUAL_INDEX(be->hsh.v5.rcp, struct hsh_v5_rcp_s, index, word_off, + be->hsh.nb_rcp); + break; + + case HW_HSH_RCP_LOAD_DIST_TYPE: + GET_SET(be->hsh.v5.rcp[index].load_dist_type, value); + break; + + case HW_HSH_RCP_MAC_PORT_MASK: + if (word_off > HSH_RCP_MAC_PORT_MASK_SIZE) { + WORD_OFF_TOO_LARGE_LOG; + return WORD_OFF_TOO_LARGE; + } + + GET_SET(be->hsh.v5.rcp[index].mac_port_mask[word_off], value); + break; + + case HW_HSH_RCP_SORT: + GET_SET(be->hsh.v5.rcp[index].sort, value); + break; + + case HW_HSH_RCP_QW0_PE: + GET_SET(be->hsh.v5.rcp[index].qw0_pe, value); + break; + + case HW_HSH_RCP_QW0_OFS: + GET_SET_SIGNED(be->hsh.v5.rcp[index].qw0_ofs, value); + break; + + case HW_HSH_RCP_QW4_PE: + GET_SET(be->hsh.v5.rcp[index].qw4_pe, value); + break; + + case HW_HSH_RCP_QW4_OFS: + GET_SET_SIGNED(be->hsh.v5.rcp[index].qw4_ofs, value); + break; + + case HW_HSH_RCP_W8_PE: + 
GET_SET(be->hsh.v5.rcp[index].w8_pe, value); + break; + + case HW_HSH_RCP_W8_OFS: + GET_SET_SIGNED(be->hsh.v5.rcp[index].w8_ofs, value); + break; + + case HW_HSH_RCP_W8_SORT: + GET_SET(be->hsh.v5.rcp[index].w8_sort, value); + break; + + case HW_HSH_RCP_W9_PE: + GET_SET(be->hsh.v5.rcp[index].w9_pe, value); + break; + + case HW_HSH_RCP_W9_OFS: + GET_SET_SIGNED(be->hsh.v5.rcp[index].w9_ofs, value); + break; + + case HW_HSH_RCP_W9_SORT: + GET_SET(be->hsh.v5.rcp[index].w9_sort, value); + break; + + case HW_HSH_RCP_W9_P: + GET_SET(be->hsh.v5.rcp[index].w9_p, value); + break; + + case HW_HSH_RCP_P_MASK: + GET_SET(be->hsh.v5.rcp[index].p_mask, value); + break; + + case HW_HSH_RCP_WORD_MASK: + if (word_off > HSH_RCP_WORD_MASK_SIZE) { + WORD_OFF_TOO_LARGE_LOG; + return WORD_OFF_TOO_LARGE; + } + + GET_SET(be->hsh.v5.rcp[index].word_mask[word_off], value); + break; + + case HW_HSH_RCP_SEED: + GET_SET(be->hsh.v5.rcp[index].seed, value); + break; + + case HW_HSH_RCP_TNL_P: + GET_SET(be->hsh.v5.rcp[index].tnl_p, value); + break; + + case HW_HSH_RCP_HSH_VALID: + GET_SET(be->hsh.v5.rcp[index].hsh_valid, value); + break; + + case HW_HSH_RCP_HSH_TYPE: + GET_SET(be->hsh.v5.rcp[index].hsh_type, value); + break; + + case HW_HSH_RCP_TOEPLITZ: + GET_SET(be->hsh.v5.rcp[index].toeplitz, value); + break; + + case HW_HSH_RCP_K: + if (word_off > HSH_RCP_KEY_SIZE) { + WORD_OFF_TOO_LARGE_LOG; + return WORD_OFF_TOO_LARGE; + } + + GET_SET(be->hsh.v5.rcp[index].k[word_off], value); + break; + + case HW_HSH_RCP_AUTO_IPV4_MASK: + GET_SET(be->hsh.v5.rcp[index].auto_ipv4_mask, value); + break; + + default: + UNSUP_FIELD_LOG; + return UNSUP_FIELD; + } + + break; + + /* end case 5 */ + default: + UNSUP_VER_LOG; + return UNSUP_VER; + } + + return 0; +} + +int hw_mod_hsh_rcp_set(struct flow_api_backend_s *be, enum hw_hsh_e field, uint32_t index, + uint32_t word_off, uint32_t value) +{ + return hw_mod_hsh_rcp_mod(be, field, index, word_off, &value, 0); +} diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_km.c b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_km.c index 532884ca01..b8a30671c3 100644 --- a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_km.c +++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_km.c @@ -165,6 +165,240 @@ int hw_mod_km_rcp_flush(struct flow_api_backend_s *be, int start_idx, int count) return be->iface->km_rcp_flush(be->be_dev, &be->km, start_idx, count); } +static int hw_mod_km_rcp_mod(struct flow_api_backend_s *be, enum hw_km_e field, int index, + int word_off, uint32_t *value, int get) +{ + if ((unsigned int)index >= be->km.nb_categories) { + INDEX_TOO_LARGE_LOG; + return INDEX_TOO_LARGE; + } + + switch (_VER_) { + case 7: + switch (field) { + case HW_KM_RCP_PRESET_ALL: + if (get) { + UNSUP_FIELD_LOG; + return UNSUP_FIELD; + } + + memset(&be->km.v7.rcp[index], (uint8_t)*value, sizeof(struct km_v7_rcp_s)); + break; + + case HW_KM_RCP_QW0_DYN: + GET_SET(be->km.v7.rcp[index].qw0_dyn, value); + break; + + case HW_KM_RCP_QW0_OFS: + GET_SET_SIGNED(be->km.v7.rcp[index].qw0_ofs, value); + break; + + case HW_KM_RCP_QW0_SEL_A: + GET_SET(be->km.v7.rcp[index].qw0_sel_a, value); + break; + + case HW_KM_RCP_QW0_SEL_B: + GET_SET(be->km.v7.rcp[index].qw0_sel_b, value); + break; + + case HW_KM_RCP_QW4_DYN: + GET_SET(be->km.v7.rcp[index].qw4_dyn, value); + break; + + case HW_KM_RCP_QW4_OFS: + GET_SET_SIGNED(be->km.v7.rcp[index].qw4_ofs, value); + break; + + case HW_KM_RCP_QW4_SEL_A: + GET_SET(be->km.v7.rcp[index].qw4_sel_a, value); + break; + + case HW_KM_RCP_QW4_SEL_B: + GET_SET(be->km.v7.rcp[index].qw4_sel_b, 
value); + break; + + case HW_KM_RCP_DW8_DYN: + GET_SET(be->km.v7.rcp[index].dw8_dyn, value); + break; + + case HW_KM_RCP_DW8_OFS: + GET_SET_SIGNED(be->km.v7.rcp[index].dw8_ofs, value); + break; + + case HW_KM_RCP_DW8_SEL_A: + GET_SET(be->km.v7.rcp[index].dw8_sel_a, value); + break; + + case HW_KM_RCP_DW8_SEL_B: + GET_SET(be->km.v7.rcp[index].dw8_sel_b, value); + break; + + case HW_KM_RCP_DW10_DYN: + GET_SET(be->km.v7.rcp[index].dw10_dyn, value); + break; + + case HW_KM_RCP_DW10_OFS: + GET_SET_SIGNED(be->km.v7.rcp[index].dw10_ofs, value); + break; + + case HW_KM_RCP_DW10_SEL_A: + GET_SET(be->km.v7.rcp[index].dw10_sel_a, value); + break; + + case HW_KM_RCP_DW10_SEL_B: + GET_SET(be->km.v7.rcp[index].dw10_sel_b, value); + break; + + case HW_KM_RCP_SWX_CCH: + GET_SET(be->km.v7.rcp[index].swx_cch, value); + break; + + case HW_KM_RCP_SWX_SEL_A: + GET_SET(be->km.v7.rcp[index].swx_sel_a, value); + break; + + case HW_KM_RCP_SWX_SEL_B: + GET_SET(be->km.v7.rcp[index].swx_sel_b, value); + break; + + case HW_KM_RCP_MASK_A: + if (word_off > KM_RCP_MASK_D_A_SIZE) { + WORD_OFF_TOO_LARGE_LOG; + return WORD_OFF_TOO_LARGE; + } + + GET_SET(be->km.v7.rcp[index].mask_d_a[word_off], value); + break; + + case HW_KM_RCP_MASK_B: + if (word_off > KM_RCP_MASK_B_SIZE) { + WORD_OFF_TOO_LARGE_LOG; + return WORD_OFF_TOO_LARGE; + } + + GET_SET(be->km.v7.rcp[index].mask_b[word_off], value); + break; + + case HW_KM_RCP_DUAL: + GET_SET(be->km.v7.rcp[index].dual, value); + break; + + case HW_KM_RCP_PAIRED: + GET_SET(be->km.v7.rcp[index].paired, value); + break; + + case HW_KM_RCP_EL_A: + GET_SET(be->km.v7.rcp[index].el_a, value); + break; + + case HW_KM_RCP_EL_B: + GET_SET(be->km.v7.rcp[index].el_b, value); + break; + + case HW_KM_RCP_INFO_A: + GET_SET(be->km.v7.rcp[index].info_a, value); + break; + + case HW_KM_RCP_INFO_B: + GET_SET(be->km.v7.rcp[index].info_b, value); + break; + + case HW_KM_RCP_FTM_A: + GET_SET(be->km.v7.rcp[index].ftm_a, value); + break; + + case HW_KM_RCP_FTM_B: + GET_SET(be->km.v7.rcp[index].ftm_b, value); + break; + + case HW_KM_RCP_BANK_A: + GET_SET(be->km.v7.rcp[index].bank_a, value); + break; + + case HW_KM_RCP_BANK_B: + GET_SET(be->km.v7.rcp[index].bank_b, value); + break; + + case HW_KM_RCP_KL_A: + GET_SET(be->km.v7.rcp[index].kl_a, value); + break; + + case HW_KM_RCP_KL_B: + GET_SET(be->km.v7.rcp[index].kl_b, value); + break; + + case HW_KM_RCP_KEYWAY_A: + GET_SET(be->km.v7.rcp[index].keyway_a, value); + break; + + case HW_KM_RCP_KEYWAY_B: + GET_SET(be->km.v7.rcp[index].keyway_b, value); + break; + + case HW_KM_RCP_SYNERGY_MODE: + GET_SET(be->km.v7.rcp[index].synergy_mode, value); + break; + + case HW_KM_RCP_DW0_B_DYN: + GET_SET(be->km.v7.rcp[index].dw0_b_dyn, value); + break; + + case HW_KM_RCP_DW0_B_OFS: + GET_SET_SIGNED(be->km.v7.rcp[index].dw0_b_ofs, value); + break; + + case HW_KM_RCP_DW2_B_DYN: + GET_SET(be->km.v7.rcp[index].dw2_b_dyn, value); + break; + + case HW_KM_RCP_DW2_B_OFS: + GET_SET_SIGNED(be->km.v7.rcp[index].dw2_b_ofs, value); + break; + + case HW_KM_RCP_SW4_B_DYN: + GET_SET(be->km.v7.rcp[index].sw4_b_dyn, value); + break; + + case HW_KM_RCP_SW4_B_OFS: + GET_SET_SIGNED(be->km.v7.rcp[index].sw4_b_ofs, value); + break; + + case HW_KM_RCP_SW5_B_DYN: + GET_SET(be->km.v7.rcp[index].sw5_b_dyn, value); + break; + + case HW_KM_RCP_SW5_B_OFS: + GET_SET_SIGNED(be->km.v7.rcp[index].sw5_b_ofs, value); + break; + + default: + UNSUP_FIELD_LOG; + return UNSUP_FIELD; + } + + break; + + /* end case 7 */ + default: + UNSUP_VER_LOG; + return UNSUP_VER; + } + + return 0; +} + +int 
hw_mod_km_rcp_set(struct flow_api_backend_s *be, enum hw_km_e field, int index, int word_off, + uint32_t value) +{ + return hw_mod_km_rcp_mod(be, field, index, word_off, &value, 0); +} + +int hw_mod_km_rcp_get(struct flow_api_backend_s *be, enum hw_km_e field, int index, int word_off, + uint32_t *value) +{ + return hw_mod_km_rcp_mod(be, field, index, word_off, value, 1); +} + int hw_mod_km_cam_flush(struct flow_api_backend_s *be, int start_bank, int start_record, int count) { if (count == ALL_ENTRIES) @@ -180,6 +414,103 @@ int hw_mod_km_cam_flush(struct flow_api_backend_s *be, int start_bank, int start return be->iface->km_cam_flush(be->be_dev, &be->km, start_bank, start_record, count); } +static int hw_mod_km_cam_mod(struct flow_api_backend_s *be, enum hw_km_e field, int bank, + int record, uint32_t *value, int get) +{ + if ((unsigned int)bank >= be->km.nb_cam_banks) { + INDEX_TOO_LARGE_LOG; + return INDEX_TOO_LARGE; + } + + if ((unsigned int)record >= be->km.nb_cam_records) { + INDEX_TOO_LARGE_LOG; + return INDEX_TOO_LARGE; + } + + unsigned int index = bank * be->km.nb_cam_records + record; + + switch (_VER_) { + case 7: + switch (field) { + case HW_KM_CAM_PRESET_ALL: + if (get) { + UNSUP_FIELD_LOG; + return UNSUP_FIELD; + } + + memset(&be->km.v7.cam[index], (uint8_t)*value, sizeof(struct km_v7_cam_s)); + break; + + case HW_KM_CAM_W0: + GET_SET(be->km.v7.cam[index].w0, value); + break; + + case HW_KM_CAM_W1: + GET_SET(be->km.v7.cam[index].w1, value); + break; + + case HW_KM_CAM_W2: + GET_SET(be->km.v7.cam[index].w2, value); + break; + + case HW_KM_CAM_W3: + GET_SET(be->km.v7.cam[index].w3, value); + break; + + case HW_KM_CAM_W4: + GET_SET(be->km.v7.cam[index].w4, value); + break; + + case HW_KM_CAM_W5: + GET_SET(be->km.v7.cam[index].w5, value); + break; + + case HW_KM_CAM_FT0: + GET_SET(be->km.v7.cam[index].ft0, value); + break; + + case HW_KM_CAM_FT1: + GET_SET(be->km.v7.cam[index].ft1, value); + break; + + case HW_KM_CAM_FT2: + GET_SET(be->km.v7.cam[index].ft2, value); + break; + + case HW_KM_CAM_FT3: + GET_SET(be->km.v7.cam[index].ft3, value); + break; + + case HW_KM_CAM_FT4: + GET_SET(be->km.v7.cam[index].ft4, value); + break; + + case HW_KM_CAM_FT5: + GET_SET(be->km.v7.cam[index].ft5, value); + break; + + default: + UNSUP_FIELD_LOG; + return UNSUP_FIELD; + } + + break; + + /* end case 7 */ + default: + UNSUP_VER_LOG; + return UNSUP_VER; + } + + return 0; +} + +int hw_mod_km_cam_set(struct flow_api_backend_s *be, enum hw_km_e field, int bank, int record, + uint32_t value) +{ + return hw_mod_km_cam_mod(be, field, bank, record, &value, 0); +} + int hw_mod_km_tcam_flush(struct flow_api_backend_s *be, int start_bank, int count) { if (count == ALL_ENTRIES) @@ -273,6 +604,12 @@ int hw_mod_km_tcam_set(struct flow_api_backend_s *be, enum hw_km_e field, int ba return hw_mod_km_tcam_mod(be, field, bank, byte, byte_val, value_set, 0); } +int hw_mod_km_tcam_get(struct flow_api_backend_s *be, enum hw_km_e field, int bank, int byte, + int byte_val, uint32_t *value_set) +{ + return hw_mod_km_tcam_mod(be, field, bank, byte, byte_val, value_set, 1); +} + int hw_mod_km_tci_flush(struct flow_api_backend_s *be, int start_bank, int start_record, int count) { if (count == ALL_ENTRIES) @@ -288,6 +625,49 @@ int hw_mod_km_tci_flush(struct flow_api_backend_s *be, int start_bank, int start return be->iface->km_tci_flush(be->be_dev, &be->km, start_bank, start_record, count); } +static int hw_mod_km_tci_mod(struct flow_api_backend_s *be, enum hw_km_e field, int bank, + int record, uint32_t *value, int get) +{ + 
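+	/*
+	 * TCI entries are addressed per (bank, record) pair: the pair is
+	 * flattened into a linear index using nb_tcam_bank_width as the
+	 * stride and bounds-checked against the full
+	 * nb_tcam_banks * nb_tcam_bank_width space.
+	 */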
unsigned int index = bank * be->km.nb_tcam_bank_width + record; + + if (index >= (be->km.nb_tcam_banks * be->km.nb_tcam_bank_width)) { + INDEX_TOO_LARGE_LOG; + return INDEX_TOO_LARGE; + } + + switch (_VER_) { + case 7: + switch (field) { + case HW_KM_TCI_COLOR: + GET_SET(be->km.v7.tci[index].color, value); + break; + + case HW_KM_TCI_FT: + GET_SET(be->km.v7.tci[index].ft, value); + break; + + default: + UNSUP_FIELD_LOG; + return UNSUP_FIELD; + } + + break; + + /* end case 7 */ + default: + UNSUP_VER_LOG; + return UNSUP_VER; + } + + return 0; +} + +int hw_mod_km_tci_set(struct flow_api_backend_s *be, enum hw_km_e field, int bank, int record, + uint32_t value) +{ + return hw_mod_km_tci_mod(be, field, bank, record, &value, 0); +} + int hw_mod_km_tcq_flush(struct flow_api_backend_s *be, int start_bank, int start_record, int count) { if (count == ALL_ENTRIES) diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_pdb.c b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_pdb.c index c3facacb08..59285405ba 100644 --- a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_pdb.c +++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_pdb.c @@ -85,6 +85,150 @@ int hw_mod_pdb_rcp_flush(struct flow_api_backend_s *be, int start_idx, int count return be->iface->pdb_rcp_flush(be->be_dev, &be->pdb, start_idx, count); } +static int hw_mod_pdb_rcp_mod(struct flow_api_backend_s *be, enum hw_pdb_e field, uint32_t index, + uint32_t *value, int get) +{ + if (index >= be->pdb.nb_pdb_rcp_categories) { + INDEX_TOO_LARGE_LOG; + return INDEX_TOO_LARGE; + } + + switch (_VER_) { + case 9: + switch (field) { + case HW_PDB_RCP_PRESET_ALL: + if (get) { + UNSUP_FIELD_LOG; + return UNSUP_FIELD; + } + + memset(&be->pdb.v9.rcp[index], (uint8_t)*value, + sizeof(struct pdb_v9_rcp_s)); + break; + + case HW_PDB_RCP_FIND: + if (!get) { + UNSUP_FIELD_LOG; + return UNSUP_FIELD; + } + + if (*value >= be->pdb.nb_pdb_rcp_categories) { + INDEX_TOO_LARGE_LOG; + return INDEX_TOO_LARGE; + } + + FIND_EQUAL_INDEX(be->pdb.v9.rcp, struct pdb_v9_rcp_s, index, *value, + be->pdb.nb_pdb_rcp_categories); + break; + + case HW_PDB_RCP_COMPARE: + if (!get) { + UNSUP_FIELD_LOG; + return UNSUP_FIELD; + } + + if (*value >= be->pdb.nb_pdb_rcp_categories) { + INDEX_TOO_LARGE_LOG; + return INDEX_TOO_LARGE; + } + + DO_COMPARE_INDEXS(be->pdb.v9.rcp, struct pdb_v9_rcp_s, index, *value); + break; + + case HW_PDB_RCP_DESCRIPTOR: + GET_SET(be->pdb.v9.rcp[index].descriptor, value); + break; + + case HW_PDB_RCP_DESC_LEN: + GET_SET(be->pdb.v9.rcp[index].desc_len, value); + break; + + case HW_PDB_RCP_TX_PORT: + GET_SET(be->pdb.v9.rcp[index].tx_port, value); + break; + + case HW_PDB_RCP_TX_IGNORE: + GET_SET(be->pdb.v9.rcp[index].tx_ignore, value); + break; + + case HW_PDB_RCP_TX_NOW: + GET_SET(be->pdb.v9.rcp[index].tx_now, value); + break; + + case HW_PDB_RCP_CRC_OVERWRITE: + GET_SET(be->pdb.v9.rcp[index].crc_overwrite, value); + break; + + case HW_PDB_RCP_ALIGN: + GET_SET(be->pdb.v9.rcp[index].align, value); + break; + + case HW_PDB_RCP_OFS0_DYN: + GET_SET(be->pdb.v9.rcp[index].ofs0_dyn, value); + break; + + case HW_PDB_RCP_OFS0_REL: + GET_SET_SIGNED(be->pdb.v9.rcp[index].ofs0_rel, value); + break; + + case HW_PDB_RCP_OFS1_DYN: + GET_SET(be->pdb.v9.rcp[index].ofs1_dyn, value); + break; + + case HW_PDB_RCP_OFS1_REL: + GET_SET_SIGNED(be->pdb.v9.rcp[index].ofs1_rel, value); + break; + + case HW_PDB_RCP_OFS2_DYN: + GET_SET(be->pdb.v9.rcp[index].ofs2_dyn, value); + break; + + case HW_PDB_RCP_OFS2_REL: + GET_SET_SIGNED(be->pdb.v9.rcp[index].ofs2_rel, value); + break; + + case 
HW_PDB_RCP_IP_PROT_TNL: + GET_SET(be->pdb.v9.rcp[index].ip_prot_tnl, value); + break; + + case HW_PDB_RCP_PPC_HSH: + GET_SET(be->pdb.v9.rcp[index].ppc_hsh, value); + break; + + case HW_PDB_RCP_DUPLICATE_EN: + GET_SET(be->pdb.v9.rcp[index].duplicate_en, value); + break; + + case HW_PDB_RCP_DUPLICATE_BIT: + GET_SET(be->pdb.v9.rcp[index].duplicate_bit, value); + break; + + case HW_PDB_RCP_PCAP_KEEP_FCS: + GET_SET(be->pdb.v9.rcp[index].pcap_keep_fcs, value); + break; + + default: + UNSUP_FIELD_LOG; + return UNSUP_FIELD; + } + + break; + + /* end case 9 */ + default: + UNSUP_VER_LOG; + return UNSUP_VER; + } + + return 0; +} + +int hw_mod_pdb_rcp_set(struct flow_api_backend_s *be, enum hw_pdb_e field, uint32_t index, + uint32_t value) +{ + return hw_mod_pdb_rcp_mod(be, field, index, &value, 0); +} + int hw_mod_pdb_config_flush(struct flow_api_backend_s *be) { return be->iface->pdb_config_flush(be->be_dev, &be->pdb); diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_qsl.c b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_qsl.c index 93b37d595e..70fe97a298 100644 --- a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_qsl.c +++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_qsl.c @@ -104,6 +104,114 @@ int hw_mod_qsl_rcp_flush(struct flow_api_backend_s *be, int start_idx, int count return be->iface->qsl_rcp_flush(be->be_dev, &be->qsl, start_idx, count); } +static int hw_mod_qsl_rcp_mod(struct flow_api_backend_s *be, enum hw_qsl_e field, uint32_t index, + uint32_t *value, int get) +{ + if (index >= be->qsl.nb_rcp_categories) { + INDEX_TOO_LARGE_LOG; + return INDEX_TOO_LARGE; + } + + switch (_VER_) { + case 7: + switch (field) { + case HW_QSL_RCP_PRESET_ALL: + if (get) { + UNSUP_FIELD_LOG; + return UNSUP_FIELD; + } + + memset(&be->qsl.v7.rcp[index], (uint8_t)*value, + sizeof(struct qsl_v7_rcp_s)); + break; + + case HW_QSL_RCP_FIND: + if (!get) { + UNSUP_FIELD_LOG; + return UNSUP_FIELD; + } + + if (*value >= be->qsl.nb_rcp_categories) { + INDEX_TOO_LARGE_LOG; + return INDEX_TOO_LARGE; + } + + FIND_EQUAL_INDEX(be->qsl.v7.rcp, struct qsl_v7_rcp_s, index, *value, + be->qsl.nb_rcp_categories); + break; + + case HW_QSL_RCP_COMPARE: + if (!get) { + UNSUP_FIELD_LOG; + return UNSUP_FIELD; + } + + if (*value >= be->qsl.nb_rcp_categories) { + INDEX_TOO_LARGE_LOG; + return INDEX_TOO_LARGE; + } + + DO_COMPARE_INDEXS(be->qsl.v7.rcp, struct qsl_v7_rcp_s, index, *value); + break; + + case HW_QSL_RCP_DISCARD: + GET_SET(be->qsl.v7.rcp[index].discard, value); + break; + + case HW_QSL_RCP_DROP: + GET_SET(be->qsl.v7.rcp[index].drop, value); + break; + + case HW_QSL_RCP_TBL_LO: + GET_SET(be->qsl.v7.rcp[index].tbl_lo, value); + break; + + case HW_QSL_RCP_TBL_HI: + GET_SET(be->qsl.v7.rcp[index].tbl_hi, value); + break; + + case HW_QSL_RCP_TBL_IDX: + GET_SET(be->qsl.v7.rcp[index].tbl_idx, value); + break; + + case HW_QSL_RCP_TBL_MSK: + GET_SET(be->qsl.v7.rcp[index].tbl_msk, value); + break; + + case HW_QSL_RCP_LR: + GET_SET(be->qsl.v7.rcp[index].lr, value); + break; + + case HW_QSL_RCP_TSA: + GET_SET(be->qsl.v7.rcp[index].tsa, value); + break; + + case HW_QSL_RCP_VLI: + GET_SET(be->qsl.v7.rcp[index].vli, value); + break; + + default: + UNSUP_FIELD_LOG; + return UNSUP_FIELD; + } + + break; + + /* end case 7 */ + default: + UNSUP_VER_LOG; + return UNSUP_VER; + } + + return 0; +} + +int hw_mod_qsl_rcp_set(struct flow_api_backend_s *be, enum hw_qsl_e field, uint32_t index, + uint32_t value) +{ + return hw_mod_qsl_rcp_mod(be, field, index, &value, 0); +} + int hw_mod_qsl_qst_flush(struct flow_api_backend_s *be, int start_idx, 
int count) { if (count == ALL_ENTRIES) @@ -117,6 +225,73 @@ int hw_mod_qsl_qst_flush(struct flow_api_backend_s *be, int start_idx, int count return be->iface->qsl_qst_flush(be->be_dev, &be->qsl, start_idx, count); } +static int hw_mod_qsl_qst_mod(struct flow_api_backend_s *be, enum hw_qsl_e field, uint32_t index, + uint32_t *value, int get) +{ + if (index >= be->qsl.nb_qst_entries) { + INDEX_TOO_LARGE_LOG; + return INDEX_TOO_LARGE; + } + + switch (_VER_) { + case 7: + switch (field) { + case HW_QSL_QST_PRESET_ALL: + if (get) { + UNSUP_FIELD_LOG; + return UNSUP_FIELD; + } + + memset(&be->qsl.v7.qst[index], (uint8_t)*value, + sizeof(struct qsl_v7_qst_s)); + break; + + case HW_QSL_QST_QUEUE: + GET_SET(be->qsl.v7.qst[index].queue, value); + break; + + case HW_QSL_QST_EN: + GET_SET(be->qsl.v7.qst[index].en, value); + break; + + case HW_QSL_QST_TX_PORT: + GET_SET(be->qsl.v7.qst[index].tx_port, value); + break; + + case HW_QSL_QST_LRE: + GET_SET(be->qsl.v7.qst[index].lre, value); + break; + + case HW_QSL_QST_TCI: + GET_SET(be->qsl.v7.qst[index].tci, value); + break; + + case HW_QSL_QST_VEN: + GET_SET(be->qsl.v7.qst[index].ven, value); + break; + + default: + UNSUP_FIELD_LOG; + return UNSUP_FIELD; + } + + break; + + /* end case 7 */ + default: + UNSUP_VER_LOG; + return UNSUP_VER; + } + + return 0; +} + +int hw_mod_qsl_qst_set(struct flow_api_backend_s *be, enum hw_qsl_e field, uint32_t index, + uint32_t value) +{ + return hw_mod_qsl_qst_mod(be, field, index, &value, 0); +} + int hw_mod_qsl_qen_flush(struct flow_api_backend_s *be, int start_idx, int count) { if (count == ALL_ENTRIES) @@ -130,6 +305,49 @@ int hw_mod_qsl_qen_flush(struct flow_api_backend_s *be, int start_idx, int count return be->iface->qsl_qen_flush(be->be_dev, &be->qsl, start_idx, count); } +static int hw_mod_qsl_qen_mod(struct flow_api_backend_s *be, enum hw_qsl_e field, uint32_t index, + uint32_t *value, int get) +{ + if (index >= QSL_QEN_ENTRIES) { + INDEX_TOO_LARGE_LOG; + return INDEX_TOO_LARGE; + } + + switch (_VER_) { + case 7: + switch (field) { + case HW_QSL_QEN_EN: + GET_SET(be->qsl.v7.qen[index].en, value); + break; + + default: + UNSUP_FIELD_LOG; + return UNSUP_FIELD; + } + + break; + + /* end case 7 */ + default: + UNSUP_VER_LOG; + return UNSUP_VER; + } + + return 0; +} + +int hw_mod_qsl_qen_set(struct flow_api_backend_s *be, enum hw_qsl_e field, uint32_t index, + uint32_t value) +{ + return hw_mod_qsl_qen_mod(be, field, index, &value, 0); +} + +int hw_mod_qsl_qen_get(struct flow_api_backend_s *be, enum hw_qsl_e field, uint32_t index, + uint32_t *value) +{ + return hw_mod_qsl_qen_mod(be, field, index, value, 1); +} + int hw_mod_qsl_unmq_flush(struct flow_api_backend_s *be, int start_idx, int count) { if (count == ALL_ENTRIES) diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc_lr.c b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc_lr.c index 1d878f3f96..30e5e38690 100644 --- a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc_lr.c +++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_slc_lr.c @@ -66,3 +66,103 @@ int hw_mod_slc_lr_rcp_flush(struct flow_api_backend_s *be, int start_idx, int co return be->iface->slc_lr_rcp_flush(be->be_dev, &be->slc_lr, start_idx, count); } + +static int hw_mod_slc_lr_rcp_mod(struct flow_api_backend_s *be, enum hw_slc_lr_e field, + uint32_t index, uint32_t *value, int get) +{ + if (index >= be->max_categories) { + INDEX_TOO_LARGE_LOG; + return INDEX_TOO_LARGE; + } + + switch (_VER_) { + case 2: + switch (field) { + case HW_SLC_LR_RCP_PRESET_ALL: + if (get) { + UNSUP_FIELD_LOG; + 
return UNSUP_FIELD; + } + + memset(&be->slc_lr.v2.rcp[index], (uint8_t)*value, + sizeof(struct hw_mod_slc_lr_v2_s)); + break; + + case HW_SLC_LR_RCP_FIND: + if (!get) { + UNSUP_FIELD_LOG; + return UNSUP_FIELD; + } + + if (*value >= be->max_categories) { + INDEX_TOO_LARGE_LOG; + return INDEX_TOO_LARGE; + } + + FIND_EQUAL_INDEX(be->slc_lr.v2.rcp, struct hw_mod_slc_lr_v2_s, index, + *value, be->max_categories); + break; + + case HW_SLC_LR_RCP_COMPARE: + if (!get) { + UNSUP_FIELD_LOG; + return UNSUP_FIELD; + } + + if (*value >= be->max_categories) { + INDEX_TOO_LARGE_LOG; + return INDEX_TOO_LARGE; + } + + DO_COMPARE_INDEXS(be->slc_lr.v2.rcp, struct hw_mod_slc_lr_v2_s, index, + *value); + break; + + case HW_SLC_LR_RCP_HEAD_SLC_EN: + GET_SET(be->slc_lr.v2.rcp[index].head_slc_en, value); + break; + + case HW_SLC_LR_RCP_HEAD_DYN: + GET_SET(be->slc_lr.v2.rcp[index].head_dyn, value); + break; + + case HW_SLC_LR_RCP_HEAD_OFS: + GET_SET_SIGNED(be->slc_lr.v2.rcp[index].head_ofs, value); + break; + + case HW_SLC_LR_RCP_TAIL_SLC_EN: + GET_SET(be->slc_lr.v2.rcp[index].tail_slc_en, value); + break; + + case HW_SLC_LR_RCP_TAIL_DYN: + GET_SET(be->slc_lr.v2.rcp[index].tail_dyn, value); + break; + + case HW_SLC_LR_RCP_TAIL_OFS: + GET_SET_SIGNED(be->slc_lr.v2.rcp[index].tail_ofs, value); + break; + + case HW_SLC_LR_RCP_PCAP: + GET_SET(be->slc_lr.v2.rcp[index].pcap, value); + break; + + default: + UNSUP_FIELD_LOG; + return UNSUP_FIELD; + } + + break; + + default: + UNSUP_VER_LOG; + return UNSUP_VER; + } + + return 0; +} + +int hw_mod_slc_lr_rcp_set(struct flow_api_backend_s *be, enum hw_slc_lr_e field, uint32_t index, + uint32_t value) +{ + return hw_mod_slc_lr_rcp_mod(be, field, index, &value, 0); +} diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_tpe.c b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_tpe.c index 0d73b795d5..2c3ed2355b 100644 --- a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_tpe.c +++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_tpe.c @@ -152,6 +152,54 @@ int hw_mod_tpe_rpp_ifr_rcp_flush(struct flow_api_backend_s *be, int start_idx, i return be->iface->tpe_rpp_ifr_rcp_flush(be->be_dev, &be->tpe, start_idx, count); } +static int hw_mod_tpe_rpp_ifr_rcp_mod(struct flow_api_backend_s *be, enum hw_tpe_e field, + uint32_t index, uint32_t *value, int get) +{ + if (index >= be->tpe.nb_ifr_categories) + return INDEX_TOO_LARGE; + + switch (_VER_) { + case 3: + switch (field) { + case HW_TPE_IFR_RCP_IPV4_EN: + GET_SET(be->tpe.v3.rpp_ifr_rcp[index].ipv4_en, value); + break; + + case HW_TPE_IFR_RCP_IPV4_DF_DROP: + GET_SET(be->tpe.v3.rpp_ifr_rcp[index].ipv4_df_drop, value); + break; + + case HW_TPE_IFR_RCP_IPV6_EN: + GET_SET(be->tpe.v3.rpp_ifr_rcp[index].ipv6_en, value); + break; + + case HW_TPE_IFR_RCP_IPV6_DROP: + GET_SET(be->tpe.v3.rpp_ifr_rcp[index].ipv6_drop, value); + break; + + case HW_TPE_IFR_RCP_MTU: + GET_SET(be->tpe.v3.rpp_ifr_rcp[index].mtu, value); + break; + + default: + return UNSUP_FIELD; + } + + break; + + default: + return UNSUP_VER; + } + + return 0; +} + +int hw_mod_tpe_rpp_ifr_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field, int index, + uint32_t value) +{ + return hw_mod_tpe_rpp_ifr_rcp_mod(be, field, index, &value, 0); +} + /* * RPP_RCP */ @@ -169,6 +217,82 @@ int hw_mod_tpe_rpp_rcp_flush(struct flow_api_backend_s *be, int start_idx, int c return be->iface->tpe_rpp_rcp_flush(be->be_dev, &be->tpe, start_idx, count); } +static int hw_mod_tpe_rpp_rcp_mod(struct flow_api_backend_s *be, enum hw_tpe_e field, + uint32_t index, uint32_t *value, int get) +{ + if (index 
>= be->tpe.nb_rcp_categories) { + INDEX_TOO_LARGE_LOG; + return INDEX_TOO_LARGE; + } + + switch (_VER_) { + case 3: + switch (field) { + case HW_TPE_PRESET_ALL: + if (get) { + UNSUP_FIELD_LOG; + return UNSUP_FIELD; + } + + memset(&be->tpe.v3.rpp_rcp[index], (uint8_t)*value, + sizeof(struct tpe_v1_rpp_v0_rcp_s)); + break; + + case HW_TPE_FIND: + if (!get) { + UNSUP_FIELD_LOG; + return UNSUP_FIELD; + } + + if (*value >= be->tpe.nb_rcp_categories) { + INDEX_TOO_LARGE_LOG; + return INDEX_TOO_LARGE; + } + + FIND_EQUAL_INDEX(be->tpe.v3.rpp_rcp, struct tpe_v1_rpp_v0_rcp_s, index, + *value, be->tpe.nb_rcp_categories); + break; + + case HW_TPE_COMPARE: + if (!get) { + UNSUP_FIELD_LOG; + return UNSUP_FIELD; + } + + if (*value >= be->tpe.nb_rcp_categories) { + INDEX_TOO_LARGE_LOG; + return INDEX_TOO_LARGE; + } + + DO_COMPARE_INDEXS(be->tpe.v3.rpp_rcp, struct tpe_v1_rpp_v0_rcp_s, index, + *value); + break; + + case HW_TPE_RPP_RCP_EXP: + GET_SET(be->tpe.v3.rpp_rcp[index].exp, value); + break; + + default: + UNSUP_FIELD_LOG; + return UNSUP_FIELD; + } + + break; + + default: + UNSUP_VER_LOG; + return UNSUP_VER; + } + + return 0; +} + +int hw_mod_tpe_rpp_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field, int index, + uint32_t value) +{ + return hw_mod_tpe_rpp_rcp_mod(be, field, index, &value, 0); +} + /* * IFR_RCP */ @@ -186,6 +310,54 @@ int hw_mod_tpe_ifr_rcp_flush(struct flow_api_backend_s *be, int start_idx, int c return be->iface->tpe_ifr_rcp_flush(be->be_dev, &be->tpe, start_idx, count); } +static int hw_mod_tpe_ifr_rcp_mod(struct flow_api_backend_s *be, enum hw_tpe_e field, + uint32_t index, uint32_t *value, int get) +{ + if (index >= be->tpe.nb_ifr_categories) + return INDEX_TOO_LARGE; + + switch (_VER_) { + case 3: + switch (field) { + case HW_TPE_IFR_RCP_IPV4_EN: + GET_SET(be->tpe.v3.ifr_rcp[index].ipv4_en, value); + break; + + case HW_TPE_IFR_RCP_IPV4_DF_DROP: + GET_SET(be->tpe.v3.ifr_rcp[index].ipv4_df_drop, value); + break; + + case HW_TPE_IFR_RCP_IPV6_EN: + GET_SET(be->tpe.v3.ifr_rcp[index].ipv6_en, value); + break; + + case HW_TPE_IFR_RCP_IPV6_DROP: + GET_SET(be->tpe.v3.ifr_rcp[index].ipv6_drop, value); + break; + + case HW_TPE_IFR_RCP_MTU: + GET_SET(be->tpe.v3.ifr_rcp[index].mtu, value); + break; + + default: + return UNSUP_FIELD; + } + + break; + + default: + return UNSUP_VER; + } + + return 0; +} + +int hw_mod_tpe_ifr_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field, int index, + uint32_t value) +{ + return hw_mod_tpe_ifr_rcp_mod(be, field, index, &value, 0); +} + /* * INS_RCP */ @@ -203,6 +375,90 @@ int hw_mod_tpe_ins_rcp_flush(struct flow_api_backend_s *be, int start_idx, int c return be->iface->tpe_ins_rcp_flush(be->be_dev, &be->tpe, start_idx, count); } +static int hw_mod_tpe_ins_rcp_mod(struct flow_api_backend_s *be, enum hw_tpe_e field, + uint32_t index, uint32_t *value, int get) +{ + if (index >= be->tpe.nb_rcp_categories) { + INDEX_TOO_LARGE_LOG; + return INDEX_TOO_LARGE; + } + + switch (_VER_) { + case 3: + switch (field) { + case HW_TPE_PRESET_ALL: + if (get) { + UNSUP_FIELD_LOG; + return UNSUP_FIELD; + } + + memset(&be->tpe.v3.ins_rcp[index], (uint8_t)*value, + sizeof(struct tpe_v1_ins_v1_rcp_s)); + break; + + case HW_TPE_FIND: + if (!get) { + UNSUP_FIELD_LOG; + return UNSUP_FIELD; + } + + if (*value >= be->tpe.nb_rcp_categories) { + INDEX_TOO_LARGE_LOG; + return INDEX_TOO_LARGE; + } + + FIND_EQUAL_INDEX(be->tpe.v3.ins_rcp, struct tpe_v1_ins_v1_rcp_s, index, + *value, be->tpe.nb_rcp_categories); + break; + + case HW_TPE_COMPARE: + if (!get) { + 
UNSUP_FIELD_LOG; + return UNSUP_FIELD; + } + + if (*value >= be->tpe.nb_rcp_categories) { + INDEX_TOO_LARGE_LOG; + return INDEX_TOO_LARGE; + } + + DO_COMPARE_INDEXS(be->tpe.v3.ins_rcp, struct tpe_v1_ins_v1_rcp_s, index, + *value); + break; + + case HW_TPE_INS_RCP_DYN: + GET_SET(be->tpe.v3.ins_rcp[index].dyn, value); + break; + + case HW_TPE_INS_RCP_OFS: + GET_SET(be->tpe.v3.ins_rcp[index].ofs, value); + break; + + case HW_TPE_INS_RCP_LEN: + GET_SET(be->tpe.v3.ins_rcp[index].len, value); + break; + + default: + UNSUP_FIELD_LOG; + return UNSUP_FIELD; + } + + break; + + default: + UNSUP_VER_LOG; + return UNSUP_VER; + } + + return 0; +} + +int hw_mod_tpe_ins_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field, int index, + uint32_t value) +{ + return hw_mod_tpe_ins_rcp_mod(be, field, index, &value, 0); +} + /* * RPL_RCP */ @@ -220,6 +476,102 @@ int hw_mod_tpe_rpl_rcp_flush(struct flow_api_backend_s *be, int start_idx, int c return be->iface->tpe_rpl_rcp_flush(be->be_dev, &be->tpe, start_idx, count); } +static int hw_mod_tpe_rpl_rcp_mod(struct flow_api_backend_s *be, enum hw_tpe_e field, + uint32_t index, uint32_t *value, int get) +{ + if (index >= be->tpe.nb_rcp_categories) { + INDEX_TOO_LARGE_LOG; + return INDEX_TOO_LARGE; + } + + switch (_VER_) { + case 3: + switch (field) { + case HW_TPE_PRESET_ALL: + if (get) { + UNSUP_FIELD_LOG; + return UNSUP_FIELD; + } + + memset(&be->tpe.v3.rpl_rcp[index], (uint8_t)*value, + sizeof(struct tpe_v3_rpl_v4_rcp_s)); + break; + + case HW_TPE_FIND: + if (!get) { + UNSUP_FIELD_LOG; + return UNSUP_FIELD; + } + + if (*value >= be->tpe.nb_rcp_categories) { + INDEX_TOO_LARGE_LOG; + return INDEX_TOO_LARGE; + } + + FIND_EQUAL_INDEX(be->tpe.v3.rpl_rcp, struct tpe_v3_rpl_v4_rcp_s, index, + *value, be->tpe.nb_rcp_categories); + break; + + case HW_TPE_COMPARE: + if (!get) { + UNSUP_FIELD_LOG; + return UNSUP_FIELD; + } + + if (*value >= be->tpe.nb_rcp_categories) { + INDEX_TOO_LARGE_LOG; + return INDEX_TOO_LARGE; + } + + DO_COMPARE_INDEXS(be->tpe.v3.rpl_rcp, struct tpe_v3_rpl_v4_rcp_s, index, + *value); + break; + + case HW_TPE_RPL_RCP_DYN: + GET_SET(be->tpe.v3.rpl_rcp[index].dyn, value); + break; + + case HW_TPE_RPL_RCP_OFS: + GET_SET(be->tpe.v3.rpl_rcp[index].ofs, value); + break; + + case HW_TPE_RPL_RCP_LEN: + GET_SET(be->tpe.v3.rpl_rcp[index].len, value); + break; + + case HW_TPE_RPL_RCP_RPL_PTR: + GET_SET(be->tpe.v3.rpl_rcp[index].rpl_ptr, value); + break; + + case HW_TPE_RPL_RCP_EXT_PRIO: + GET_SET(be->tpe.v3.rpl_rcp[index].ext_prio, value); + break; + + case HW_TPE_RPL_RCP_ETH_TYPE_WR: + GET_SET(be->tpe.v3.rpl_rcp[index].eth_type_wr, value); + break; + + default: + UNSUP_FIELD_LOG; + return UNSUP_FIELD; + } + + break; + + default: + UNSUP_VER_LOG; + return UNSUP_VER; + } + + return 0; +} + +int hw_mod_tpe_rpl_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field, int index, + uint32_t value) +{ + return hw_mod_tpe_rpl_rcp_mod(be, field, index, &value, 0); +} + /* * RPL_EXT */ @@ -237,6 +589,86 @@ int hw_mod_tpe_rpl_ext_flush(struct flow_api_backend_s *be, int start_idx, int c return be->iface->tpe_rpl_ext_flush(be->be_dev, &be->tpe, start_idx, count); } +static int hw_mod_tpe_rpl_ext_mod(struct flow_api_backend_s *be, enum hw_tpe_e field, + uint32_t index, uint32_t *value, int get) +{ + if (index >= be->tpe.nb_rpl_ext_categories) { + INDEX_TOO_LARGE_LOG; + return INDEX_TOO_LARGE; + } + + switch (_VER_) { + case 3: + switch (field) { + case HW_TPE_PRESET_ALL: + if (get) { + UNSUP_FIELD_LOG; + return UNSUP_FIELD; + } + + 
memset(&be->tpe.v3.rpl_ext[index], (uint8_t)*value, + sizeof(struct tpe_v1_rpl_v2_ext_s)); + break; + + case HW_TPE_FIND: + if (!get) { + UNSUP_FIELD_LOG; + return UNSUP_FIELD; + } + + if (*value >= be->tpe.nb_rpl_ext_categories) { + INDEX_TOO_LARGE_LOG; + return INDEX_TOO_LARGE; + } + + FIND_EQUAL_INDEX(be->tpe.v3.rpl_ext, struct tpe_v1_rpl_v2_ext_s, index, + *value, be->tpe.nb_rpl_ext_categories); + break; + + case HW_TPE_COMPARE: + if (!get) { + UNSUP_FIELD_LOG; + return UNSUP_FIELD; + } + + if (*value >= be->tpe.nb_rpl_ext_categories) { + INDEX_TOO_LARGE_LOG; + return INDEX_TOO_LARGE; + } + + DO_COMPARE_INDEXS(be->tpe.v3.rpl_ext, struct tpe_v1_rpl_v2_ext_s, index, + *value); + break; + + case HW_TPE_RPL_EXT_RPL_PTR: + GET_SET(be->tpe.v3.rpl_ext[index].rpl_ptr, value); + break; + + case HW_TPE_RPL_EXT_META_RPL_LEN: + GET_SET(be->tpe.v3.rpl_ext[index].meta_rpl_len, value); + break; + + default: + UNSUP_FIELD_LOG; + return UNSUP_FIELD; + } + + break; + + default: + UNSUP_VER_LOG; + return UNSUP_VER; + } + + return 0; +} + +int hw_mod_tpe_rpl_ext_set(struct flow_api_backend_s *be, enum hw_tpe_e field, int index, + uint32_t value) +{ + return hw_mod_tpe_rpl_ext_mod(be, field, index, &value, 0); +} + /* * RPL_RPL */ @@ -254,6 +686,89 @@ int hw_mod_tpe_rpl_rpl_flush(struct flow_api_backend_s *be, int start_idx, int c return be->iface->tpe_rpl_rpl_flush(be->be_dev, &be->tpe, start_idx, count); } +static int hw_mod_tpe_rpl_rpl_mod(struct flow_api_backend_s *be, enum hw_tpe_e field, + uint32_t index, uint32_t *value, int get) +{ + if (index >= be->tpe.nb_rpl_depth) { + INDEX_TOO_LARGE_LOG; + return INDEX_TOO_LARGE; + } + + switch (_VER_) { + case 3: + switch (field) { + case HW_TPE_PRESET_ALL: + if (get) { + UNSUP_FIELD_LOG; + return UNSUP_FIELD; + } + + memset(&be->tpe.v3.rpl_rpl[index], (uint8_t)*value, + sizeof(struct tpe_v1_rpl_v2_rpl_s)); + break; + + case HW_TPE_FIND: + if (!get) { + UNSUP_FIELD_LOG; + return UNSUP_FIELD; + } + + if (*value >= be->tpe.nb_rpl_depth) { + INDEX_TOO_LARGE_LOG; + return INDEX_TOO_LARGE; + } + + FIND_EQUAL_INDEX(be->tpe.v3.rpl_rpl, struct tpe_v1_rpl_v2_rpl_s, index, + *value, be->tpe.nb_rpl_depth); + break; + + case HW_TPE_COMPARE: + if (!get) { + UNSUP_FIELD_LOG; + return UNSUP_FIELD; + } + + if (*value >= be->tpe.nb_rpl_depth) { + INDEX_TOO_LARGE_LOG; + return INDEX_TOO_LARGE; + } + + DO_COMPARE_INDEXS(be->tpe.v3.rpl_rpl, struct tpe_v1_rpl_v2_rpl_s, index, + *value); + break; + + case HW_TPE_RPL_RPL_VALUE: + if (get) + memcpy(value, be->tpe.v3.rpl_rpl[index].value, + sizeof(uint32_t) * 4); + + else + memcpy(be->tpe.v3.rpl_rpl[index].value, value, + sizeof(uint32_t) * 4); + + break; + + default: + UNSUP_FIELD_LOG; + return UNSUP_FIELD; + } + + break; + + default: + UNSUP_VER_LOG; + return UNSUP_VER; + } + + return 0; +} + +int hw_mod_tpe_rpl_rpl_set(struct flow_api_backend_s *be, enum hw_tpe_e field, int index, + uint32_t *value) +{ + return hw_mod_tpe_rpl_rpl_mod(be, field, index, value, 0); +} + /* * CPY_RCP */ @@ -273,6 +788,96 @@ int hw_mod_tpe_cpy_rcp_flush(struct flow_api_backend_s *be, int start_idx, int c return be->iface->tpe_cpy_rcp_flush(be->be_dev, &be->tpe, start_idx, count); } +static int hw_mod_tpe_cpy_rcp_mod(struct flow_api_backend_s *be, enum hw_tpe_e field, + uint32_t index, uint32_t *value, int get) +{ + const uint32_t cpy_size = be->tpe.nb_cpy_writers * be->tpe.nb_rcp_categories; + + if (index >= cpy_size) { + INDEX_TOO_LARGE_LOG; + return INDEX_TOO_LARGE; + } + + switch (_VER_) { + case 3: + switch (field) { + case HW_TPE_PRESET_ALL: + if 
(get) { + UNSUP_FIELD_LOG; + return UNSUP_FIELD; + } + + memset(&be->tpe.v3.cpy_rcp[index], (uint8_t)*value, + sizeof(struct tpe_v1_cpy_v1_rcp_s)); + break; + + case HW_TPE_FIND: + if (!get) { + UNSUP_FIELD_LOG; + return UNSUP_FIELD; + } + + if (*value >= cpy_size) { + INDEX_TOO_LARGE_LOG; + return INDEX_TOO_LARGE; + } + + FIND_EQUAL_INDEX(be->tpe.v3.cpy_rcp, struct tpe_v1_cpy_v1_rcp_s, index, + *value, cpy_size); + break; + + case HW_TPE_COMPARE: + if (!get) { + UNSUP_FIELD_LOG; + return UNSUP_FIELD; + } + + if (*value >= cpy_size) { + INDEX_TOO_LARGE_LOG; + return INDEX_TOO_LARGE; + } + + DO_COMPARE_INDEXS(be->tpe.v3.cpy_rcp, struct tpe_v1_cpy_v1_rcp_s, index, + *value); + break; + + case HW_TPE_CPY_RCP_READER_SELECT: + GET_SET(be->tpe.v3.cpy_rcp[index].reader_select, value); + break; + + case HW_TPE_CPY_RCP_DYN: + GET_SET(be->tpe.v3.cpy_rcp[index].dyn, value); + break; + + case HW_TPE_CPY_RCP_OFS: + GET_SET(be->tpe.v3.cpy_rcp[index].ofs, value); + break; + + case HW_TPE_CPY_RCP_LEN: + GET_SET(be->tpe.v3.cpy_rcp[index].len, value); + break; + + default: + UNSUP_FIELD_LOG; + return UNSUP_FIELD; + } + + break; + + default: + UNSUP_VER_LOG; + return UNSUP_VER; + } + + return 0; +} + +int hw_mod_tpe_cpy_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field, int index, + uint32_t value) +{ + return hw_mod_tpe_cpy_rcp_mod(be, field, index, &value, 0); +} + /* * HFU_RCP */ @@ -290,6 +895,166 @@ int hw_mod_tpe_hfu_rcp_flush(struct flow_api_backend_s *be, int start_idx, int c return be->iface->tpe_hfu_rcp_flush(be->be_dev, &be->tpe, start_idx, count); } +static int hw_mod_tpe_hfu_rcp_mod(struct flow_api_backend_s *be, enum hw_tpe_e field, + uint32_t index, uint32_t *value, int get) +{ + if (index >= be->tpe.nb_rcp_categories) { + INDEX_TOO_LARGE_LOG; + return INDEX_TOO_LARGE; + } + + switch (_VER_) { + case 3: + switch (field) { + case HW_TPE_PRESET_ALL: + if (get) { + UNSUP_FIELD_LOG; + return UNSUP_FIELD; + } + + memset(&be->tpe.v3.hfu_rcp[index], (uint8_t)*value, + sizeof(struct tpe_v1_hfu_v1_rcp_s)); + break; + + case HW_TPE_FIND: + if (!get) { + UNSUP_FIELD_LOG; + return UNSUP_FIELD; + } + + if (*value >= be->tpe.nb_rcp_categories) { + INDEX_TOO_LARGE_LOG; + return INDEX_TOO_LARGE; + } + + FIND_EQUAL_INDEX(be->tpe.v3.hfu_rcp, struct tpe_v1_hfu_v1_rcp_s, index, + *value, be->tpe.nb_rcp_categories); + break; + + case HW_TPE_COMPARE: + if (!get) { + UNSUP_FIELD_LOG; + return UNSUP_FIELD; + } + + if (*value >= be->tpe.nb_rcp_categories) { + INDEX_TOO_LARGE_LOG; + return INDEX_TOO_LARGE; + } + + DO_COMPARE_INDEXS(be->tpe.v3.hfu_rcp, struct tpe_v1_hfu_v1_rcp_s, index, + *value); + break; + + case HW_TPE_HFU_RCP_LEN_A_WR: + GET_SET(be->tpe.v3.hfu_rcp[index].len_a_wr, value); + break; + + case HW_TPE_HFU_RCP_LEN_A_OUTER_L4_LEN: + GET_SET(be->tpe.v3.hfu_rcp[index].len_a_outer_l4_len, value); + break; + + case HW_TPE_HFU_RCP_LEN_A_POS_DYN: + GET_SET(be->tpe.v3.hfu_rcp[index].len_a_pos_dyn, value); + break; + + case HW_TPE_HFU_RCP_LEN_A_POS_OFS: + GET_SET(be->tpe.v3.hfu_rcp[index].len_a_pos_ofs, value); + break; + + case HW_TPE_HFU_RCP_LEN_A_ADD_DYN: + GET_SET(be->tpe.v3.hfu_rcp[index].len_a_add_dyn, value); + break; + + case HW_TPE_HFU_RCP_LEN_A_ADD_OFS: + GET_SET(be->tpe.v3.hfu_rcp[index].len_a_add_ofs, value); + break; + + case HW_TPE_HFU_RCP_LEN_A_SUB_DYN: + GET_SET(be->tpe.v3.hfu_rcp[index].len_a_sub_dyn, value); + break; + + case HW_TPE_HFU_RCP_LEN_B_WR: + GET_SET(be->tpe.v3.hfu_rcp[index].len_b_wr, value); + break; + + case HW_TPE_HFU_RCP_LEN_B_POS_DYN: + 
GET_SET(be->tpe.v3.hfu_rcp[index].len_b_pos_dyn, value); + break; + + case HW_TPE_HFU_RCP_LEN_B_POS_OFS: + GET_SET(be->tpe.v3.hfu_rcp[index].len_b_pos_ofs, value); + break; + + case HW_TPE_HFU_RCP_LEN_B_ADD_DYN: + GET_SET(be->tpe.v3.hfu_rcp[index].len_b_add_dyn, value); + break; + + case HW_TPE_HFU_RCP_LEN_B_ADD_OFS: + GET_SET(be->tpe.v3.hfu_rcp[index].len_b_add_ofs, value); + break; + + case HW_TPE_HFU_RCP_LEN_B_SUB_DYN: + GET_SET(be->tpe.v3.hfu_rcp[index].len_b_sub_dyn, value); + break; + + case HW_TPE_HFU_RCP_LEN_C_WR: + GET_SET(be->tpe.v3.hfu_rcp[index].len_c_wr, value); + break; + + case HW_TPE_HFU_RCP_LEN_C_POS_DYN: + GET_SET(be->tpe.v3.hfu_rcp[index].len_c_pos_dyn, value); + break; + + case HW_TPE_HFU_RCP_LEN_C_POS_OFS: + GET_SET(be->tpe.v3.hfu_rcp[index].len_c_pos_ofs, value); + break; + + case HW_TPE_HFU_RCP_LEN_C_ADD_DYN: + GET_SET(be->tpe.v3.hfu_rcp[index].len_c_add_dyn, value); + break; + + case HW_TPE_HFU_RCP_LEN_C_ADD_OFS: + GET_SET(be->tpe.v3.hfu_rcp[index].len_c_add_ofs, value); + break; + + case HW_TPE_HFU_RCP_LEN_C_SUB_DYN: + GET_SET(be->tpe.v3.hfu_rcp[index].len_c_sub_dyn, value); + break; + + case HW_TPE_HFU_RCP_TTL_WR: + GET_SET(be->tpe.v3.hfu_rcp[index].ttl_wr, value); + break; + + case HW_TPE_HFU_RCP_TTL_POS_DYN: + GET_SET(be->tpe.v3.hfu_rcp[index].ttl_pos_dyn, value); + break; + + case HW_TPE_HFU_RCP_TTL_POS_OFS: + GET_SET(be->tpe.v3.hfu_rcp[index].ttl_pos_ofs, value); + break; + + default: + UNSUP_FIELD_LOG; + return UNSUP_FIELD; + } + + break; + + default: + UNSUP_VER_LOG; + return UNSUP_VER; + } + + return 0; +} + +int hw_mod_tpe_hfu_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field, int index, + uint32_t value) +{ + return hw_mod_tpe_hfu_rcp_mod(be, field, index, &value, 0); +} + /* * CSU_RCP */ @@ -306,3 +1071,91 @@ int hw_mod_tpe_csu_rcp_flush(struct flow_api_backend_s *be, int start_idx, int c return be->iface->tpe_csu_rcp_flush(be->be_dev, &be->tpe, start_idx, count); } + +static int hw_mod_tpe_csu_rcp_mod(struct flow_api_backend_s *be, enum hw_tpe_e field, + uint32_t index, uint32_t *value, int get) +{ + if (index >= be->tpe.nb_rcp_categories) { + INDEX_TOO_LARGE_LOG; + return INDEX_TOO_LARGE; + } + + switch (_VER_) { + case 3: + switch (field) { + case HW_TPE_PRESET_ALL: + if (get) { + UNSUP_FIELD_LOG; + return UNSUP_FIELD; + } + + memset(&be->tpe.v3.csu_rcp[index], (uint8_t)*value, + sizeof(struct tpe_v1_csu_v0_rcp_s)); + break; + + case HW_TPE_FIND: + if (!get) { + UNSUP_FIELD_LOG; + return UNSUP_FIELD; + } + + if (*value >= be->tpe.nb_rcp_categories) { + INDEX_TOO_LARGE_LOG; + return INDEX_TOO_LARGE; + } + + FIND_EQUAL_INDEX(be->tpe.v3.csu_rcp, struct tpe_v1_csu_v0_rcp_s, index, + *value, be->tpe.nb_rcp_categories); + break; + + case HW_TPE_COMPARE: + if (!get) { + UNSUP_FIELD_LOG; + return UNSUP_FIELD; + } + + if (*value >= be->tpe.nb_rcp_categories) { + INDEX_TOO_LARGE_LOG; + return INDEX_TOO_LARGE; + } + + DO_COMPARE_INDEXS(be->tpe.v3.csu_rcp, struct tpe_v1_csu_v0_rcp_s, index, + *value); + break; + + case HW_TPE_CSU_RCP_OUTER_L3_CMD: + GET_SET(be->tpe.v3.csu_rcp[index].ol3_cmd, value); + break; + + case HW_TPE_CSU_RCP_OUTER_L4_CMD: + GET_SET(be->tpe.v3.csu_rcp[index].ol4_cmd, value); + break; + + case HW_TPE_CSU_RCP_INNER_L3_CMD: + GET_SET(be->tpe.v3.csu_rcp[index].il3_cmd, value); + break; + + case HW_TPE_CSU_RCP_INNER_L4_CMD: + GET_SET(be->tpe.v3.csu_rcp[index].il4_cmd, value); + break; + + default: + UNSUP_FIELD_LOG; + return UNSUP_FIELD; + } + + break; + + default: + UNSUP_VER_LOG; + return UNSUP_VER; + } + + return 0; +} + +int 
hw_mod_tpe_csu_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field, int index, + uint32_t value) +{ + return hw_mod_tpe_csu_rcp_mod(be, field, index, &value, 0); +} diff --git a/drivers/net/ntnic/nthw/flow_api/profile_inline/flm_age_queue.c b/drivers/net/ntnic/nthw/flow_api/profile_inline/flm_age_queue.c new file mode 100644 index 0000000000..d916eccec7 --- /dev/null +++ b/drivers/net/ntnic/nthw/flow_api/profile_inline/flm_age_queue.c @@ -0,0 +1,164 @@ +/* + * SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2024 Napatech A/S + */ + +#include +#include + +#include "ntlog.h" +#include "flm_age_queue.h" + +/* Queues for flm aged events */ +static struct rte_ring *age_queue[MAX_EVT_AGE_QUEUES]; +static RTE_ATOMIC(uint16_t) age_event[MAX_EVT_AGE_PORTS]; + +__rte_always_inline int flm_age_event_get(uint8_t port) +{ + return rte_atomic_load_explicit(&age_event[port], rte_memory_order_seq_cst); +} + +__rte_always_inline void flm_age_event_set(uint8_t port) +{ + rte_atomic_store_explicit(&age_event[port], 1, rte_memory_order_seq_cst); +} + +__rte_always_inline void flm_age_event_clear(uint8_t port) +{ + rte_atomic_store_explicit(&age_event[port], 0, rte_memory_order_seq_cst); +} + +void flm_age_queue_free(uint8_t port, uint16_t caller_id) +{ + struct rte_ring *q = NULL; + + if (port < MAX_EVT_AGE_PORTS) + rte_atomic_store_explicit(&age_event[port], 0, rte_memory_order_seq_cst); + + if (caller_id < MAX_EVT_AGE_QUEUES && age_queue[caller_id] != NULL) { + q = age_queue[caller_id]; + age_queue[caller_id] = NULL; + } + + if (q != NULL) + rte_ring_free(q); +} + +void flm_age_queue_free_all(void) +{ + int i; + int j; + + for (i = 0; i < MAX_EVT_AGE_PORTS; i++) + for (j = 0; j < MAX_EVT_AGE_QUEUES; j++) + flm_age_queue_free(i, j); +} + +struct rte_ring *flm_age_queue_create(uint8_t port, uint16_t caller_id, unsigned int count) +{ + char name[20]; + struct rte_ring *q = NULL; + + if (rte_is_power_of_2(count) == false || count > RTE_RING_SZ_MASK) { + NT_LOG(WRN, + FILTER, + "FLM aged event queue number of elements (%u) is invalid, must be power of 2, and not exceed %u", + count, + RTE_RING_SZ_MASK); + return NULL; + } + + if (port >= MAX_EVT_AGE_PORTS) { + NT_LOG(WRN, + FILTER, + "FLM aged event queue cannot be created for port %u. Max supported port is %u", + port, + MAX_EVT_AGE_PORTS - 1); + return NULL; + } + + rte_atomic_store_explicit(&age_event[port], 0, rte_memory_order_seq_cst); + + if (caller_id >= MAX_EVT_AGE_QUEUES) { + NT_LOG(WRN, + FILTER, + "FLM aged event queue cannot be created for caller_id %u. 
Max supported caller_id is %u", + caller_id, + MAX_EVT_AGE_QUEUES - 1); + return NULL; + } + + if (age_queue[caller_id] != NULL) { + NT_LOG(DBG, FILTER, "FLM aged event queue %u already created", caller_id); + return age_queue[caller_id]; + } + + snprintf(name, 20, "AGE_EVENT%u", caller_id); + q = rte_ring_create_elem(name, + FLM_AGE_ELEM_SIZE, + count, + SOCKET_ID_ANY, + RING_F_SP_ENQ | RING_F_SC_DEQ); + + if (q == NULL) { + NT_LOG(WRN, + FILTER, + "FLM aged event queue cannot be created due to error %02X", + rte_errno); + return NULL; + } + + age_queue[caller_id] = q; + + return q; +} + +void flm_age_queue_put(uint16_t caller_id, struct flm_age_event_s *obj) +{ + int ret; + + /* If queues is not created, then ignore and return */ + if (caller_id < MAX_EVT_AGE_QUEUES && age_queue[caller_id] != NULL) { + ret = rte_ring_sp_enqueue_elem(age_queue[caller_id], obj, FLM_AGE_ELEM_SIZE); + + if (ret != 0) + NT_LOG(DBG, FILTER, "FLM aged event queue full"); + } +} + +int flm_age_queue_get(uint16_t caller_id, struct flm_age_event_s *obj) +{ + int ret; + + /* If queues is not created, then ignore and return */ + if (caller_id < MAX_EVT_AGE_QUEUES && age_queue[caller_id] != NULL) { + ret = rte_ring_sc_dequeue_elem(age_queue[caller_id], obj, FLM_AGE_ELEM_SIZE); + + if (ret != 0) + NT_LOG(DBG, FILTER, "FLM aged event queue empty"); + + return ret; + } + + return -ENOENT; +} + +unsigned int flm_age_queue_count(uint16_t caller_id) +{ + unsigned int ret = 0; + + if (caller_id < MAX_EVT_AGE_QUEUES && age_queue[caller_id] != NULL) + ret = rte_ring_count(age_queue[caller_id]); + + return ret; +} + +unsigned int flm_age_queue_get_size(uint16_t caller_id) +{ + unsigned int ret = 0; + + if (caller_id < MAX_EVT_AGE_QUEUES && age_queue[caller_id] != NULL) + ret = rte_ring_get_size(age_queue[caller_id]); + + return ret; +} diff --git a/drivers/net/ntnic/nthw/flow_api/profile_inline/flm_age_queue.h b/drivers/net/ntnic/nthw/flow_api/profile_inline/flm_age_queue.h new file mode 100644 index 0000000000..55c410ac86 --- /dev/null +++ b/drivers/net/ntnic/nthw/flow_api/profile_inline/flm_age_queue.h @@ -0,0 +1,42 @@ +/* + * SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2024 Napatech A/S + */ + +#ifndef _FLM_AGE_QUEUE_H_ +#define _FLM_AGE_QUEUE_H_ + +#include "stdint.h" + +struct flm_age_event_s { + void *context; +}; + +/* Indicates why the flow info record was generated */ +#define INF_DATA_CAUSE_SW_UNLEARN 0 +#define INF_DATA_CAUSE_TIMEOUT_FLOW_DELETED 1 +#define INF_DATA_CAUSE_NA 2 +#define INF_DATA_CAUSE_PERIODIC_FLOW_INFO 3 +#define INF_DATA_CAUSE_SW_PROBE 4 +#define INF_DATA_CAUSE_TIMEOUT_FLOW_KEPT 5 + +/* Max number of event queues */ +#define MAX_EVT_AGE_QUEUES 256 + +/* Max number of event ports */ +#define MAX_EVT_AGE_PORTS 128 + +#define FLM_AGE_ELEM_SIZE sizeof(struct flm_age_event_s) + +int flm_age_event_get(uint8_t port); +void flm_age_event_set(uint8_t port); +void flm_age_event_clear(uint8_t port); +void flm_age_queue_free(uint8_t port, uint16_t caller_id); +void flm_age_queue_free_all(void); +struct rte_ring *flm_age_queue_create(uint8_t port, uint16_t caller_id, unsigned int count); +void flm_age_queue_put(uint16_t caller_id, struct flm_age_event_s *obj); +int flm_age_queue_get(uint16_t caller_id, struct flm_age_event_s *obj); +unsigned int flm_age_queue_count(uint16_t caller_id); +unsigned int flm_age_queue_get_size(uint16_t caller_id); + +#endif /* _FLM_AGE_QUEUE_H_ */ diff --git a/drivers/net/ntnic/nthw/flow_api/profile_inline/flm_evt_queue.c 
b/drivers/net/ntnic/nthw/flow_api/profile_inline/flm_evt_queue.c new file mode 100644 index 0000000000..d76c7da568 --- /dev/null +++ b/drivers/net/ntnic/nthw/flow_api/profile_inline/flm_evt_queue.c @@ -0,0 +1,293 @@ +/* + * SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2024 Napatech A/S + */ + +#include +#include +#include + +#include +#include + +#include "ntlog.h" +#include "flm_evt_queue.h" + +/* Local queues for flm statistic events */ +static struct rte_ring *info_q_local[MAX_INFO_LCL_QUEUES]; + +/* Remote queues for flm statistic events */ +static struct rte_ring *info_q_remote[MAX_INFO_RMT_QUEUES]; + +/* Local queues for flm status records */ +static struct rte_ring *stat_q_local[MAX_STAT_LCL_QUEUES]; + +/* Remote queues for flm status records */ +static struct rte_ring *stat_q_remote[MAX_STAT_RMT_QUEUES]; + +static void flm_inf_sta_queue_free(uint8_t port, uint8_t caller) +{ + struct rte_ring *q = NULL; + + /* If queues is not created, then ignore and return */ + switch (caller) { + case FLM_INFO_LOCAL: + if (port < MAX_INFO_LCL_QUEUES && info_q_local[port] != NULL) { + q = info_q_local[port]; + info_q_local[port] = NULL; + } + + break; + + case FLM_INFO_REMOTE: + if (port < MAX_INFO_RMT_QUEUES && info_q_remote[port] != NULL) { + q = info_q_remote[port]; + info_q_remote[port] = NULL; + } + + break; + + case FLM_STAT_LOCAL: + if (port < MAX_STAT_LCL_QUEUES && stat_q_local[port] != NULL) { + q = stat_q_local[port]; + stat_q_local[port] = NULL; + } + + break; + + case FLM_STAT_REMOTE: + if (port < MAX_STAT_RMT_QUEUES && stat_q_remote[port] != NULL) { + q = stat_q_remote[port]; + stat_q_remote[port] = NULL; + } + + break; + + default: + NT_LOG(ERR, FILTER, "FLM queue free illegal caller: %u", caller); + break; + } + + if (q) + rte_ring_free(q); +} + +void flm_inf_sta_queue_free_all(uint8_t caller) +{ + int count = 0; + + switch (caller) { + case FLM_INFO_LOCAL: + count = MAX_INFO_LCL_QUEUES; + break; + + case FLM_INFO_REMOTE: + count = MAX_INFO_RMT_QUEUES; + break; + + case FLM_STAT_LOCAL: + count = MAX_STAT_LCL_QUEUES; + break; + + case FLM_STAT_REMOTE: + count = MAX_STAT_RMT_QUEUES; + break; + + default: + NT_LOG(ERR, FILTER, "FLM queue free illegal caller: %u", caller); + return; + } + + for (int i = 0; i < count; i++) + flm_inf_sta_queue_free(i, caller); +} + +static struct rte_ring *flm_evt_queue_create(uint8_t port, uint8_t caller) +{ + static_assert((FLM_EVT_ELEM_SIZE & ~(size_t)3) == FLM_EVT_ELEM_SIZE, + "FLM EVENT struct size"); + static_assert((FLM_STAT_ELEM_SIZE & ~(size_t)3) == FLM_STAT_ELEM_SIZE, + "FLM STAT struct size"); + char name[20] = "NONE"; + struct rte_ring *q; + uint32_t elem_size = 0; + uint32_t queue_size = 0; + + switch (caller) { + case FLM_INFO_LOCAL: + if (port >= MAX_INFO_LCL_QUEUES) { + NT_LOG(WRN, + FILTER, + "FLM statistic event queue cannot be created for port %u. Max supported port is %u", + port, + MAX_INFO_LCL_QUEUES - 1); + return NULL; + } + + snprintf(name, 20, "LOCAL_INFO%u", port); + elem_size = FLM_EVT_ELEM_SIZE; + queue_size = FLM_EVT_QUEUE_SIZE; + break; + + case FLM_INFO_REMOTE: + if (port >= MAX_INFO_RMT_QUEUES) { + NT_LOG(WRN, + FILTER, + "FLM statistic event queue cannot be created for vport %u. 
Max supported vport is %u", + port, + MAX_INFO_RMT_QUEUES - 1); + return NULL; + } + + snprintf(name, 20, "REMOTE_INFO%u", port); + elem_size = FLM_EVT_ELEM_SIZE; + queue_size = FLM_EVT_QUEUE_SIZE; + break; + + case FLM_STAT_LOCAL: + if (port >= MAX_STAT_LCL_QUEUES) { + NT_LOG(WRN, + FILTER, + "FLM status queue cannot be created for port %u. Max supported port is %u", + port, + MAX_STAT_LCL_QUEUES - 1); + return NULL; + } + + snprintf(name, 20, "LOCAL_STAT%u", port); + elem_size = FLM_STAT_ELEM_SIZE; + queue_size = FLM_STAT_QUEUE_SIZE; + break; + + case FLM_STAT_REMOTE: + if (port >= MAX_STAT_RMT_QUEUES) { + NT_LOG(WRN, + FILTER, + "FLM status queue cannot be created for vport %u. Max supported vport is %u", + port, + MAX_STAT_RMT_QUEUES - 1); + return NULL; + } + + snprintf(name, 20, "REMOTE_STAT%u", port); + elem_size = FLM_STAT_ELEM_SIZE; + queue_size = FLM_STAT_QUEUE_SIZE; + break; + + default: + NT_LOG(ERR, FILTER, "FLM queue create illegal caller: %u", caller); + return NULL; + } + + q = rte_ring_create_elem(name, + elem_size, + queue_size, + SOCKET_ID_ANY, + RING_F_SP_ENQ | RING_F_SC_DEQ); + + if (q == NULL) { + NT_LOG(WRN, FILTER, "FLM queues cannot be created due to error %02X", rte_errno); + return NULL; + } + + switch (caller) { + case FLM_INFO_LOCAL: + info_q_local[port] = q; + break; + + case FLM_INFO_REMOTE: + info_q_remote[port] = q; + break; + + case FLM_STAT_LOCAL: + stat_q_local[port] = q; + break; + + case FLM_STAT_REMOTE: + stat_q_remote[port] = q; + break; + + default: + break; + } + + return q; +} + +int flm_sta_queue_put(uint8_t port, bool remote, struct flm_status_event_s *obj) +{ + struct rte_ring **stat_q = remote ? stat_q_remote : stat_q_local; + + if (port >= (remote ? MAX_STAT_RMT_QUEUES : MAX_STAT_LCL_QUEUES)) + return -1; + + if (stat_q[port] == NULL) { + if (flm_evt_queue_create(port, remote ? 
FLM_STAT_REMOTE : FLM_STAT_LOCAL) == NULL) + return -1; + } + + if (rte_ring_sp_enqueue_elem(stat_q[port], obj, FLM_STAT_ELEM_SIZE) != 0) { + NT_LOG(DBG, FILTER, "FLM local status queue full"); + return -1; + } + + return 0; +} + +void flm_inf_queue_put(uint8_t port, bool remote, struct flm_info_event_s *obj) +{ + int ret; + + /* If queues is not created, then ignore and return */ + if (!remote) { + if (port < MAX_INFO_LCL_QUEUES && info_q_local[port] != NULL) { + ret = rte_ring_sp_enqueue_elem(info_q_local[port], obj, FLM_EVT_ELEM_SIZE); + + if (ret != 0) + NT_LOG(DBG, FILTER, "FLM local info queue full"); + } + + } else if (port < MAX_INFO_RMT_QUEUES && info_q_remote[port] != NULL) { + ret = rte_ring_sp_enqueue_elem(info_q_remote[port], obj, FLM_EVT_ELEM_SIZE); + + if (ret != 0) + NT_LOG(DBG, FILTER, "FLM remote info queue full"); + } +} + +int flm_inf_queue_get(uint8_t port, bool remote, struct flm_info_event_s *obj) +{ + int ret; + + /* If queues is not created, then ignore and return */ + if (!remote) { + if (port < MAX_INFO_LCL_QUEUES) { + if (info_q_local[port] != NULL) { + ret = rte_ring_sc_dequeue_elem(info_q_local[port], + obj, + FLM_EVT_ELEM_SIZE); + return ret; + } + + if (flm_evt_queue_create(port, FLM_INFO_LOCAL) != NULL) { + /* Recursive call to get data */ + return flm_inf_queue_get(port, remote, obj); + } + } + + } else if (port < MAX_INFO_RMT_QUEUES) { + if (info_q_remote[port] != NULL) { + ret = rte_ring_sc_dequeue_elem(info_q_remote[port], + obj, + FLM_EVT_ELEM_SIZE); + return ret; + } + + if (flm_evt_queue_create(port, FLM_INFO_REMOTE) != NULL) { + /* Recursive call to get data */ + return flm_inf_queue_get(port, remote, obj); + } + } + + return -ENOENT; +} diff --git a/drivers/net/ntnic/nthw/flow_api/profile_inline/flm_evt_queue.h b/drivers/net/ntnic/nthw/flow_api/profile_inline/flm_evt_queue.h new file mode 100644 index 0000000000..ee8175cf25 --- /dev/null +++ b/drivers/net/ntnic/nthw/flow_api/profile_inline/flm_evt_queue.h @@ -0,0 +1,55 @@ +/* + * SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2024 Napatech A/S + */ + +#ifndef _FLM_EVT_QUEUE_H_ +#define _FLM_EVT_QUEUE_H_ + +#include "stdint.h" +#include "stdbool.h" + +struct flm_status_event_s { + void *flow; + uint32_t learn_ignore : 1; + uint32_t learn_failed : 1; + uint32_t learn_done : 1; +}; + +struct flm_info_event_s { + uint64_t bytes; + uint64_t packets; + uint64_t timestamp; + uint64_t id; + uint8_t cause; +}; + +enum { + FLM_INFO_LOCAL, + FLM_INFO_REMOTE, + FLM_STAT_LOCAL, + FLM_STAT_REMOTE, +}; + +/* Max number of local queues */ +#define MAX_INFO_LCL_QUEUES 8 +#define MAX_STAT_LCL_QUEUES 8 + +/* Max number of remote queues */ +#define MAX_INFO_RMT_QUEUES 128 +#define MAX_STAT_RMT_QUEUES 128 + +/* queue size */ +#define FLM_EVT_QUEUE_SIZE 8192 +#define FLM_STAT_QUEUE_SIZE 8192 + +/* Event element size */ +#define FLM_EVT_ELEM_SIZE sizeof(struct flm_info_event_s) +#define FLM_STAT_ELEM_SIZE sizeof(struct flm_status_event_s) + +void flm_inf_sta_queue_free_all(uint8_t caller); +void flm_inf_queue_put(uint8_t port, bool remote, struct flm_info_event_s *obj); +int flm_inf_queue_get(uint8_t port, bool remote, struct flm_info_event_s *obj); +int flm_sta_queue_put(uint8_t port, bool remote, struct flm_status_event_s *obj); + +#endif /* _FLM_EVT_QUEUE_H_ */ diff --git a/drivers/net/ntnic/nthw/flow_api/profile_inline/flm_lrn_queue.c b/drivers/net/ntnic/nthw/flow_api/profile_inline/flm_lrn_queue.c new file mode 100644 index 0000000000..6e77c28f93 --- /dev/null +++ 
b/drivers/net/ntnic/nthw/flow_api/profile_inline/flm_lrn_queue.c @@ -0,0 +1,70 @@ +/* + * SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2024 Napatech A/S + */ + +#include +#include +#include + +#include + +#include "hw_mod_flm_v25.h" + +#include "flm_lrn_queue.h" + +#define QUEUE_SIZE (1 << 13) + +#define ELEM_SIZE sizeof(struct flm_v25_lrn_data_s) + +void *flm_lrn_queue_create(void) +{ + static_assert((ELEM_SIZE & ~(size_t)3) == ELEM_SIZE, "FLM LEARN struct size"); + struct rte_ring *q = rte_ring_create_elem("RFQ", + ELEM_SIZE, + QUEUE_SIZE, + SOCKET_ID_ANY, + RING_F_MP_HTS_ENQ | RING_F_SC_DEQ); + assert(q != NULL); + return q; +} + +void flm_lrn_queue_free(void *q) +{ + if (q) + rte_ring_free(q); +} + +uint32_t *flm_lrn_queue_get_write_buffer(void *q) +{ + struct rte_ring_zc_data zcd; + unsigned int n = rte_ring_enqueue_zc_burst_elem_start(q, ELEM_SIZE, 1, &zcd, NULL); + return (n == 0) ? NULL : zcd.ptr1; +} + +void flm_lrn_queue_release_write_buffer(void *q) +{ + rte_ring_enqueue_zc_elem_finish(q, 1); +} + +read_record flm_lrn_queue_get_read_buffer(void *q) +{ + struct rte_ring_zc_data zcd; + read_record rr; + + if (rte_ring_dequeue_zc_burst_elem_start(q, ELEM_SIZE, QUEUE_SIZE, &zcd, NULL) != 0) { + rr.num = zcd.n1; + rr.p = zcd.ptr1; + + } else { + rr.num = 0; + rr.p = NULL; + } + + return rr; +} + +void flm_lrn_queue_release_read_buffer(void *q, uint32_t num) +{ + rte_ring_dequeue_zc_elem_finish(q, num); +} diff --git a/drivers/net/ntnic/nthw/flow_api/profile_inline/flm_lrn_queue.h b/drivers/net/ntnic/nthw/flow_api/profile_inline/flm_lrn_queue.h new file mode 100644 index 0000000000..40558f4201 --- /dev/null +++ b/drivers/net/ntnic/nthw/flow_api/profile_inline/flm_lrn_queue.h @@ -0,0 +1,25 @@ +/* + * SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2024 Napatech A/S + */ + +#ifndef _FLM_LRN_QUEUE_H_ +#define _FLM_LRN_QUEUE_H_ + +#include + +typedef struct read_record { + uint32_t *p; + uint32_t num; +} read_record; + +void *flm_lrn_queue_create(void); +void flm_lrn_queue_free(void *q); + +uint32_t *flm_lrn_queue_get_write_buffer(void *q); +void flm_lrn_queue_release_write_buffer(void *q); + +read_record flm_lrn_queue_get_read_buffer(void *q); +void flm_lrn_queue_release_read_buffer(void *q, uint32_t num); + +#endif /* _FLM_LRN_QUEUE_H_ */ diff --git a/drivers/net/ntnic/nthw/flow_api/profile_inline/flow_api_hw_db_inline.c b/drivers/net/ntnic/nthw/flow_api/profile_inline/flow_api_hw_db_inline.c new file mode 100644 index 0000000000..ffab643f56 --- /dev/null +++ b/drivers/net/ntnic/nthw/flow_api/profile_inline/flow_api_hw_db_inline.c @@ -0,0 +1,3000 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2023 Napatech A/S + */ + + +#include "hw_mod_backend.h" +#include "flow_api_engine.h" + +#include "flow_api_hw_db_inline.h" +#include "flow_api_profile_inline_config.h" +#include "rte_common.h" + +#define HW_DB_INLINE_ACTION_SET_NB 512 +#define HW_DB_INLINE_MATCH_SET_NB 512 + +#define HW_DB_FT_LOOKUP_KEY_A 0 + +#define HW_DB_FT_TYPE_KM 1 +#define HW_DB_FT_LOOKUP_KEY_A 0 +#define HW_DB_FT_LOOKUP_KEY_C 2 + +#define HW_DB_FT_TYPE_FLM 0 +#define HW_DB_FT_TYPE_KM 1 +/******************************************************************************/ +/* Handle */ +/******************************************************************************/ + +struct hw_db_inline_resource_db { + /* Actions */ + struct hw_db_inline_resource_db_cot { + struct hw_db_inline_cot_data data; + int ref; + } *cot; + + struct hw_db_inline_resource_db_qsl { + struct hw_db_inline_qsl_data data; + int 
qst_idx; + } *qsl; + + struct hw_db_inline_resource_db_slc_lr { + struct hw_db_inline_slc_lr_data data; + int ref; + } *slc_lr; + + struct hw_db_inline_resource_db_tpe { + struct hw_db_inline_tpe_data data; + int ref; + } *tpe; + + struct hw_db_inline_resource_db_tpe_ext { + struct hw_db_inline_tpe_ext_data data; + int replace_ram_idx; + int ref; + } *tpe_ext; + + struct hw_db_inline_resource_db_hsh { + struct hw_db_inline_hsh_data data; + int ref; + } *hsh; + + struct hw_db_inline_resource_db_scrub { + struct hw_db_inline_scrub_data data; + int ref; + } *scrub; + + uint32_t nb_cot; + uint32_t nb_qsl; + uint32_t nb_slc_lr; + uint32_t nb_tpe; + uint32_t nb_tpe_ext; + uint32_t nb_hsh; + uint32_t nb_scrub; + + /* Items */ + struct hw_db_inline_resource_db_cat { + struct hw_db_inline_cat_data data; + int ref; + } *cat; + + struct hw_db_inline_resource_db_flm_rcp { + struct hw_db_inline_flm_rcp_data data; + int ref; + + struct hw_db_inline_resource_db_flm_ft { + struct hw_db_inline_flm_ft_data data; + struct hw_db_flm_ft idx; + int ref; + } *ft; + + struct hw_db_inline_resource_db_flm_match_set { + struct hw_db_match_set_idx idx; + int ref; + } *match_set; + + struct hw_db_inline_resource_db_flm_cfn_map { + int cfn_idx; + } *cfn_map; + } *flm; + + struct hw_db_inline_resource_db_km_rcp { + struct hw_db_inline_km_rcp_data data; + int ref; + + struct hw_db_inline_resource_db_km_ft { + struct hw_db_inline_km_ft_data data; + int ref; + } *ft; + } *km; + + uint32_t nb_cat; + uint32_t nb_flm_ft; + uint32_t nb_flm_rcp; + uint32_t nb_km_ft; + uint32_t nb_km_rcp; + + /* Hardware */ + + struct hw_db_inline_resource_db_cfn { + uint64_t priority; + int cfn_hw; + int ref; + } *cfn; + + uint32_t cfn_priority_counter; + uint32_t set_priority_counter; + + struct hw_db_inline_resource_db_action_set { + struct hw_db_inline_action_set_data data; + int ref; + } action_set[HW_DB_INLINE_ACTION_SET_NB]; + + struct hw_db_inline_resource_db_match_set { + struct hw_db_inline_match_set_data data; + int ref; + uint32_t set_priority; + } match_set[HW_DB_INLINE_MATCH_SET_NB]; +}; + +int hw_db_inline_create(struct flow_nic_dev *ndev, void **db_handle) +{ + /* Note: calloc is required for functionality in the hw_db_inline_destroy() */ + struct hw_db_inline_resource_db *db = calloc(1, sizeof(struct hw_db_inline_resource_db)); + + if (db == NULL) + return -1; + + db->nb_cot = ndev->be.cat.nb_cat_funcs; + db->cot = calloc(db->nb_cot, sizeof(struct hw_db_inline_resource_db_cot)); + + if (db->cot == NULL) { + hw_db_inline_destroy(db); + return -1; + } + + db->nb_qsl = ndev->be.qsl.nb_rcp_categories; + db->qsl = calloc(db->nb_qsl, sizeof(struct hw_db_inline_resource_db_qsl)); + + if (db->qsl == NULL) { + hw_db_inline_destroy(db); + return -1; + } + + db->nb_slc_lr = ndev->be.max_categories; + db->slc_lr = calloc(db->nb_slc_lr, sizeof(struct hw_db_inline_resource_db_slc_lr)); + + if (db->slc_lr == NULL) { + hw_db_inline_destroy(db); + return -1; + } + + db->nb_tpe = ndev->be.tpe.nb_rcp_categories; + db->tpe = calloc(db->nb_tpe, sizeof(struct hw_db_inline_resource_db_tpe)); + + if (db->tpe == NULL) { + hw_db_inline_destroy(db); + return -1; + } + + db->nb_tpe_ext = ndev->be.tpe.nb_rpl_ext_categories; + db->tpe_ext = calloc(db->nb_tpe_ext, sizeof(struct hw_db_inline_resource_db_tpe_ext)); + + if (db->tpe_ext == NULL) { + hw_db_inline_destroy(db); + return -1; + } + + db->nb_cat = ndev->be.cat.nb_cat_funcs; + db->cat = calloc(db->nb_cat, sizeof(struct hw_db_inline_resource_db_cat)); + + if (db->cat == NULL) { + 
hw_db_inline_destroy(db); + return -1; + } + + + db->nb_flm_ft = ndev->be.cat.nb_flow_types; + db->nb_flm_rcp = ndev->be.flm.nb_categories; + db->flm = calloc(db->nb_flm_rcp, sizeof(struct hw_db_inline_resource_db_flm_rcp)); + + if (db->flm == NULL) { + hw_db_inline_destroy(db); + return -1; + } + + for (uint32_t i = 0; i < db->nb_flm_rcp; ++i) { + db->flm[i].ft = + calloc(db->nb_flm_ft, sizeof(struct hw_db_inline_resource_db_flm_ft)); + + if (db->flm[i].ft == NULL) { + hw_db_inline_destroy(db); + return -1; + } + + db->flm[i].match_set = + calloc(db->nb_cat, sizeof(struct hw_db_inline_resource_db_flm_match_set)); + + if (db->flm[i].match_set == NULL) { + hw_db_inline_destroy(db); + return -1; + } + + db->flm[i].cfn_map = calloc(db->nb_cat * db->nb_flm_ft, + sizeof(struct hw_db_inline_resource_db_flm_cfn_map)); + + if (db->flm[i].cfn_map == NULL) { + hw_db_inline_destroy(db); + return -1; + } + } + + db->nb_km_ft = ndev->be.cat.nb_flow_types; + db->nb_km_rcp = ndev->be.km.nb_categories; + db->km = calloc(db->nb_km_rcp, sizeof(struct hw_db_inline_resource_db_km_rcp)); + + if (db->km == NULL) { + hw_db_inline_destroy(db); + return -1; + } + + for (uint32_t i = 0; i < db->nb_km_rcp; ++i) { + db->km[i].ft = calloc(db->nb_km_ft * db->nb_cat, + sizeof(struct hw_db_inline_resource_db_km_ft)); + + if (db->km[i].ft == NULL) { + hw_db_inline_destroy(db); + return -1; + } + } + + db->cfn = calloc(db->nb_cat, sizeof(struct hw_db_inline_resource_db_cfn)); + + if (db->cfn == NULL) { + hw_db_inline_destroy(db); + return -1; + } + + db->nb_hsh = ndev->be.hsh.nb_rcp; + db->hsh = calloc(db->nb_hsh, sizeof(struct hw_db_inline_resource_db_hsh)); + + if (db->hsh == NULL) { + hw_db_inline_destroy(db); + return -1; + } + + db->nb_scrub = ndev->be.flm.nb_scrub_profiles; + db->scrub = calloc(db->nb_scrub, sizeof(struct hw_db_inline_resource_db_scrub)); + + if (db->scrub == NULL) { + hw_db_inline_destroy(db); + return -1; + } + + *db_handle = db; + + /* Preset data */ + + db->flm[0].ft[1].idx.type = HW_DB_IDX_TYPE_FLM_FT; + db->flm[0].ft[1].idx.id1 = 1; + db->flm[0].ft[1].ref = 1; + + return 0; +} + +void hw_db_inline_destroy(void *db_handle) +{ + struct hw_db_inline_resource_db *db = (struct hw_db_inline_resource_db *)db_handle; + + free(db->cot); + free(db->qsl); + free(db->slc_lr); + free(db->tpe); + free(db->tpe_ext); + free(db->hsh); + free(db->scrub); + + free(db->cat); + + if (db->flm) { + for (uint32_t i = 0; i < db->nb_flm_rcp; ++i) { + free(db->flm[i].ft); + free(db->flm[i].match_set); + free(db->flm[i].cfn_map); + } + + free(db->flm); + } + + if (db->km) { + for (uint32_t i = 0; i < db->nb_km_rcp; ++i) + free(db->km[i].ft); + + free(db->km); + } + + free(db->cfn); + + free(db); +} + +void hw_db_inline_deref_idxs(struct flow_nic_dev *ndev, void *db_handle, struct hw_db_idx *idxs, + uint32_t size) +{ + for (uint32_t i = 0; i < size; ++i) { + switch (idxs[i].type) { + case HW_DB_IDX_TYPE_NONE: + break; + + case HW_DB_IDX_TYPE_MATCH_SET: + hw_db_inline_match_set_deref(ndev, db_handle, + *(struct hw_db_match_set_idx *)&idxs[i]); + break; + + case HW_DB_IDX_TYPE_ACTION_SET: + hw_db_inline_action_set_deref(ndev, db_handle, + *(struct hw_db_action_set_idx *)&idxs[i]); + break; + + case HW_DB_IDX_TYPE_CAT: + hw_db_inline_cat_deref(ndev, db_handle, *(struct hw_db_cat_idx *)&idxs[i]); + break; + + case HW_DB_IDX_TYPE_COT: + hw_db_inline_cot_deref(ndev, db_handle, *(struct hw_db_cot_idx *)&idxs[i]); + break; + + case HW_DB_IDX_TYPE_QSL: + hw_db_inline_qsl_deref(ndev, db_handle, *(struct hw_db_qsl_idx *)&idxs[i]); + 
break; + + case HW_DB_IDX_TYPE_SLC_LR: + hw_db_inline_slc_lr_deref(ndev, db_handle, + *(struct hw_db_slc_lr_idx *)&idxs[i]); + break; + + case HW_DB_IDX_TYPE_TPE: + hw_db_inline_tpe_deref(ndev, db_handle, *(struct hw_db_tpe_idx *)&idxs[i]); + break; + + case HW_DB_IDX_TYPE_TPE_EXT: + hw_db_inline_tpe_ext_deref(ndev, db_handle, + *(struct hw_db_tpe_ext_idx *)&idxs[i]); + break; + + case HW_DB_IDX_TYPE_FLM_RCP: + hw_db_inline_flm_deref(ndev, db_handle, *(struct hw_db_flm_idx *)&idxs[i]); + break; + + case HW_DB_IDX_TYPE_FLM_FT: + hw_db_inline_flm_ft_deref(ndev, db_handle, + *(struct hw_db_flm_ft *)&idxs[i]); + break; + + case HW_DB_IDX_TYPE_KM_RCP: + hw_db_inline_km_deref(ndev, db_handle, *(struct hw_db_km_idx *)&idxs[i]); + break; + + case HW_DB_IDX_TYPE_KM_FT: + hw_db_inline_km_ft_deref(ndev, db_handle, *(struct hw_db_km_ft *)&idxs[i]); + break; + + case HW_DB_IDX_TYPE_HSH: + hw_db_inline_hsh_deref(ndev, db_handle, *(struct hw_db_hsh_idx *)&idxs[i]); + break; + + case HW_DB_IDX_TYPE_FLM_SCRUB: + hw_db_inline_scrub_deref(ndev, db_handle, + *(struct hw_db_flm_scrub_idx *)&idxs[i]); + break; + + default: + break; + } + } +} + +struct hw_db_idx *hw_db_inline_find_idx(struct flow_nic_dev *ndev, void *db_handle, + enum hw_db_idx_type type, struct hw_db_idx *idxs, uint32_t size) +{ + (void)ndev; + (void)db_handle; + for (uint32_t i = 0; i < size; ++i) { + if (idxs[i].type == type) + return &idxs[i]; + } + + return NULL; +} + +void hw_db_inline_dump(struct flow_nic_dev *ndev, void *db_handle, const struct hw_db_idx *idxs, + uint32_t size, FILE *file) +{ + (void)ndev; + struct hw_db_inline_resource_db *db = (struct hw_db_inline_resource_db *)db_handle; + char str_buffer[4096]; + uint16_t rss_buffer_len = sizeof(str_buffer); + + for (uint32_t i = 0; i < size; ++i) { + switch (idxs[i].type) { + case HW_DB_IDX_TYPE_NONE: + break; + + case HW_DB_IDX_TYPE_MATCH_SET: { + const struct hw_db_inline_match_set_data *data = + &db->match_set[idxs[i].ids].data; + fprintf(file, " MATCH_SET %d, priority %d\n", idxs[i].ids, + (int)data->priority); + fprintf(file, " CAT id %d, KM id %d, KM_FT id %d, ACTION_SET id %d\n", + data->cat.ids, data->km.id1, data->km_ft.id1, + data->action_set.ids); + + if (data->jump) + fprintf(file, " Jumps to %d\n", data->jump); + + break; + } + + case HW_DB_IDX_TYPE_ACTION_SET: { + const struct hw_db_inline_action_set_data *data = + &db->action_set[idxs[i].ids].data; + fprintf(file, " ACTION_SET %d\n", idxs[i].ids); + + if (data->contains_jump) + fprintf(file, " Jumps to %d\n", data->jump); + + else + fprintf(file, + " COT id %d, QSL id %d, SLC_LR id %d, TPE id %d, HSH id %d, SCRUB id %d\n", + data->cot.ids, data->qsl.ids, data->slc_lr.ids, + data->tpe.ids, data->hsh.ids, data->scrub.ids); + + break; + } + + case HW_DB_IDX_TYPE_CAT: { + const struct hw_db_inline_cat_data *data = &db->cat[idxs[i].ids].data; + fprintf(file, " CAT %d\n", idxs[i].ids); + fprintf(file, " Port msk 0x%02x, VLAN msk 0x%02x\n", + (int)data->mac_port_mask, (int)data->vlan_mask); + fprintf(file, + " Proto msks: Frag 0x%02x, l2 0x%02x, l3 0x%02x, l4 0x%02x, l3t 0x%02x, l4t 0x%02x\n", + (int)data->ptc_mask_frag, (int)data->ptc_mask_l2, + (int)data->ptc_mask_l3, (int)data->ptc_mask_l4, + (int)data->ptc_mask_l3_tunnel, (int)data->ptc_mask_l4_tunnel); + fprintf(file, " IP protocol: pn %u pnt %u\n", data->ip_prot, + data->ip_prot_tunnel); + break; + } + + case HW_DB_IDX_TYPE_QSL: { + const struct hw_db_inline_qsl_data *data = &db->qsl[idxs[i].ids].data; + fprintf(file, " QSL %d\n", idxs[i].ids); + + if (data->discard) 
{ + fprintf(file, " Discard\n"); + break; + } + + if (data->drop) { + fprintf(file, " Drop\n"); + break; + } + + fprintf(file, " Table size %d\n", data->table_size); + + for (uint32_t i = 0; + i < data->table_size && i < HW_DB_INLINE_MAX_QST_PER_QSL; ++i) { + fprintf(file, " %u: Queue %d, TX port %d\n", i, + (data->table[i].queue_en ? (int)data->table[i].queue : -1), + (data->table[i].tx_port_en ? (int)data->table[i].tx_port + : -1)); + } + + break; + } + + case HW_DB_IDX_TYPE_COT: { + const struct hw_db_inline_cot_data *data = &db->cot[idxs[i].ids].data; + fprintf(file, " COT %d\n", idxs[i].ids); + fprintf(file, " Color contrib %d, frag rcp %d\n", + (int)data->matcher_color_contrib, (int)data->frag_rcp); + break; + } + + case HW_DB_IDX_TYPE_SLC_LR: { + const struct hw_db_inline_slc_lr_data *data = + &db->slc_lr[idxs[i].ids].data; + fprintf(file, " SLC_LR %d\n", idxs[i].ids); + fprintf(file, " Enable %u, dyn %u, ofs %u\n", data->head_slice_en, + data->head_slice_dyn, data->head_slice_ofs); + break; + } + + case HW_DB_IDX_TYPE_TPE: { + const struct hw_db_inline_tpe_data *data = &db->tpe[idxs[i].ids].data; + fprintf(file, " TPE %d\n", idxs[i].ids); + fprintf(file, " Insert len %u, new outer %u, calc eth %u\n", + data->insert_len, data->new_outer, + data->calc_eth_type_from_inner_ip); + fprintf(file, " TTL enable %u, dyn %u, ofs %u\n", data->ttl_en, + data->ttl_dyn, data->ttl_ofs); + fprintf(file, + " Len A enable %u, pos dyn %u, pos ofs %u, add dyn %u, add ofs %u, sub dyn %u\n", + data->len_a_en, data->len_a_pos_dyn, data->len_a_pos_ofs, + data->len_a_add_dyn, data->len_a_add_ofs, data->len_a_sub_dyn); + fprintf(file, + " Len B enable %u, pos dyn %u, pos ofs %u, add dyn %u, add ofs %u, sub dyn %u\n", + data->len_b_en, data->len_b_pos_dyn, data->len_b_pos_ofs, + data->len_b_add_dyn, data->len_b_add_ofs, data->len_b_sub_dyn); + fprintf(file, + " Len C enable %u, pos dyn %u, pos ofs %u, add dyn %u, add ofs %u, sub dyn %u\n", + data->len_c_en, data->len_c_pos_dyn, data->len_c_pos_ofs, + data->len_c_add_dyn, data->len_c_add_ofs, data->len_c_sub_dyn); + + for (uint32_t i = 0; i < 6; ++i) + if (data->writer[i].en) + fprintf(file, + " Writer %i: Reader %u, dyn %u, ofs %u, len %u\n", + i, data->writer[i].reader_select, + data->writer[i].dyn, data->writer[i].ofs, + data->writer[i].len); + + break; + } + + case HW_DB_IDX_TYPE_TPE_EXT: { + const struct hw_db_inline_tpe_ext_data *data = + &db->tpe_ext[idxs[i].ids].data; + const int rpl_rpl_length = ((int)data->size + 15) / 16; + fprintf(file, " TPE_EXT %d\n", idxs[i].ids); + fprintf(file, " Encap data, size %u\n", data->size); + + for (int i = 0; i < rpl_rpl_length; ++i) { + fprintf(file, " "); + + for (int n = 15; n >= 0; --n) + fprintf(file, " %02x%s", data->hdr8[i * 16 + n], + n == 8 ? 
" " : ""); + + fprintf(file, "\n"); + } + + break; + } + + case HW_DB_IDX_TYPE_FLM_RCP: { + const struct hw_db_inline_flm_rcp_data *data = &db->flm[idxs[i].id1].data; + fprintf(file, " FLM_RCP %d\n", idxs[i].id1); + fprintf(file, " QW0 dyn %u, ofs %u, QW4 dyn %u, ofs %u\n", + data->qw0_dyn, data->qw0_ofs, data->qw4_dyn, data->qw4_ofs); + fprintf(file, " SW8 dyn %u, ofs %u, SW9 dyn %u, ofs %u\n", + data->sw8_dyn, data->sw8_ofs, data->sw9_dyn, data->sw9_ofs); + fprintf(file, " Outer prot %u, inner prot %u\n", data->outer_prot, + data->inner_prot); + fprintf(file, " Mask:\n"); + fprintf(file, " %08x %08x %08x %08x %08x\n", data->mask[0], + data->mask[1], data->mask[2], data->mask[3], data->mask[4]); + fprintf(file, " %08x %08x %08x %08x %08x\n", data->mask[5], + data->mask[6], data->mask[7], data->mask[8], data->mask[9]); + break; + } + + case HW_DB_IDX_TYPE_FLM_FT: { + const struct hw_db_inline_flm_ft_data *data = + &db->flm[idxs[i].id2].ft[idxs[i].id1].data; + fprintf(file, " FLM_FT %d\n", idxs[i].id1); + + if (data->is_group_zero) + fprintf(file, " Jump to %d\n", data->jump); + + else + fprintf(file, " Group %d\n", data->group); + + fprintf(file, " ACTION_SET id %d\n", data->action_set.ids); + break; + } + + case HW_DB_IDX_TYPE_KM_RCP: { + const struct hw_db_inline_km_rcp_data *data = &db->km[idxs[i].id1].data; + fprintf(file, " KM_RCP %d\n", idxs[i].id1); + fprintf(file, " HW id %u\n", data->rcp); + break; + } + + case HW_DB_IDX_TYPE_KM_FT: { + const struct hw_db_inline_km_ft_data *data = + &db->km[idxs[i].id2].ft[idxs[i].id1].data; + fprintf(file, " KM_FT %d\n", idxs[i].id1); + fprintf(file, " ACTION_SET id %d\n", data->action_set.ids); + fprintf(file, " KM_RCP id %d\n", data->km.ids); + fprintf(file, " CAT id %d\n", data->cat.ids); + break; + } + + case HW_DB_IDX_TYPE_FLM_SCRUB: { + const struct hw_db_inline_scrub_data *data = &db->scrub[idxs[i].ids].data; + fprintf(file, " FLM_RCP %d\n", idxs[i].id1); + fprintf(file, " SCRUB %d\n", idxs[i].ids); + fprintf(file, " Timeout: %d, encoded timeout: %d\n", + hw_mod_flm_scrub_timeout_decode(data->timeout), data->timeout); + break; + } + + case HW_DB_IDX_TYPE_HSH: { + const struct hw_db_inline_hsh_data *data = &db->hsh[idxs[i].ids].data; + fprintf(file, " HSH %d\n", idxs[i].ids); + + switch (data->func) { + case RTE_ETH_HASH_FUNCTION_DEFAULT: + fprintf(file, " Func: NTH10\n"); + break; + + case RTE_ETH_HASH_FUNCTION_TOEPLITZ: + fprintf(file, " Func: Toeplitz\n"); + fprintf(file, " Key:"); + + for (uint8_t i = 0; i < MAX_RSS_KEY_LEN; i++) { + if (i % 10 == 0) + fprintf(file, "\n "); + + fprintf(file, " %02x", data->key[i]); + } + + fprintf(file, "\n"); + break; + + default: + fprintf(file, " Func: %u\n", data->func); + } + + fprintf(file, " Hash mask hex:\n"); + fprintf(file, " %016lx\n", data->hash_mask); + + /* convert hash mask to human readable RTE_ETH_RSS_* form if possible */ + if (sprint_nt_rss_mask(str_buffer, rss_buffer_len, "\n ", + data->hash_mask) == 0) { + fprintf(file, " Hash mask flags:%s\n", str_buffer); + } + + break; + } + + default: { + fprintf(file, " Unknown item. 
Type %u\n", idxs[i].type); + break; + } + } + } +} + +void hw_db_inline_dump_cfn(struct flow_nic_dev *ndev, void *db_handle, FILE *file) +{ + (void)ndev; + struct hw_db_inline_resource_db *db = (struct hw_db_inline_resource_db *)db_handle; + + fprintf(file, "CFN status:\n"); + + for (uint32_t id = 0; id < db->nb_cat; ++id) + if (db->cfn[id].cfn_hw) + fprintf(file, " ID %d, HW id %d, priority 0x%" PRIx64 "\n", (int)id, + db->cfn[id].cfn_hw, db->cfn[id].priority); +} + +const void *hw_db_inline_find_data(struct flow_nic_dev *ndev, void *db_handle, + enum hw_db_idx_type type, struct hw_db_idx *idxs, uint32_t size) +{ + (void)ndev; + struct hw_db_inline_resource_db *db = (struct hw_db_inline_resource_db *)db_handle; + + for (uint32_t i = 0; i < size; ++i) { + if (idxs[i].type != type) + continue; + + switch (type) { + case HW_DB_IDX_TYPE_NONE: + return NULL; + + case HW_DB_IDX_TYPE_MATCH_SET: + return &db->match_set[idxs[i].ids].data; + + case HW_DB_IDX_TYPE_ACTION_SET: + return &db->action_set[idxs[i].ids].data; + + case HW_DB_IDX_TYPE_CAT: + return &db->cat[idxs[i].ids].data; + + case HW_DB_IDX_TYPE_QSL: + return &db->qsl[idxs[i].ids].data; + + case HW_DB_IDX_TYPE_COT: + return &db->cot[idxs[i].ids].data; + + case HW_DB_IDX_TYPE_SLC_LR: + return &db->slc_lr[idxs[i].ids].data; + + case HW_DB_IDX_TYPE_TPE: + return &db->tpe[idxs[i].ids].data; + + case HW_DB_IDX_TYPE_TPE_EXT: + return &db->tpe_ext[idxs[i].ids].data; + + case HW_DB_IDX_TYPE_FLM_RCP: + return &db->flm[idxs[i].id1].data; + + case HW_DB_IDX_TYPE_FLM_FT: + return NULL; /* FTs can't be easily looked up */ + + case HW_DB_IDX_TYPE_KM_RCP: + return &db->km[idxs[i].id1].data; + + case HW_DB_IDX_TYPE_KM_FT: + return NULL; /* FTs can't be easily looked up */ + + case HW_DB_IDX_TYPE_HSH: + return &db->hsh[idxs[i].ids].data; + + case HW_DB_IDX_TYPE_FLM_SCRUB: + return &db->scrub[idxs[i].ids].data; + + default: + return NULL; + } + } + + return NULL; +} + +/******************************************************************************/ +/* Filter */ +/******************************************************************************/ + +/* + * lookup refers to key A/B/C/D, and can have values 0, 1, 2, and 3. + */ +static void hw_db_set_ft(struct flow_nic_dev *ndev, int type, int cfn_index, int lookup, + int flow_type, int enable) +{ + (void)type; + (void)enable; + + const int max_lookups = 4; + const int cat_funcs = (int)ndev->be.cat.nb_cat_funcs / 8; + + int fte_index = (8 * flow_type + cfn_index / cat_funcs) * max_lookups + lookup; + int fte_field = cfn_index % cat_funcs; + + uint32_t current_bm = 0; + uint32_t fte_field_bm = 1 << fte_field; + + switch (type) { + case HW_DB_FT_TYPE_FLM: + hw_mod_cat_fte_flm_get(&ndev->be, HW_CAT_FTE_ENABLE_BM, KM_FLM_IF_FIRST, fte_index, + ¤t_bm); + break; + + case HW_DB_FT_TYPE_KM: + hw_mod_cat_fte_km_get(&ndev->be, HW_CAT_FTE_ENABLE_BM, KM_FLM_IF_FIRST, fte_index, + ¤t_bm); + break; + + default: + break; + } + + uint32_t final_bm = enable ? 
(fte_field_bm | current_bm) : (~fte_field_bm & current_bm); + + if (current_bm != final_bm) { + switch (type) { + case HW_DB_FT_TYPE_FLM: + hw_mod_cat_fte_flm_set(&ndev->be, HW_CAT_FTE_ENABLE_BM, KM_FLM_IF_FIRST, + fte_index, final_bm); + hw_mod_cat_fte_flm_flush(&ndev->be, KM_FLM_IF_FIRST, fte_index, 1); + break; + + case HW_DB_FT_TYPE_KM: + hw_mod_cat_fte_km_set(&ndev->be, HW_CAT_FTE_ENABLE_BM, KM_FLM_IF_FIRST, + fte_index, final_bm); + hw_mod_cat_fte_km_flush(&ndev->be, KM_FLM_IF_FIRST, fte_index, 1); + break; + + default: + break; + } + } +} + +/* + * Setup a filter to match: + * All packets in CFN checks + * All packets in KM + * All packets in FLM with look-up C FT equal to specified argument + * + * Setup a QSL recipe to DROP all matching packets + * + * Note: QSL recipe 0 uses DISCARD in order to allow for exception paths (UNMQ) + * Consequently another QSL recipe with hard DROP is needed + */ +int hw_db_inline_setup_mbr_filter(struct flow_nic_dev *ndev, uint32_t cat_hw_id, uint32_t ft, + uint32_t qsl_hw_id) +{ + (void)ft; + (void)qsl_hw_id; + (void)ft; + + const int offset = ((int)ndev->be.cat.cts_num + 1) / 2; + (void)offset; + + /* QSL for traffic policing */ + if (hw_mod_qsl_rcp_set(&ndev->be, HW_QSL_RCP_DROP, qsl_hw_id, 0x3) < 0) + return -1; + + if (hw_mod_qsl_rcp_flush(&ndev->be, qsl_hw_id, 1) < 0) + return -1; + + /* Select and enable QSL recipe */ + if (hw_mod_cat_cts_set(&ndev->be, HW_CAT_CTS_CAT_B, offset * cat_hw_id + 1, qsl_hw_id)) + return -1; + + if (hw_mod_cat_cts_flush(&ndev->be, offset * cat_hw_id, 6)) + return -1; + + if (hw_mod_cat_cte_set(&ndev->be, HW_CAT_CTE_ENABLE_BM, cat_hw_id, 0x8)) + return -1; + + if (hw_mod_cat_cte_flush(&ndev->be, cat_hw_id, 1)) + return -1; + + /* KM: Match all FTs for look-up A */ + for (int i = 0; i < 16; ++i) + hw_db_set_ft(ndev, HW_DB_FT_TYPE_KM, cat_hw_id, HW_DB_FT_LOOKUP_KEY_A, i, 1); + + /* FLM: Match all FTs for look-up A */ + for (int i = 0; i < 16; ++i) + hw_db_set_ft(ndev, HW_DB_FT_TYPE_FLM, cat_hw_id, HW_DB_FT_LOOKUP_KEY_A, i, 1); + + /* FLM: Match FT=ft_argument for look-up C */ + hw_db_set_ft(ndev, HW_DB_FT_TYPE_FLM, cat_hw_id, HW_DB_FT_LOOKUP_KEY_C, ft, 1); + + /* Make all CFN checks TRUE */ + if (hw_mod_cat_cfn_set(&ndev->be, HW_CAT_CFN_SET_ALL_DEFAULTS, cat_hw_id, 0, 0)) + return -1; + + if (hw_mod_cat_cfn_set(&ndev->be, HW_CAT_CFN_ENABLE, cat_hw_id, 0, 0x1)) + return -1; + + if (hw_mod_cat_cfn_set(&ndev->be, HW_CAT_CFN_PTC_L3, cat_hw_id, 0, 0x0)) + return -1; + + if (hw_mod_cat_cfn_set(&ndev->be, HW_CAT_CFN_INV, cat_hw_id, 0, 0x1)) + return -1; + + /* Final match: look-up_A == TRUE && look-up_C == TRUE */ + if (hw_mod_cat_cfn_set(&ndev->be, HW_CAT_CFN_KM0_OR, cat_hw_id, 0, 0x1)) + return -1; + + if (hw_mod_cat_cfn_set(&ndev->be, HW_CAT_CFN_KM1_OR, cat_hw_id, 0, 0x3)) + return -1; + + if (hw_mod_cat_cfn_flush(&ndev->be, cat_hw_id, 1)) + return -1; + + return 0; +} + +static void hw_db_inline_setup_default_flm_rcp(struct flow_nic_dev *ndev, int flm_rcp) +{ + uint32_t flm_mask[10]; + memset(flm_mask, 0xff, sizeof(flm_mask)); + + hw_mod_flm_rcp_set(&ndev->be, HW_FLM_RCP_PRESET_ALL, flm_rcp, 0x0); + hw_mod_flm_rcp_set(&ndev->be, HW_FLM_RCP_LOOKUP, flm_rcp, 1); + hw_mod_flm_rcp_set_mask(&ndev->be, HW_FLM_RCP_MASK, flm_rcp, flm_mask); + hw_mod_flm_rcp_set(&ndev->be, HW_FLM_RCP_KID, flm_rcp, flm_rcp + 2); + + hw_mod_flm_rcp_flush(&ndev->be, flm_rcp, 1); +} + + +static void hw_db_copy_ft(struct flow_nic_dev *ndev, int type, int cfn_dst, int cfn_src, + int lookup, int flow_type) +{ + const int max_lookups = 4; + const int 
cat_funcs = (int)ndev->be.cat.nb_cat_funcs / 8; + + int fte_index_dst = (8 * flow_type + cfn_dst / cat_funcs) * max_lookups + lookup; + int fte_field_dst = cfn_dst % cat_funcs; + + int fte_index_src = (8 * flow_type + cfn_src / cat_funcs) * max_lookups + lookup; + int fte_field_src = cfn_src % cat_funcs; + + uint32_t current_bm_dst = 0; + uint32_t current_bm_src = 0; + uint32_t fte_field_bm_dst = 1 << fte_field_dst; + uint32_t fte_field_bm_src = 1 << fte_field_src; + + switch (type) { + case HW_DB_FT_TYPE_FLM: + hw_mod_cat_fte_flm_get(&ndev->be, HW_CAT_FTE_ENABLE_BM, KM_FLM_IF_FIRST, + fte_index_dst, ¤t_bm_dst); + hw_mod_cat_fte_flm_get(&ndev->be, HW_CAT_FTE_ENABLE_BM, KM_FLM_IF_FIRST, + fte_index_src, ¤t_bm_src); + break; + + case HW_DB_FT_TYPE_KM: + hw_mod_cat_fte_km_get(&ndev->be, HW_CAT_FTE_ENABLE_BM, KM_FLM_IF_FIRST, + fte_index_dst, ¤t_bm_dst); + hw_mod_cat_fte_km_get(&ndev->be, HW_CAT_FTE_ENABLE_BM, KM_FLM_IF_FIRST, + fte_index_src, ¤t_bm_src); + break; + + default: + break; + } + + uint32_t enable = current_bm_src & fte_field_bm_src; + uint32_t final_bm_dst = enable ? (fte_field_bm_dst | current_bm_dst) + : (~fte_field_bm_dst & current_bm_dst); + + if (current_bm_dst != final_bm_dst) { + switch (type) { + case HW_DB_FT_TYPE_FLM: + hw_mod_cat_fte_flm_set(&ndev->be, HW_CAT_FTE_ENABLE_BM, KM_FLM_IF_FIRST, + fte_index_dst, final_bm_dst); + hw_mod_cat_fte_flm_flush(&ndev->be, KM_FLM_IF_FIRST, fte_index_dst, 1); + break; + + case HW_DB_FT_TYPE_KM: + hw_mod_cat_fte_km_set(&ndev->be, HW_CAT_FTE_ENABLE_BM, KM_FLM_IF_FIRST, + fte_index_dst, final_bm_dst); + hw_mod_cat_fte_km_flush(&ndev->be, KM_FLM_IF_FIRST, fte_index_dst, 1); + break; + + default: + break; + } + } +} + + +static int hw_db_inline_filter_apply(struct flow_nic_dev *ndev, + struct hw_db_inline_resource_db *db, + int cat_hw_id, + struct hw_db_match_set_idx match_set_idx, + struct hw_db_flm_ft flm_ft_idx, + struct hw_db_action_set_idx action_set_idx) +{ + (void)match_set_idx; + (void)flm_ft_idx; + + const struct hw_db_inline_match_set_data *match_set = + &db->match_set[match_set_idx.ids].data; + const struct hw_db_inline_cat_data *cat = &db->cat[match_set->cat.ids].data; + + const int km_ft = match_set->km_ft.id1; + const int km_rcp = (int)db->km[match_set->km.id1].data.rcp; + + const int flm_ft = flm_ft_idx.id1; + const int flm_rcp = flm_ft_idx.id2; + + const struct hw_db_inline_action_set_data *action_set = + &db->action_set[action_set_idx.ids].data; + const struct hw_db_inline_cot_data *cot = &db->cot[action_set->cot.ids].data; + + const int qsl_hw_id = action_set->qsl.ids; + const int slc_lr_hw_id = action_set->slc_lr.ids; + const int tpe_hw_id = action_set->tpe.ids; + const int hsh_hw_id = action_set->hsh.ids; + + /* Setup default FLM RCP if needed */ + if (flm_rcp > 0 && db->flm[flm_rcp].ref <= 0) + hw_db_inline_setup_default_flm_rcp(ndev, flm_rcp); + + /* Setup CAT.CFN */ + { + hw_mod_cat_cfn_set(&ndev->be, HW_CAT_CFN_SET_ALL_DEFAULTS, cat_hw_id, 0, 0x0); + hw_mod_cat_cfn_set(&ndev->be, HW_CAT_CFN_ENABLE, cat_hw_id, 0, 0x0); + hw_mod_cat_cfn_set(&ndev->be, HW_CAT_CFN_INV, cat_hw_id, 0, 0x0); + + /* Protocol checks */ + hw_mod_cat_cfn_set(&ndev->be, HW_CAT_CFN_PTC_INV, cat_hw_id, 0, 0x0); + hw_mod_cat_cfn_set(&ndev->be, HW_CAT_CFN_PTC_ISL, cat_hw_id, 0, -1); + hw_mod_cat_cfn_set(&ndev->be, HW_CAT_CFN_PTC_CFP, cat_hw_id, 0, -1); + hw_mod_cat_cfn_set(&ndev->be, HW_CAT_CFN_PTC_MAC, cat_hw_id, 0, -1); + hw_mod_cat_cfn_set(&ndev->be, HW_CAT_CFN_PTC_L2, cat_hw_id, 0, cat->ptc_mask_l2); + hw_mod_cat_cfn_set(&ndev->be, 
HW_CAT_CFN_PTC_VNTAG, cat_hw_id, 0, -1); + hw_mod_cat_cfn_set(&ndev->be, HW_CAT_CFN_PTC_VLAN, cat_hw_id, 0, cat->vlan_mask); + hw_mod_cat_cfn_set(&ndev->be, HW_CAT_CFN_PTC_MPLS, cat_hw_id, 0, -1); + hw_mod_cat_cfn_set(&ndev->be, HW_CAT_CFN_PTC_L3, cat_hw_id, 0, cat->ptc_mask_l3); + hw_mod_cat_cfn_set(&ndev->be, HW_CAT_CFN_PTC_FRAG, cat_hw_id, 0, + cat->ptc_mask_frag); + hw_mod_cat_cfn_set(&ndev->be, HW_CAT_CFN_PTC_IP_PROT, cat_hw_id, 0, cat->ip_prot); + hw_mod_cat_cfn_set(&ndev->be, HW_CAT_CFN_PTC_L4, cat_hw_id, 0, cat->ptc_mask_l4); + hw_mod_cat_cfn_set(&ndev->be, HW_CAT_CFN_PTC_TUNNEL, cat_hw_id, 0, + cat->ptc_mask_tunnel); + hw_mod_cat_cfn_set(&ndev->be, HW_CAT_CFN_PTC_TNL_L2, cat_hw_id, 0, -1); + hw_mod_cat_cfn_set(&ndev->be, HW_CAT_CFN_PTC_TNL_VLAN, cat_hw_id, 0, -1); + hw_mod_cat_cfn_set(&ndev->be, HW_CAT_CFN_PTC_TNL_MPLS, cat_hw_id, 0, -1); + hw_mod_cat_cfn_set(&ndev->be, HW_CAT_CFN_PTC_TNL_L3, cat_hw_id, 0, + cat->ptc_mask_l3_tunnel); + hw_mod_cat_cfn_set(&ndev->be, HW_CAT_CFN_PTC_TNL_FRAG, cat_hw_id, 0, -1); + hw_mod_cat_cfn_set(&ndev->be, HW_CAT_CFN_PTC_TNL_IP_PROT, cat_hw_id, 0, + cat->ip_prot_tunnel); + hw_mod_cat_cfn_set(&ndev->be, HW_CAT_CFN_PTC_TNL_L4, cat_hw_id, 0, + cat->ptc_mask_l4_tunnel); + + /* Error checks */ + hw_mod_cat_cfn_set(&ndev->be, HW_CAT_CFN_ERR_INV, cat_hw_id, 0, 0x0); + hw_mod_cat_cfn_set(&ndev->be, HW_CAT_CFN_ERR_CV, cat_hw_id, 0, 0x1); + hw_mod_cat_cfn_set(&ndev->be, HW_CAT_CFN_ERR_FCS, cat_hw_id, 0, 0x1); + hw_mod_cat_cfn_set(&ndev->be, HW_CAT_CFN_ERR_TRUNC, cat_hw_id, 0, 0x1); + hw_mod_cat_cfn_set(&ndev->be, HW_CAT_CFN_ERR_L3_CS, cat_hw_id, 0, 0x1); + hw_mod_cat_cfn_set(&ndev->be, HW_CAT_CFN_ERR_L4_CS, cat_hw_id, 0, 0x1); + hw_mod_cat_cfn_set(&ndev->be, HW_CAT_CFN_ERR_TNL_L3_CS, cat_hw_id, 0, -1); + hw_mod_cat_cfn_set(&ndev->be, HW_CAT_CFN_ERR_TNL_L4_CS, cat_hw_id, 0, -1); + hw_mod_cat_cfn_set(&ndev->be, HW_CAT_CFN_ERR_TTL_EXP, cat_hw_id, 0, + cat->err_mask_ttl); + hw_mod_cat_cfn_set(&ndev->be, HW_CAT_CFN_ERR_TNL_TTL_EXP, cat_hw_id, 0, + cat->err_mask_ttl_tunnel); + + /* MAC port check */ + hw_mod_cat_cfn_set(&ndev->be, HW_CAT_CFN_MAC_PORT, cat_hw_id, 0, + cat->mac_port_mask); + + /* Pattern match checks */ + hw_mod_cat_cfn_set(&ndev->be, HW_CAT_CFN_PM_CMP, cat_hw_id, 0, 0x0); + hw_mod_cat_cfn_set(&ndev->be, HW_CAT_CFN_PM_DCT, cat_hw_id, 0, 0x0); + hw_mod_cat_cfn_set(&ndev->be, HW_CAT_CFN_PM_EXT_INV, cat_hw_id, 0, 0x0); + hw_mod_cat_cfn_set(&ndev->be, HW_CAT_CFN_PM_CMB, cat_hw_id, 0, 0x0); + hw_mod_cat_cfn_set(&ndev->be, HW_CAT_CFN_PM_AND_INV, cat_hw_id, 0, -1); + hw_mod_cat_cfn_set(&ndev->be, HW_CAT_CFN_PM_OR_INV, cat_hw_id, 0, -1); + hw_mod_cat_cfn_set(&ndev->be, HW_CAT_CFN_PM_INV, cat_hw_id, 0, -1); + + /* Length checks */ + hw_mod_cat_cfn_set(&ndev->be, HW_CAT_CFN_LC, cat_hw_id, 0, 0x0); + hw_mod_cat_cfn_set(&ndev->be, HW_CAT_CFN_LC_INV, cat_hw_id, 0, -1); + + /* KM and FLM */ + hw_mod_cat_cfn_set(&ndev->be, HW_CAT_CFN_KM0_OR, cat_hw_id, 0, 0x1); + hw_mod_cat_cfn_set(&ndev->be, HW_CAT_CFN_KM1_OR, cat_hw_id, 0, 0x3); + + hw_mod_cat_cfn_flush(&ndev->be, cat_hw_id, 1); + } + + /* Setup CAT.CTS */ + { + const int offset = ((int)ndev->be.cat.cts_num + 1) / 2; + + hw_mod_cat_cts_set(&ndev->be, HW_CAT_CTS_CAT_A, offset * cat_hw_id + 0, cat_hw_id); + hw_mod_cat_cts_set(&ndev->be, HW_CAT_CTS_CAT_B, offset * cat_hw_id + 0, 0); + hw_mod_cat_cts_set(&ndev->be, HW_CAT_CTS_CAT_A, offset * cat_hw_id + 1, hsh_hw_id); + hw_mod_cat_cts_set(&ndev->be, HW_CAT_CTS_CAT_B, offset * cat_hw_id + 1, qsl_hw_id); + hw_mod_cat_cts_set(&ndev->be, HW_CAT_CTS_CAT_A, offset * 
cat_hw_id + 2, 0); + hw_mod_cat_cts_set(&ndev->be, HW_CAT_CTS_CAT_B, offset * cat_hw_id + 2, + slc_lr_hw_id); + hw_mod_cat_cts_set(&ndev->be, HW_CAT_CTS_CAT_A, offset * cat_hw_id + 3, 0); + hw_mod_cat_cts_set(&ndev->be, HW_CAT_CTS_CAT_B, offset * cat_hw_id + 3, 0); + hw_mod_cat_cts_set(&ndev->be, HW_CAT_CTS_CAT_A, offset * cat_hw_id + 4, 0); + hw_mod_cat_cts_set(&ndev->be, HW_CAT_CTS_CAT_B, offset * cat_hw_id + 4, 0); + hw_mod_cat_cts_set(&ndev->be, HW_CAT_CTS_CAT_A, offset * cat_hw_id + 5, tpe_hw_id); + hw_mod_cat_cts_set(&ndev->be, HW_CAT_CTS_CAT_B, offset * cat_hw_id + 5, 0); + + hw_mod_cat_cts_flush(&ndev->be, offset * cat_hw_id, 6); + } + + /* Setup CAT.CTE */ + { + hw_mod_cat_cte_set(&ndev->be, HW_CAT_CTE_ENABLE_BM, cat_hw_id, + 0x001 | 0x004 | (qsl_hw_id ? 0x008 : 0) | + (slc_lr_hw_id ? 0x020 : 0) | 0x040 | + (tpe_hw_id ? 0x400 : 0)); + hw_mod_cat_cte_flush(&ndev->be, cat_hw_id, 1); + } + + /* Setup CAT.KM */ + { + uint32_t bm = 0; + + hw_mod_cat_kcs_km_set(&ndev->be, HW_CAT_KCS_CATEGORY, KM_FLM_IF_FIRST, cat_hw_id, + km_rcp); + hw_mod_cat_kcs_km_flush(&ndev->be, KM_FLM_IF_FIRST, cat_hw_id, 1); + + hw_mod_cat_kce_km_get(&ndev->be, HW_CAT_KCE_ENABLE_BM, KM_FLM_IF_FIRST, + cat_hw_id / 8, &bm); + hw_mod_cat_kce_km_set(&ndev->be, HW_CAT_KCE_ENABLE_BM, KM_FLM_IF_FIRST, + cat_hw_id / 8, bm | (1 << (cat_hw_id % 8))); + hw_mod_cat_kce_km_flush(&ndev->be, KM_FLM_IF_FIRST, cat_hw_id / 8, 1); + + hw_db_set_ft(ndev, HW_DB_FT_TYPE_KM, cat_hw_id, HW_DB_FT_LOOKUP_KEY_A, km_ft, 1); + } + + /* Setup CAT.FLM */ + { + uint32_t bm = 0; + + hw_mod_cat_kcs_flm_set(&ndev->be, HW_CAT_KCS_CATEGORY, KM_FLM_IF_FIRST, cat_hw_id, + flm_rcp); + hw_mod_cat_kcs_flm_flush(&ndev->be, KM_FLM_IF_FIRST, cat_hw_id, 1); + + hw_mod_cat_kce_flm_get(&ndev->be, HW_CAT_KCE_ENABLE_BM, KM_FLM_IF_FIRST, + cat_hw_id / 8, &bm); + hw_mod_cat_kce_flm_set(&ndev->be, HW_CAT_KCE_ENABLE_BM, KM_FLM_IF_FIRST, + cat_hw_id / 8, bm | (1 << (cat_hw_id % 8))); + hw_mod_cat_kce_flm_flush(&ndev->be, KM_FLM_IF_FIRST, cat_hw_id / 8, 1); + + hw_db_set_ft(ndev, HW_DB_FT_TYPE_FLM, cat_hw_id, HW_DB_FT_LOOKUP_KEY_A, km_ft, 1); + hw_db_set_ft(ndev, HW_DB_FT_TYPE_FLM, cat_hw_id, HW_DB_FT_LOOKUP_KEY_C, flm_ft, 1); + } + + /* Setup CAT.COT */ + { + hw_mod_cat_cot_set(&ndev->be, HW_CAT_COT_PRESET_ALL, cat_hw_id, 0); + hw_mod_cat_cot_set(&ndev->be, HW_CAT_COT_COLOR, cat_hw_id, cot->frag_rcp << 10); + hw_mod_cat_cot_set(&ndev->be, HW_CAT_COT_KM, cat_hw_id, + cot->matcher_color_contrib); + hw_mod_cat_cot_flush(&ndev->be, cat_hw_id, 1); + } + + hw_mod_cat_cfn_set(&ndev->be, HW_CAT_CFN_ENABLE, cat_hw_id, 0, 0x1); + hw_mod_cat_cfn_flush(&ndev->be, cat_hw_id, 1); + + return 0; +} + +static void hw_db_inline_filter_clear(struct flow_nic_dev *ndev, + struct hw_db_inline_resource_db *db, + int cat_hw_id) +{ + /* Setup CAT.CFN */ + hw_mod_cat_cfn_set(&ndev->be, HW_CAT_CFN_SET_ALL_DEFAULTS, cat_hw_id, 0, 0x0); + hw_mod_cat_cfn_flush(&ndev->be, cat_hw_id, 1); + + /* Setup CAT.CTS */ + { + const int offset = ((int)ndev->be.cat.cts_num + 1) / 2; + + for (int i = 0; i < 6; ++i) { + hw_mod_cat_cts_set(&ndev->be, HW_CAT_CTS_CAT_A, offset * cat_hw_id + i, 0); + hw_mod_cat_cts_set(&ndev->be, HW_CAT_CTS_CAT_B, offset * cat_hw_id + i, 0); + } + + hw_mod_cat_cts_flush(&ndev->be, offset * cat_hw_id, 6); + } + + /* Setup CAT.CTE */ + { + hw_mod_cat_cte_set(&ndev->be, HW_CAT_CTE_ENABLE_BM, cat_hw_id, 0); + hw_mod_cat_cte_flush(&ndev->be, cat_hw_id, 1); + } + + /* Setup CAT.KM */ + { + uint32_t bm = 0; + + hw_mod_cat_kcs_km_set(&ndev->be, HW_CAT_KCS_CATEGORY, KM_FLM_IF_FIRST, 
cat_hw_id, + 0); + hw_mod_cat_kcs_km_flush(&ndev->be, KM_FLM_IF_FIRST, cat_hw_id, 1); + + hw_mod_cat_kce_km_get(&ndev->be, HW_CAT_KCE_ENABLE_BM, KM_FLM_IF_FIRST, + cat_hw_id / 8, &bm); + hw_mod_cat_kce_km_set(&ndev->be, HW_CAT_KCE_ENABLE_BM, KM_FLM_IF_FIRST, + cat_hw_id / 8, bm & ~(1 << (cat_hw_id % 8))); + hw_mod_cat_kce_km_flush(&ndev->be, KM_FLM_IF_FIRST, cat_hw_id / 8, 1); + + for (int ft = 0; ft < (int)db->nb_km_ft; ++ft) { + hw_db_set_ft(ndev, HW_DB_FT_TYPE_KM, cat_hw_id, HW_DB_FT_LOOKUP_KEY_A, ft, + 0); + } + } + + /* Setup CAT.FLM */ + { + uint32_t bm = 0; + + hw_mod_cat_kcs_flm_set(&ndev->be, HW_CAT_KCS_CATEGORY, KM_FLM_IF_FIRST, cat_hw_id, + 0); + hw_mod_cat_kcs_flm_flush(&ndev->be, KM_FLM_IF_FIRST, cat_hw_id, 1); + + hw_mod_cat_kce_flm_get(&ndev->be, HW_CAT_KCE_ENABLE_BM, KM_FLM_IF_FIRST, + cat_hw_id / 8, &bm); + hw_mod_cat_kce_flm_set(&ndev->be, HW_CAT_KCE_ENABLE_BM, KM_FLM_IF_FIRST, + cat_hw_id / 8, bm & ~(1 << (cat_hw_id % 8))); + hw_mod_cat_kce_flm_flush(&ndev->be, KM_FLM_IF_FIRST, cat_hw_id / 8, 1); + + for (int ft = 0; ft < (int)db->nb_flm_ft; ++ft) { + hw_db_set_ft(ndev, HW_DB_FT_TYPE_FLM, cat_hw_id, HW_DB_FT_LOOKUP_KEY_A, ft, + 0); + hw_db_set_ft(ndev, HW_DB_FT_TYPE_FLM, cat_hw_id, HW_DB_FT_LOOKUP_KEY_C, ft, + 0); + } + } + + hw_mod_cat_cot_set(&ndev->be, HW_CAT_COT_PRESET_ALL, cat_hw_id, 0); + hw_mod_cat_cot_flush(&ndev->be, cat_hw_id, 1); +} + +static void hw_db_inline_filter_copy(struct flow_nic_dev *ndev, + struct hw_db_inline_resource_db *db, int cfn_dst, int cfn_src) +{ + uint32_t val = 0; + + hw_mod_cat_cfn_set(&ndev->be, HW_CAT_CFN_COPY_FROM, cfn_dst, 0, cfn_src); + hw_mod_cat_cfn_set(&ndev->be, HW_CAT_CFN_ENABLE, cfn_dst, 0, 0x0); + hw_mod_cat_cfn_flush(&ndev->be, cfn_dst, 1); + + /* Setup CAT.CTS */ + { + const int offset = ((int)ndev->be.cat.cts_num + 1) / 2; + + for (int i = 0; i < offset; ++i) { + hw_mod_cat_cts_get(&ndev->be, HW_CAT_CTS_CAT_A, offset * cfn_src + i, + &val); + hw_mod_cat_cts_set(&ndev->be, HW_CAT_CTS_CAT_A, offset * cfn_dst + i, val); + hw_mod_cat_cts_get(&ndev->be, HW_CAT_CTS_CAT_B, offset * cfn_src + i, + &val); + hw_mod_cat_cts_set(&ndev->be, HW_CAT_CTS_CAT_B, offset * cfn_dst + i, val); + } + + hw_mod_cat_cts_flush(&ndev->be, offset * cfn_dst, offset); + } + + /* Setup CAT.CTE */ + { + hw_mod_cat_cte_get(&ndev->be, HW_CAT_CTE_ENABLE_BM, cfn_src, &val); + hw_mod_cat_cte_set(&ndev->be, HW_CAT_CTE_ENABLE_BM, cfn_dst, val); + hw_mod_cat_cte_flush(&ndev->be, cfn_dst, 1); + } + + /* Setup CAT.KM */ + { + uint32_t bit_src = 0; + + hw_mod_cat_kcs_km_get(&ndev->be, HW_CAT_KCS_CATEGORY, KM_FLM_IF_FIRST, cfn_src, + &val); + hw_mod_cat_kcs_km_set(&ndev->be, HW_CAT_KCS_CATEGORY, KM_FLM_IF_FIRST, cfn_dst, + val); + hw_mod_cat_kcs_km_flush(&ndev->be, KM_FLM_IF_FIRST, cfn_dst, 1); + + hw_mod_cat_kce_km_get(&ndev->be, HW_CAT_KCE_ENABLE_BM, KM_FLM_IF_FIRST, + cfn_src / 8, &val); + bit_src = (val >> (cfn_src % 8)) & 0x1; + + hw_mod_cat_kce_km_get(&ndev->be, HW_CAT_KCE_ENABLE_BM, KM_FLM_IF_FIRST, + cfn_dst / 8, &val); + val &= ~(1 << (cfn_dst % 8)); + + hw_mod_cat_kce_km_set(&ndev->be, HW_CAT_KCE_ENABLE_BM, KM_FLM_IF_FIRST, + cfn_dst / 8, val | (bit_src << (cfn_dst % 8))); + hw_mod_cat_kce_km_flush(&ndev->be, KM_FLM_IF_FIRST, cfn_dst / 8, 1); + + for (int ft = 0; ft < (int)db->nb_km_ft; ++ft) { + hw_db_copy_ft(ndev, HW_DB_FT_TYPE_KM, cfn_dst, cfn_src, + HW_DB_FT_LOOKUP_KEY_A, ft); + } + } + + /* Setup CAT.FLM */ + { + uint32_t bit_src = 0; + + hw_mod_cat_kcs_flm_get(&ndev->be, HW_CAT_KCS_CATEGORY, KM_FLM_IF_FIRST, cfn_src, + &val); + 
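+ /* Mirror the source CFN's FLM category selection (KCS) and its enable bit (KCE) onto + * the destination CFN; each KCE entry packs the enable bits of 8 consecutive CFNs. + */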
hw_mod_cat_kcs_flm_set(&ndev->be, HW_CAT_KCS_CATEGORY, KM_FLM_IF_FIRST, cfn_dst, + val); + hw_mod_cat_kcs_flm_flush(&ndev->be, KM_FLM_IF_FIRST, cfn_dst, 1); + + hw_mod_cat_kce_flm_get(&ndev->be, HW_CAT_KCE_ENABLE_BM, KM_FLM_IF_FIRST, + cfn_src / 8, &val); + bit_src = (val >> (cfn_src % 8)) & 0x1; + + hw_mod_cat_kce_flm_get(&ndev->be, HW_CAT_KCE_ENABLE_BM, KM_FLM_IF_FIRST, + cfn_dst / 8, &val); + val &= ~(1 << (cfn_dst % 8)); + + hw_mod_cat_kce_flm_set(&ndev->be, HW_CAT_KCE_ENABLE_BM, KM_FLM_IF_FIRST, + cfn_dst / 8, val | (bit_src << (cfn_dst % 8))); + hw_mod_cat_kce_flm_flush(&ndev->be, KM_FLM_IF_FIRST, cfn_dst / 8, 1); + + for (int ft = 0; ft < (int)db->nb_flm_ft; ++ft) { + hw_db_copy_ft(ndev, HW_DB_FT_TYPE_FLM, cfn_dst, cfn_src, + HW_DB_FT_LOOKUP_KEY_A, ft); + hw_db_copy_ft(ndev, HW_DB_FT_TYPE_FLM, cfn_dst, cfn_src, + HW_DB_FT_LOOKUP_KEY_C, ft); + } + } + + /* Setup CAT.COT */ + { + hw_mod_cat_cot_set(&ndev->be, HW_CAT_COT_COPY_FROM, cfn_dst, cfn_src); + hw_mod_cat_cot_flush(&ndev->be, cfn_dst, 1); + } + + hw_mod_cat_cfn_set(&ndev->be, HW_CAT_CFN_ENABLE, cfn_dst, 0, 0x1); + hw_mod_cat_cfn_flush(&ndev->be, cfn_dst, 1); +} + +/* + * Algorithm for moving CFN entries to make space with respect of priority. + * The algorithm will make the fewest possible moves to fit a new CFN entry. + */ +static int hw_db_inline_alloc_prioritized_cfn(struct flow_nic_dev *ndev, + struct hw_db_inline_resource_db *db, + struct hw_db_match_set_idx match_set_idx) +{ + const struct hw_db_inline_resource_db_match_set *match_set = + &db->match_set[match_set_idx.ids]; + + uint64_t priority = ((uint64_t)(match_set->data.priority & 0xff) << 56) | + ((uint64_t)(0xffffff - (match_set->set_priority & 0xffffff)) << 32) | + (0xffffffff - ++db->cfn_priority_counter); + + int db_cfn_idx = -1; + + struct { + uint64_t priority; + uint32_t idx; + } sorted_priority[db->nb_cat]; + + memset(sorted_priority, 0x0, sizeof(sorted_priority)); + + uint32_t in_use_count = 0; + + for (uint32_t i = 1; i < db->nb_cat; ++i) { + if (db->cfn[i].ref > 0) { + sorted_priority[db->cfn[i].cfn_hw].priority = db->cfn[i].priority; + sorted_priority[db->cfn[i].cfn_hw].idx = i; + in_use_count += 1; + + } else if (db_cfn_idx == -1) { + db_cfn_idx = (int)i; + } + } + + if (in_use_count >= db->nb_cat - 1) + return -1; + + if (in_use_count == 0) { + db->cfn[db_cfn_idx].ref = 1; + db->cfn[db_cfn_idx].cfn_hw = 1; + db->cfn[db_cfn_idx].priority = priority; + return db_cfn_idx; + } + + int goal = 1; + int free_before = -1000000; + int free_after = 1000000; + int found_smaller = 0; + + for (int i = 1; i < (int)db->nb_cat; ++i) { + if (sorted_priority[i].priority > priority) { /* Bigger */ + goal = i + 1; + + } else if (sorted_priority[i].priority == 0) { /* Not set */ + if (found_smaller) { + if (free_after > i) + free_after = i; + + } else { + free_before = i; + } + + } else {/* Smaller */ + found_smaller = 1; + } + } + + int diff_before = goal - free_before - 1; + int diff_after = free_after - goal; + + if (goal < (int)db->nb_cat && sorted_priority[goal].priority == 0) { + db->cfn[db_cfn_idx].ref = 1; + db->cfn[db_cfn_idx].cfn_hw = goal; + db->cfn[db_cfn_idx].priority = priority; + return db_cfn_idx; + } + + if (diff_after <= diff_before) { + for (int i = free_after; i > goal; --i) { + int *cfn_hw = &db->cfn[sorted_priority[i - 1].idx].cfn_hw; + hw_db_inline_filter_copy(ndev, db, i, *cfn_hw); + hw_db_inline_filter_clear(ndev, db, *cfn_hw); + *cfn_hw = i; + } + + } else { + goal -= 1; + + for (int i = free_before; i < goal; ++i) { + int *cfn_hw = 
&db->cfn[sorted_priority[i + 1].idx].cfn_hw; + hw_db_inline_filter_copy(ndev, db, i, *cfn_hw); + hw_db_inline_filter_clear(ndev, db, *cfn_hw); + *cfn_hw = i; + } + } + + db->cfn[db_cfn_idx].ref = 1; + db->cfn[db_cfn_idx].cfn_hw = goal; + db->cfn[db_cfn_idx].priority = priority; + + return db_cfn_idx; +} + +static void hw_db_inline_free_prioritized_cfn(struct hw_db_inline_resource_db *db, int cfn_hw) +{ + for (uint32_t i = 0; i < db->nb_cat; ++i) { + if (db->cfn[i].cfn_hw == cfn_hw) { + memset(&db->cfn[i], 0x0, sizeof(struct hw_db_inline_resource_db_cfn)); + break; + } + } +} + +static void hw_db_inline_update_active_filters(struct flow_nic_dev *ndev, void *db_handle, + int group) +{ + struct hw_db_inline_resource_db *db = (struct hw_db_inline_resource_db *)db_handle; + struct hw_db_inline_resource_db_flm_rcp *flm_rcp = &db->flm[group]; + struct hw_db_inline_resource_db_flm_cfn_map *cell; + + for (uint32_t match_set_idx = 0; match_set_idx < db->nb_cat; ++match_set_idx) { + for (uint32_t ft_idx = 0; ft_idx < db->nb_flm_ft; ++ft_idx) { + int active = flm_rcp->ft[ft_idx].ref > 0 && + flm_rcp->match_set[match_set_idx].ref > 0; + cell = &flm_rcp->cfn_map[match_set_idx * db->nb_flm_ft + ft_idx]; + + if (active && cell->cfn_idx == 0) { + /* Setup filter */ + cell->cfn_idx = hw_db_inline_alloc_prioritized_cfn(ndev, db, + flm_rcp->match_set[match_set_idx].idx); + hw_db_inline_filter_apply(ndev, db, db->cfn[cell->cfn_idx].cfn_hw, + flm_rcp->match_set[match_set_idx].idx, + flm_rcp->ft[ft_idx].idx, + group == 0 + ? db->match_set[flm_rcp->match_set[match_set_idx] + .idx.ids] + .data.action_set + : flm_rcp->ft[ft_idx].data.action_set); + } + + if (!active && cell->cfn_idx > 0) { + /* Teardown filter */ + hw_db_inline_filter_clear(ndev, db, db->cfn[cell->cfn_idx].cfn_hw); + hw_db_inline_free_prioritized_cfn(db, + db->cfn[cell->cfn_idx].cfn_hw); + cell->cfn_idx = 0; + } + } + } +} + + +/******************************************************************************/ +/* Match set */ +/******************************************************************************/ + +static int hw_db_inline_match_set_compare(const struct hw_db_inline_match_set_data *data1, + const struct hw_db_inline_match_set_data *data2) +{ + return data1->cat.raw == data2->cat.raw && data1->km.raw == data2->km.raw && + data1->km_ft.raw == data2->km_ft.raw && data1->jump == data2->jump; +} + +struct hw_db_match_set_idx +hw_db_inline_match_set_add(struct flow_nic_dev *ndev, void *db_handle, + const struct hw_db_inline_match_set_data *data) +{ + struct hw_db_inline_resource_db *db = (struct hw_db_inline_resource_db *)db_handle; + struct hw_db_inline_resource_db_flm_rcp *flm_rcp = &db->flm[data->jump]; + struct hw_db_match_set_idx idx = { .raw = 0 }; + int found = 0; + + idx.type = HW_DB_IDX_TYPE_MATCH_SET; + + for (uint32_t i = 0; i < HW_DB_INLINE_MATCH_SET_NB; ++i) { + if (!found && db->match_set[i].ref <= 0) { + found = 1; + idx.ids = i; + } + + if (db->match_set[i].ref > 0 && + hw_db_inline_match_set_compare(data, &db->match_set[i].data)) { + idx.ids = i; + hw_db_inline_match_set_ref(ndev, db, idx); + return idx; + } + } + + if (!found) { + idx.error = 1; + return idx; + } + + found = 0; + + for (uint32_t i = 0; i < db->nb_cat; ++i) { + if (flm_rcp->match_set[i].ref <= 0) { + found = 1; + flm_rcp->match_set[i].ref = 1; + flm_rcp->match_set[i].idx.raw = idx.raw; + break; + } + } + + if (!found) { + idx.error = 1; + return idx; + } + + memcpy(&db->match_set[idx.ids].data, data, sizeof(struct hw_db_inline_match_set_data)); + 
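+ /* Record the insertion order; hw_db_inline_alloc_prioritized_cfn() folds it into the + * CFN priority key as a tie-breaker between match sets sharing the same flow priority. + */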
db->match_set[idx.ids].ref = 1; + db->match_set[idx.ids].set_priority = ++db->set_priority_counter; + + hw_db_inline_update_active_filters(ndev, db, data->jump); + + return idx; +} + +void hw_db_inline_match_set_ref(struct flow_nic_dev *ndev, void *db_handle, + struct hw_db_match_set_idx idx) +{ + (void)ndev; + struct hw_db_inline_resource_db *db = (struct hw_db_inline_resource_db *)db_handle; + + if (!idx.error) + db->match_set[idx.ids].ref += 1; +} + +void hw_db_inline_match_set_deref(struct flow_nic_dev *ndev, void *db_handle, + struct hw_db_match_set_idx idx) +{ + struct hw_db_inline_resource_db *db = (struct hw_db_inline_resource_db *)db_handle; + struct hw_db_inline_resource_db_flm_rcp *flm_rcp; + int jump; + + if (idx.error) + return; + + db->match_set[idx.ids].ref -= 1; + + if (db->match_set[idx.ids].ref > 0) + return; + + jump = db->match_set[idx.ids].data.jump; + flm_rcp = &db->flm[jump]; + + for (uint32_t i = 0; i < db->nb_cat; ++i) { + if (flm_rcp->match_set[i].idx.raw == idx.raw) { + flm_rcp->match_set[i].ref = 0; + hw_db_inline_update_active_filters(ndev, db, jump); + memset(&flm_rcp->match_set[i], 0x0, + sizeof(struct hw_db_inline_resource_db_flm_match_set)); + } + } + + memset(&db->match_set[idx.ids].data, 0x0, sizeof(struct hw_db_inline_match_set_data)); + db->match_set[idx.ids].ref = 0; +} + +/******************************************************************************/ +/* Action set */ +/******************************************************************************/ + +static int hw_db_inline_action_set_compare(const struct hw_db_inline_action_set_data *data1, + const struct hw_db_inline_action_set_data *data2) +{ + if (data1->contains_jump) + return data2->contains_jump && data1->jump == data2->jump; + + return data1->cot.raw == data2->cot.raw && data1->qsl.raw == data2->qsl.raw && + data1->slc_lr.raw == data2->slc_lr.raw && data1->tpe.raw == data2->tpe.raw && + data1->hsh.raw == data2->hsh.raw && data1->scrub.raw == data2->scrub.raw; +} + +struct hw_db_action_set_idx +hw_db_inline_action_set_add(struct flow_nic_dev *ndev, void *db_handle, + const struct hw_db_inline_action_set_data *data) +{ + struct hw_db_inline_resource_db *db = (struct hw_db_inline_resource_db *)db_handle; + struct hw_db_action_set_idx idx = { .raw = 0 }; + int found = 0; + + idx.type = HW_DB_IDX_TYPE_ACTION_SET; + + for (uint32_t i = 0; i < HW_DB_INLINE_ACTION_SET_NB; ++i) { + if (!found && db->action_set[i].ref <= 0) { + found = 1; + idx.ids = i; + } + + if (db->action_set[i].ref > 0 && + hw_db_inline_action_set_compare(data, &db->action_set[i].data)) { + idx.ids = i; + hw_db_inline_action_set_ref(ndev, db, idx); + return idx; + } + } + + if (!found) { + idx.error = 1; + return idx; + } + + memcpy(&db->action_set[idx.ids].data, data, sizeof(struct hw_db_inline_action_set_data)); + db->action_set[idx.ids].ref = 1; + + return idx; +} + +void hw_db_inline_action_set_ref(struct flow_nic_dev *ndev, void *db_handle, + struct hw_db_action_set_idx idx) +{ + (void)ndev; + struct hw_db_inline_resource_db *db = (struct hw_db_inline_resource_db *)db_handle; + + if (!idx.error) + db->action_set[idx.ids].ref += 1; +} + +void hw_db_inline_action_set_deref(struct flow_nic_dev *ndev, void *db_handle, + struct hw_db_action_set_idx idx) +{ + (void)ndev; + struct hw_db_inline_resource_db *db = (struct hw_db_inline_resource_db *)db_handle; + + if (idx.error) + return; + + db->action_set[idx.ids].ref -= 1; + + if (db->action_set[idx.ids].ref <= 0) { + memset(&db->action_set[idx.ids].data, 0x0, + sizeof(struct 
hw_db_inline_action_set_data)); + db->action_set[idx.ids].ref = 0; + } +} + +/******************************************************************************/ +/* COT */ +/******************************************************************************/ + +static int hw_db_inline_cot_compare(const struct hw_db_inline_cot_data *data1, + const struct hw_db_inline_cot_data *data2) +{ + return data1->matcher_color_contrib == data2->matcher_color_contrib && + data1->frag_rcp == data2->frag_rcp; +} + +struct hw_db_cot_idx hw_db_inline_cot_add(struct flow_nic_dev *ndev, void *db_handle, + const struct hw_db_inline_cot_data *data) +{ + struct hw_db_inline_resource_db *db = (struct hw_db_inline_resource_db *)db_handle; + struct hw_db_cot_idx idx = { .raw = 0 }; + int found = 0; + + idx.type = HW_DB_IDX_TYPE_COT; + + for (uint32_t i = 1; i < db->nb_cot; ++i) { + int ref = db->cot[i].ref; + + if (ref > 0 && hw_db_inline_cot_compare(data, &db->cot[i].data)) { + idx.ids = i; + hw_db_inline_cot_ref(ndev, db, idx); + return idx; + } + + if (!found && ref <= 0) { + found = 1; + idx.ids = i; + } + } + + if (!found) { + idx.error = 1; + return idx; + } + + db->cot[idx.ids].ref = 1; + memcpy(&db->cot[idx.ids].data, data, sizeof(struct hw_db_inline_cot_data)); + + return idx; +} + +void hw_db_inline_cot_ref(struct flow_nic_dev *ndev __rte_unused, void *db_handle, + struct hw_db_cot_idx idx) +{ + struct hw_db_inline_resource_db *db = (struct hw_db_inline_resource_db *)db_handle; + + if (!idx.error) + db->cot[idx.ids].ref += 1; +} + +void hw_db_inline_cot_deref(struct flow_nic_dev *ndev __rte_unused, void *db_handle, + struct hw_db_cot_idx idx) +{ + struct hw_db_inline_resource_db *db = (struct hw_db_inline_resource_db *)db_handle; + + if (idx.error) + return; + + db->cot[idx.ids].ref -= 1; + + if (db->cot[idx.ids].ref <= 0) { + memset(&db->cot[idx.ids].data, 0x0, sizeof(struct hw_db_inline_cot_data)); + db->cot[idx.ids].ref = 0; + } +} + +/******************************************************************************/ +/* QSL */ +/******************************************************************************/ + +/* Calculate queue mask for QSL TBL_MSK for given number of queues. + * NOTE: If number of queues is not power of two, then queue mask will be created + * for nearest smaller power of two. 
+ */ +static uint32_t queue_mask(uint32_t nr_queues) +{ + nr_queues |= nr_queues >> 1; + nr_queues |= nr_queues >> 2; + nr_queues |= nr_queues >> 4; + nr_queues |= nr_queues >> 8; + nr_queues |= nr_queues >> 16; + return nr_queues >> 1; +} + +static int hw_db_inline_qsl_compare(const struct hw_db_inline_qsl_data *data1, + const struct hw_db_inline_qsl_data *data2) +{ + if (data1->discard != data2->discard || data1->drop != data2->drop || + data1->table_size != data2->table_size || data1->retransmit != data2->retransmit) { + return 0; + } + + for (int i = 0; i < HW_DB_INLINE_MAX_QST_PER_QSL; ++i) { + if (data1->table[i].queue != data2->table[i].queue || + data1->table[i].queue_en != data2->table[i].queue_en || + data1->table[i].tx_port != data2->table[i].tx_port || + data1->table[i].tx_port_en != data2->table[i].tx_port_en) { + return 0; + } + } + + return 1; +} + +struct hw_db_qsl_idx hw_db_inline_qsl_add(struct flow_nic_dev *ndev, void *db_handle, + const struct hw_db_inline_qsl_data *data) +{ + struct hw_db_inline_resource_db *db = (struct hw_db_inline_resource_db *)db_handle; + struct hw_db_qsl_idx qsl_idx = { .raw = 0 }; + uint32_t qst_idx = 0; + int res; + + qsl_idx.type = HW_DB_IDX_TYPE_QSL; + + if (data->discard) { + qsl_idx.ids = 0; + return qsl_idx; + } + + for (uint32_t i = 1; i < db->nb_qsl; ++i) { + if (hw_db_inline_qsl_compare(data, &db->qsl[i].data)) { + qsl_idx.ids = i; + hw_db_inline_qsl_ref(ndev, db, qsl_idx); + return qsl_idx; + } + } + + res = flow_nic_alloc_resource(ndev, RES_QSL_RCP, 1); + + if (res < 0) { + qsl_idx.error = 1; + return qsl_idx; + } + + qsl_idx.ids = res & 0xff; + + if (data->table_size > 0) { + res = flow_nic_alloc_resource_config(ndev, RES_QSL_QST, data->table_size, 1); + + if (res < 0) { + flow_nic_deref_resource(ndev, RES_QSL_RCP, qsl_idx.ids); + qsl_idx.error = 1; + return qsl_idx; + } + + qst_idx = (uint32_t)res; + } + + memcpy(&db->qsl[qsl_idx.ids].data, data, sizeof(struct hw_db_inline_qsl_data)); + db->qsl[qsl_idx.ids].qst_idx = qst_idx; + + hw_mod_qsl_rcp_set(&ndev->be, HW_QSL_RCP_PRESET_ALL, qsl_idx.ids, 0x0); + + hw_mod_qsl_rcp_set(&ndev->be, HW_QSL_RCP_DISCARD, qsl_idx.ids, data->discard); + hw_mod_qsl_rcp_set(&ndev->be, HW_QSL_RCP_DROP, qsl_idx.ids, data->drop * 0x3); + hw_mod_qsl_rcp_set(&ndev->be, HW_QSL_RCP_LR, qsl_idx.ids, data->retransmit * 0x3); + + if (data->table_size == 0) { + hw_mod_qsl_rcp_set(&ndev->be, HW_QSL_RCP_TBL_LO, qsl_idx.ids, 0x0); + hw_mod_qsl_rcp_set(&ndev->be, HW_QSL_RCP_TBL_HI, qsl_idx.ids, 0x0); + hw_mod_qsl_rcp_set(&ndev->be, HW_QSL_RCP_TBL_IDX, qsl_idx.ids, 0x0); + hw_mod_qsl_rcp_set(&ndev->be, HW_QSL_RCP_TBL_MSK, qsl_idx.ids, 0x0); + + } else { + const uint32_t table_start = qst_idx; + const uint32_t table_end = table_start + data->table_size - 1; + + hw_mod_qsl_rcp_set(&ndev->be, HW_QSL_RCP_TBL_LO, qsl_idx.ids, table_start); + hw_mod_qsl_rcp_set(&ndev->be, HW_QSL_RCP_TBL_HI, qsl_idx.ids, table_end); + + /* Toeplitz hash function uses TBL_IDX and TBL_MSK. 
*/ + uint32_t msk = queue_mask(table_end - table_start + 1); + hw_mod_qsl_rcp_set(&ndev->be, HW_QSL_RCP_TBL_IDX, qsl_idx.ids, table_start); + hw_mod_qsl_rcp_set(&ndev->be, HW_QSL_RCP_TBL_MSK, qsl_idx.ids, msk); + + for (uint32_t i = 0; i < data->table_size; ++i) { + hw_mod_qsl_qst_set(&ndev->be, HW_QSL_QST_PRESET_ALL, table_start + i, 0x0); + + hw_mod_qsl_qst_set(&ndev->be, HW_QSL_QST_TX_PORT, table_start + i, + data->table[i].tx_port); + hw_mod_qsl_qst_set(&ndev->be, HW_QSL_QST_LRE, table_start + i, + data->table[i].tx_port_en); + + hw_mod_qsl_qst_set(&ndev->be, HW_QSL_QST_QUEUE, table_start + i, + data->table[i].queue); + hw_mod_qsl_qst_set(&ndev->be, HW_QSL_QST_EN, table_start + i, + data->table[i].queue_en); + } + + hw_mod_qsl_qst_flush(&ndev->be, table_start, data->table_size); + } + + hw_mod_qsl_rcp_flush(&ndev->be, qsl_idx.ids, 1); + + return qsl_idx; +} + +void hw_db_inline_qsl_ref(struct flow_nic_dev *ndev, void *db_handle, struct hw_db_qsl_idx idx) +{ + (void)db_handle; + + if (!idx.error && idx.ids != 0) + flow_nic_ref_resource(ndev, RES_QSL_RCP, idx.ids); +} + +void hw_db_inline_qsl_deref(struct flow_nic_dev *ndev, void *db_handle, struct hw_db_qsl_idx idx) +{ + struct hw_db_inline_resource_db *db = (struct hw_db_inline_resource_db *)db_handle; + + if (idx.error || idx.ids == 0) + return; + + if (flow_nic_deref_resource(ndev, RES_QSL_RCP, idx.ids) == 0) { + const int table_size = (int)db->qsl[idx.ids].data.table_size; + + hw_mod_qsl_rcp_set(&ndev->be, HW_QSL_RCP_PRESET_ALL, idx.ids, 0x0); + hw_mod_qsl_rcp_flush(&ndev->be, idx.ids, 1); + + if (table_size > 0) { + const int table_start = db->qsl[idx.ids].qst_idx; + + for (int i = 0; i < (int)table_size; ++i) { + hw_mod_qsl_qst_set(&ndev->be, HW_QSL_QST_PRESET_ALL, + table_start + i, 0x0); + flow_nic_free_resource(ndev, RES_QSL_QST, table_start + i); + } + + hw_mod_qsl_qst_flush(&ndev->be, table_start, table_size); + } + + memset(&db->qsl[idx.ids].data, 0x0, sizeof(struct hw_db_inline_qsl_data)); + db->qsl[idx.ids].qst_idx = 0; + } +} + +/******************************************************************************/ +/* SLC_LR */ +/******************************************************************************/ + +static int hw_db_inline_slc_lr_compare(const struct hw_db_inline_slc_lr_data *data1, + const struct hw_db_inline_slc_lr_data *data2) +{ + if (!data1->head_slice_en) + return data1->head_slice_en == data2->head_slice_en; + + return data1->head_slice_en == data2->head_slice_en && + data1->head_slice_dyn == data2->head_slice_dyn && + data1->head_slice_ofs == data2->head_slice_ofs; +} + +struct hw_db_slc_lr_idx hw_db_inline_slc_lr_add(struct flow_nic_dev *ndev, void *db_handle, + const struct hw_db_inline_slc_lr_data *data) +{ + struct hw_db_inline_resource_db *db = (struct hw_db_inline_resource_db *)db_handle; + struct hw_db_slc_lr_idx idx = { .raw = 0 }; + int found = 0; + + idx.type = HW_DB_IDX_TYPE_SLC_LR; + + for (uint32_t i = 1; i < db->nb_slc_lr; ++i) { + int ref = db->slc_lr[i].ref; + + if (ref > 0 && hw_db_inline_slc_lr_compare(data, &db->slc_lr[i].data)) { + idx.ids = i; + hw_db_inline_slc_lr_ref(ndev, db, idx); + return idx; + } + + if (!found && ref <= 0) { + found = 1; + idx.ids = i; + } + } + + if (!found) { + idx.error = 1; + return idx; + } + + db->slc_lr[idx.ids].ref = 1; + memcpy(&db->slc_lr[idx.ids].data, data, sizeof(struct hw_db_inline_slc_lr_data)); + + hw_mod_slc_lr_rcp_set(&ndev->be, HW_SLC_LR_RCP_HEAD_SLC_EN, idx.ids, data->head_slice_en); + hw_mod_slc_lr_rcp_set(&ndev->be, HW_SLC_LR_RCP_HEAD_DYN, 
idx.ids, data->head_slice_dyn); + hw_mod_slc_lr_rcp_set(&ndev->be, HW_SLC_LR_RCP_HEAD_OFS, idx.ids, data->head_slice_ofs); + hw_mod_slc_lr_rcp_flush(&ndev->be, idx.ids, 1); + + return idx; +} + +void hw_db_inline_slc_lr_ref(struct flow_nic_dev *ndev, void *db_handle, + struct hw_db_slc_lr_idx idx) +{ + (void)ndev; + struct hw_db_inline_resource_db *db = (struct hw_db_inline_resource_db *)db_handle; + + if (!idx.error) + db->slc_lr[idx.ids].ref += 1; +} + +void hw_db_inline_slc_lr_deref(struct flow_nic_dev *ndev, void *db_handle, + struct hw_db_slc_lr_idx idx) +{ + struct hw_db_inline_resource_db *db = (struct hw_db_inline_resource_db *)db_handle; + + if (idx.error) + return; + + db->slc_lr[idx.ids].ref -= 1; + + if (db->slc_lr[idx.ids].ref <= 0) { + hw_mod_slc_lr_rcp_set(&ndev->be, HW_SLC_LR_RCP_PRESET_ALL, idx.ids, 0x0); + hw_mod_slc_lr_rcp_flush(&ndev->be, idx.ids, 1); + + memset(&db->slc_lr[idx.ids].data, 0x0, sizeof(struct hw_db_inline_slc_lr_data)); + db->slc_lr[idx.ids].ref = 0; + } +} + +/******************************************************************************/ +/* TPE */ +/******************************************************************************/ + +static int hw_db_inline_tpe_compare(const struct hw_db_inline_tpe_data *data1, + const struct hw_db_inline_tpe_data *data2) +{ + for (int i = 0; i < 6; ++i) + if (data1->writer[i].en != data2->writer[i].en || + data1->writer[i].reader_select != data2->writer[i].reader_select || + data1->writer[i].dyn != data2->writer[i].dyn || + data1->writer[i].ofs != data2->writer[i].ofs || + data1->writer[i].len != data2->writer[i].len) + return 0; + + return data1->insert_len == data2->insert_len && data1->new_outer == data2->new_outer && + data1->calc_eth_type_from_inner_ip == data2->calc_eth_type_from_inner_ip && + data1->ttl_en == data2->ttl_en && data1->ttl_dyn == data2->ttl_dyn && + data1->ttl_ofs == data2->ttl_ofs && data1->len_a_en == data2->len_a_en && + data1->len_a_pos_dyn == data2->len_a_pos_dyn && + data1->len_a_pos_ofs == data2->len_a_pos_ofs && + data1->len_a_add_dyn == data2->len_a_add_dyn && + data1->len_a_add_ofs == data2->len_a_add_ofs && + data1->len_a_sub_dyn == data2->len_a_sub_dyn && + data1->len_b_en == data2->len_b_en && + data1->len_b_pos_dyn == data2->len_b_pos_dyn && + data1->len_b_pos_ofs == data2->len_b_pos_ofs && + data1->len_b_add_dyn == data2->len_b_add_dyn && + data1->len_b_add_ofs == data2->len_b_add_ofs && + data1->len_b_sub_dyn == data2->len_b_sub_dyn && + data1->len_c_en == data2->len_c_en && + data1->len_c_pos_dyn == data2->len_c_pos_dyn && + data1->len_c_pos_ofs == data2->len_c_pos_ofs && + data1->len_c_add_dyn == data2->len_c_add_dyn && + data1->len_c_add_ofs == data2->len_c_add_ofs && + data1->len_c_sub_dyn == data2->len_c_sub_dyn; +} + +struct hw_db_tpe_idx hw_db_inline_tpe_add(struct flow_nic_dev *ndev, void *db_handle, + const struct hw_db_inline_tpe_data *data) +{ + struct hw_db_inline_resource_db *db = (struct hw_db_inline_resource_db *)db_handle; + struct hw_db_tpe_idx idx = { .raw = 0 }; + int found = 0; + + idx.type = HW_DB_IDX_TYPE_TPE; + + for (uint32_t i = 1; i < db->nb_tpe; ++i) { + int ref = db->tpe[i].ref; + + if (ref > 0 && hw_db_inline_tpe_compare(data, &db->tpe[i].data)) { + idx.ids = i; + hw_db_inline_tpe_ref(ndev, db, idx); + return idx; + } + + if (!found && ref <= 0) { + found = 1; + idx.ids = i; + } + } + + if (!found) { + idx.error = 1; + return idx; + } + + db->tpe[idx.ids].ref = 1; + memcpy(&db->tpe[idx.ids].data, data, sizeof(struct hw_db_inline_tpe_data)); + + if 
(data->insert_len > 0) { + hw_mod_tpe_rpp_rcp_set(&ndev->be, HW_TPE_RPP_RCP_EXP, idx.ids, data->insert_len); + hw_mod_tpe_rpp_rcp_flush(&ndev->be, idx.ids, 1); + + hw_mod_tpe_ins_rcp_set(&ndev->be, HW_TPE_INS_RCP_DYN, idx.ids, 1); + hw_mod_tpe_ins_rcp_set(&ndev->be, HW_TPE_INS_RCP_OFS, idx.ids, 0); + hw_mod_tpe_ins_rcp_set(&ndev->be, HW_TPE_INS_RCP_LEN, idx.ids, data->insert_len); + hw_mod_tpe_ins_rcp_flush(&ndev->be, idx.ids, 1); + + hw_mod_tpe_rpl_rcp_set(&ndev->be, HW_TPE_RPL_RCP_DYN, idx.ids, 1); + hw_mod_tpe_rpl_rcp_set(&ndev->be, HW_TPE_RPL_RCP_OFS, idx.ids, 0); + hw_mod_tpe_rpl_rcp_set(&ndev->be, HW_TPE_RPL_RCP_LEN, idx.ids, data->insert_len); + hw_mod_tpe_rpl_rcp_set(&ndev->be, HW_TPE_RPL_RCP_RPL_PTR, idx.ids, 0); + hw_mod_tpe_rpl_rcp_set(&ndev->be, HW_TPE_RPL_RCP_EXT_PRIO, idx.ids, 1); + hw_mod_tpe_rpl_rcp_set(&ndev->be, HW_TPE_RPL_RCP_ETH_TYPE_WR, idx.ids, + data->calc_eth_type_from_inner_ip); + hw_mod_tpe_rpl_rcp_flush(&ndev->be, idx.ids, 1); + } + + for (uint32_t i = 0; i < 6; ++i) { + if (data->writer[i].en) { + hw_mod_tpe_cpy_rcp_set(&ndev->be, HW_TPE_CPY_RCP_READER_SELECT, + idx.ids + db->nb_tpe * i, + data->writer[i].reader_select); + hw_mod_tpe_cpy_rcp_set(&ndev->be, HW_TPE_CPY_RCP_DYN, + idx.ids + db->nb_tpe * i, data->writer[i].dyn); + hw_mod_tpe_cpy_rcp_set(&ndev->be, HW_TPE_CPY_RCP_OFS, + idx.ids + db->nb_tpe * i, data->writer[i].ofs); + hw_mod_tpe_cpy_rcp_set(&ndev->be, HW_TPE_CPY_RCP_LEN, + idx.ids + db->nb_tpe * i, data->writer[i].len); + + } else { + hw_mod_tpe_cpy_rcp_set(&ndev->be, HW_TPE_CPY_RCP_READER_SELECT, + idx.ids + db->nb_tpe * i, 0); + hw_mod_tpe_cpy_rcp_set(&ndev->be, HW_TPE_CPY_RCP_DYN, + idx.ids + db->nb_tpe * i, 0); + hw_mod_tpe_cpy_rcp_set(&ndev->be, HW_TPE_CPY_RCP_OFS, + idx.ids + db->nb_tpe * i, 0); + hw_mod_tpe_cpy_rcp_set(&ndev->be, HW_TPE_CPY_RCP_LEN, + idx.ids + db->nb_tpe * i, 0); + } + + hw_mod_tpe_cpy_rcp_flush(&ndev->be, idx.ids + db->nb_tpe * i, 1); + } + + hw_mod_tpe_hfu_rcp_set(&ndev->be, HW_TPE_HFU_RCP_LEN_A_WR, idx.ids, data->len_a_en); + hw_mod_tpe_hfu_rcp_set(&ndev->be, HW_TPE_HFU_RCP_LEN_A_OUTER_L4_LEN, idx.ids, + data->new_outer); + hw_mod_tpe_hfu_rcp_set(&ndev->be, HW_TPE_HFU_RCP_LEN_A_POS_DYN, idx.ids, + data->len_a_pos_dyn); + hw_mod_tpe_hfu_rcp_set(&ndev->be, HW_TPE_HFU_RCP_LEN_A_POS_OFS, idx.ids, + data->len_a_pos_ofs); + hw_mod_tpe_hfu_rcp_set(&ndev->be, HW_TPE_HFU_RCP_LEN_A_ADD_DYN, idx.ids, + data->len_a_add_dyn); + hw_mod_tpe_hfu_rcp_set(&ndev->be, HW_TPE_HFU_RCP_LEN_A_ADD_OFS, idx.ids, + data->len_a_add_ofs); + hw_mod_tpe_hfu_rcp_set(&ndev->be, HW_TPE_HFU_RCP_LEN_A_SUB_DYN, idx.ids, + data->len_a_sub_dyn); + + hw_mod_tpe_hfu_rcp_set(&ndev->be, HW_TPE_HFU_RCP_LEN_B_WR, idx.ids, data->len_b_en); + hw_mod_tpe_hfu_rcp_set(&ndev->be, HW_TPE_HFU_RCP_LEN_B_POS_DYN, idx.ids, + data->len_b_pos_dyn); + hw_mod_tpe_hfu_rcp_set(&ndev->be, HW_TPE_HFU_RCP_LEN_B_POS_OFS, idx.ids, + data->len_b_pos_ofs); + hw_mod_tpe_hfu_rcp_set(&ndev->be, HW_TPE_HFU_RCP_LEN_B_ADD_DYN, idx.ids, + data->len_b_add_dyn); + hw_mod_tpe_hfu_rcp_set(&ndev->be, HW_TPE_HFU_RCP_LEN_B_ADD_OFS, idx.ids, + data->len_b_add_ofs); + hw_mod_tpe_hfu_rcp_set(&ndev->be, HW_TPE_HFU_RCP_LEN_B_SUB_DYN, idx.ids, + data->len_b_sub_dyn); + + hw_mod_tpe_hfu_rcp_set(&ndev->be, HW_TPE_HFU_RCP_LEN_C_WR, idx.ids, data->len_c_en); + hw_mod_tpe_hfu_rcp_set(&ndev->be, HW_TPE_HFU_RCP_LEN_C_POS_DYN, idx.ids, + data->len_c_pos_dyn); + hw_mod_tpe_hfu_rcp_set(&ndev->be, HW_TPE_HFU_RCP_LEN_C_POS_OFS, idx.ids, + data->len_c_pos_ofs); + hw_mod_tpe_hfu_rcp_set(&ndev->be, 
HW_TPE_HFU_RCP_LEN_C_ADD_DYN, idx.ids, + data->len_c_add_dyn); + hw_mod_tpe_hfu_rcp_set(&ndev->be, HW_TPE_HFU_RCP_LEN_C_ADD_OFS, idx.ids, + data->len_c_add_ofs); + hw_mod_tpe_hfu_rcp_set(&ndev->be, HW_TPE_HFU_RCP_LEN_C_SUB_DYN, idx.ids, + data->len_c_sub_dyn); + + hw_mod_tpe_hfu_rcp_set(&ndev->be, HW_TPE_HFU_RCP_TTL_WR, idx.ids, data->ttl_en); + hw_mod_tpe_hfu_rcp_set(&ndev->be, HW_TPE_HFU_RCP_TTL_POS_DYN, idx.ids, data->ttl_dyn); + hw_mod_tpe_hfu_rcp_set(&ndev->be, HW_TPE_HFU_RCP_TTL_POS_OFS, idx.ids, data->ttl_ofs); + hw_mod_tpe_hfu_rcp_flush(&ndev->be, idx.ids, 1); + + hw_mod_tpe_csu_rcp_set(&ndev->be, HW_TPE_CSU_RCP_OUTER_L3_CMD, idx.ids, 3); + hw_mod_tpe_csu_rcp_set(&ndev->be, HW_TPE_CSU_RCP_OUTER_L4_CMD, idx.ids, 3); + hw_mod_tpe_csu_rcp_set(&ndev->be, HW_TPE_CSU_RCP_INNER_L3_CMD, idx.ids, 3); + hw_mod_tpe_csu_rcp_set(&ndev->be, HW_TPE_CSU_RCP_INNER_L4_CMD, idx.ids, 3); + hw_mod_tpe_csu_rcp_flush(&ndev->be, idx.ids, 1); + + return idx; +} + +void hw_db_inline_tpe_ref(struct flow_nic_dev *ndev, void *db_handle, struct hw_db_tpe_idx idx) +{ + (void)ndev; + struct hw_db_inline_resource_db *db = (struct hw_db_inline_resource_db *)db_handle; + + if (!idx.error) + db->tpe[idx.ids].ref += 1; +} + +void hw_db_inline_tpe_deref(struct flow_nic_dev *ndev, void *db_handle, struct hw_db_tpe_idx idx) +{ + struct hw_db_inline_resource_db *db = (struct hw_db_inline_resource_db *)db_handle; + + if (idx.error) + return; + + db->tpe[idx.ids].ref -= 1; + + if (db->tpe[idx.ids].ref <= 0) { + for (uint32_t i = 0; i < 6; ++i) { + hw_mod_tpe_cpy_rcp_set(&ndev->be, HW_TPE_PRESET_ALL, + idx.ids + db->nb_tpe * i, 0); + hw_mod_tpe_cpy_rcp_flush(&ndev->be, idx.ids + db->nb_tpe * i, 1); + } + + hw_mod_tpe_rpp_rcp_set(&ndev->be, HW_TPE_PRESET_ALL, idx.ids, 0); + hw_mod_tpe_rpp_rcp_flush(&ndev->be, idx.ids, 1); + + hw_mod_tpe_ins_rcp_set(&ndev->be, HW_TPE_PRESET_ALL, idx.ids, 0); + hw_mod_tpe_ins_rcp_flush(&ndev->be, idx.ids, 1); + + hw_mod_tpe_rpl_rcp_set(&ndev->be, HW_TPE_PRESET_ALL, idx.ids, 0); + hw_mod_tpe_rpl_rcp_flush(&ndev->be, idx.ids, 1); + + hw_mod_tpe_hfu_rcp_set(&ndev->be, HW_TPE_PRESET_ALL, idx.ids, 0); + hw_mod_tpe_hfu_rcp_flush(&ndev->be, idx.ids, 1); + + hw_mod_tpe_csu_rcp_set(&ndev->be, HW_TPE_PRESET_ALL, idx.ids, 0); + hw_mod_tpe_csu_rcp_flush(&ndev->be, idx.ids, 1); + + memset(&db->tpe[idx.ids].data, 0x0, sizeof(struct hw_db_inline_tpe_data)); + db->tpe[idx.ids].ref = 0; + } +} + +/******************************************************************************/ +/* TPE_EXT */ +/******************************************************************************/ + +static int hw_db_inline_tpe_ext_compare(const struct hw_db_inline_tpe_ext_data *data1, + const struct hw_db_inline_tpe_ext_data *data2) +{ + return data1->size == data2->size && + memcmp(data1->hdr8, data2->hdr8, HW_DB_INLINE_MAX_ENCAP_SIZE) == 0; +} + +struct hw_db_tpe_ext_idx hw_db_inline_tpe_ext_add(struct flow_nic_dev *ndev, void *db_handle, + const struct hw_db_inline_tpe_ext_data *data) +{ + struct hw_db_inline_resource_db *db = (struct hw_db_inline_resource_db *)db_handle; + struct hw_db_tpe_ext_idx idx = { .raw = 0 }; + int rpl_rpl_length = ((int)data->size + 15) / 16; + int found = 0, rpl_rpl_index = 0; + + idx.type = HW_DB_IDX_TYPE_TPE_EXT; + + if (data->size > HW_DB_INLINE_MAX_ENCAP_SIZE) { + idx.error = 1; + return idx; + } + + for (uint32_t i = 1; i < db->nb_tpe_ext; ++i) { + int ref = db->tpe_ext[i].ref; + + if (ref > 0 && hw_db_inline_tpe_ext_compare(data, &db->tpe_ext[i].data)) { + idx.ids = i; + 
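+ /* An identical encap header is already present in replacement RAM; reuse that entry. */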
hw_db_inline_tpe_ext_ref(ndev, db, idx); + return idx; + } + + if (!found && ref <= 0) { + found = 1; + idx.ids = i; + } + } + + if (!found) { + idx.error = 1; + return idx; + } + + rpl_rpl_index = flow_nic_alloc_resource_config(ndev, RES_TPE_RPL, rpl_rpl_length, 1); + + if (rpl_rpl_index < 0) { + idx.error = 1; + return idx; + } + + db->tpe_ext[idx.ids].ref = 1; + db->tpe_ext[idx.ids].replace_ram_idx = rpl_rpl_index; + memcpy(&db->tpe_ext[idx.ids].data, data, sizeof(struct hw_db_inline_tpe_ext_data)); + + hw_mod_tpe_rpl_ext_set(&ndev->be, HW_TPE_RPL_EXT_RPL_PTR, idx.ids, rpl_rpl_index); + hw_mod_tpe_rpl_ext_set(&ndev->be, HW_TPE_RPL_EXT_META_RPL_LEN, idx.ids, data->size); + hw_mod_tpe_rpl_ext_flush(&ndev->be, idx.ids, 1); + + for (int i = 0; i < rpl_rpl_length; ++i) { + uint32_t rpl_data[4]; + memcpy(rpl_data, data->hdr32 + i * 4, sizeof(rpl_data)); + hw_mod_tpe_rpl_rpl_set(&ndev->be, HW_TPE_RPL_RPL_VALUE, rpl_rpl_index + i, + rpl_data); + } + + hw_mod_tpe_rpl_rpl_flush(&ndev->be, rpl_rpl_index, rpl_rpl_length); + + return idx; +} + +void hw_db_inline_tpe_ext_ref(struct flow_nic_dev *ndev, void *db_handle, + struct hw_db_tpe_ext_idx idx) +{ + (void)ndev; + struct hw_db_inline_resource_db *db = (struct hw_db_inline_resource_db *)db_handle; + + if (!idx.error) + db->tpe_ext[idx.ids].ref += 1; +} + +void hw_db_inline_tpe_ext_deref(struct flow_nic_dev *ndev, void *db_handle, + struct hw_db_tpe_ext_idx idx) +{ + struct hw_db_inline_resource_db *db = (struct hw_db_inline_resource_db *)db_handle; + + if (idx.error) + return; + + db->tpe_ext[idx.ids].ref -= 1; + + if (db->tpe_ext[idx.ids].ref <= 0) { + const int rpl_rpl_length = ((int)db->tpe_ext[idx.ids].data.size + 15) / 16; + const int rpl_rpl_index = db->tpe_ext[idx.ids].replace_ram_idx; + + hw_mod_tpe_rpl_ext_set(&ndev->be, HW_TPE_PRESET_ALL, idx.ids, 0); + hw_mod_tpe_rpl_ext_flush(&ndev->be, idx.ids, 1); + + for (int i = 0; i < rpl_rpl_length; ++i) { + uint32_t rpl_zero[] = { 0, 0, 0, 0 }; + hw_mod_tpe_rpl_rpl_set(&ndev->be, HW_TPE_RPL_RPL_VALUE, rpl_rpl_index + i, + rpl_zero); + flow_nic_free_resource(ndev, RES_TPE_RPL, rpl_rpl_index + i); + } + + hw_mod_tpe_rpl_rpl_flush(&ndev->be, rpl_rpl_index, rpl_rpl_length); + + memset(&db->tpe_ext[idx.ids].data, 0x0, sizeof(struct hw_db_inline_tpe_ext_data)); + db->tpe_ext[idx.ids].ref = 0; + } +} + + +/******************************************************************************/ +/* CAT */ +/******************************************************************************/ + +static int hw_db_inline_cat_compare(const struct hw_db_inline_cat_data *data1, + const struct hw_db_inline_cat_data *data2) +{ + return data1->vlan_mask == data2->vlan_mask && + data1->mac_port_mask == data2->mac_port_mask && + data1->ptc_mask_frag == data2->ptc_mask_frag && + data1->ptc_mask_l2 == data2->ptc_mask_l2 && + data1->ptc_mask_l3 == data2->ptc_mask_l3 && + data1->ptc_mask_l4 == data2->ptc_mask_l4 && + data1->ptc_mask_tunnel == data2->ptc_mask_tunnel && + data1->ptc_mask_l3_tunnel == data2->ptc_mask_l3_tunnel && + data1->ptc_mask_l4_tunnel == data2->ptc_mask_l4_tunnel && + data1->err_mask_ttl_tunnel == data2->err_mask_ttl_tunnel && + data1->err_mask_ttl == data2->err_mask_ttl && data1->ip_prot == data2->ip_prot && + data1->ip_prot_tunnel == data2->ip_prot_tunnel; +} + +struct hw_db_cat_idx hw_db_inline_cat_add(struct flow_nic_dev *ndev, void *db_handle, + const struct hw_db_inline_cat_data *data) +{ + struct hw_db_inline_resource_db *db = (struct hw_db_inline_resource_db *)db_handle; + struct hw_db_cat_idx idx = { 
.raw = 0 }; + int found = 0; + + idx.type = HW_DB_IDX_TYPE_CAT; + + for (uint32_t i = 0; i < db->nb_cat; ++i) { + int ref = db->cat[i].ref; + + if (ref > 0 && hw_db_inline_cat_compare(data, &db->cat[i].data)) { + idx.ids = i; + hw_db_inline_cat_ref(ndev, db, idx); + return idx; + } + + if (!found && ref <= 0) { + found = 1; + idx.ids = i; + } + } + + if (!found) { + idx.error = 1; + return idx; + } + + db->cat[idx.ids].ref = 1; + memcpy(&db->cat[idx.ids].data, data, sizeof(struct hw_db_inline_cat_data)); + + return idx; +} + +void hw_db_inline_cat_ref(struct flow_nic_dev *ndev, void *db_handle, struct hw_db_cat_idx idx) +{ + (void)ndev; + struct hw_db_inline_resource_db *db = (struct hw_db_inline_resource_db *)db_handle; + + if (!idx.error) + db->cat[idx.ids].ref += 1; +} + +void hw_db_inline_cat_deref(struct flow_nic_dev *ndev, void *db_handle, struct hw_db_cat_idx idx) +{ + (void)ndev; + struct hw_db_inline_resource_db *db = (struct hw_db_inline_resource_db *)db_handle; + + if (idx.error) + return; + + db->cat[idx.ids].ref -= 1; + + if (db->cat[idx.ids].ref <= 0) { + memset(&db->cat[idx.ids].data, 0x0, sizeof(struct hw_db_inline_cat_data)); + db->cat[idx.ids].ref = 0; + } +} + +/******************************************************************************/ +/* KM RCP */ +/******************************************************************************/ + +static int hw_db_inline_km_compare(const struct hw_db_inline_km_rcp_data *data1, + const struct hw_db_inline_km_rcp_data *data2) +{ + return data1->rcp == data2->rcp; +} + +struct hw_db_km_idx hw_db_inline_km_add(struct flow_nic_dev *ndev, void *db_handle, + const struct hw_db_inline_km_rcp_data *data) +{ + struct hw_db_inline_resource_db *db = (struct hw_db_inline_resource_db *)db_handle; + struct hw_db_km_idx idx = { .raw = 0 }; + int found = 0; + + idx.type = HW_DB_IDX_TYPE_KM_RCP; + + for (uint32_t i = 0; i < db->nb_km_rcp; ++i) { + if (!found && db->km[i].ref <= 0) { + found = 1; + idx.id1 = i; + } + + if (db->km[i].ref > 0 && hw_db_inline_km_compare(data, &db->km[i].data)) { + idx.id1 = i; + hw_db_inline_km_ref(ndev, db, idx); + return idx; + } + } + + if (!found) { + idx.error = 1; + return idx; + } + + memcpy(&db->km[idx.id1].data, data, sizeof(struct hw_db_inline_km_rcp_data)); + db->km[idx.id1].ref = 1; + + return idx; +} + +void hw_db_inline_km_ref(struct flow_nic_dev *ndev, void *db_handle, struct hw_db_km_idx idx) +{ + (void)ndev; + struct hw_db_inline_resource_db *db = (struct hw_db_inline_resource_db *)db_handle; + + if (!idx.error) + db->km[idx.id1].ref += 1; +} + +void hw_db_inline_km_deref(struct flow_nic_dev *ndev, void *db_handle, struct hw_db_km_idx idx) +{ + (void)ndev; + struct hw_db_inline_resource_db *db = (struct hw_db_inline_resource_db *)db_handle; + + if (idx.error) + return; + + db->km[idx.id1].ref -= 1; + + if (db->km[idx.id1].ref <= 0) { + memset(&db->km[idx.id1].data, 0x0, sizeof(struct hw_db_inline_km_rcp_data)); + db->km[idx.id1].ref = 0; + } +} + +/******************************************************************************/ +/* KM FT */ +/******************************************************************************/ + +static int hw_db_inline_km_ft_compare(const struct hw_db_inline_km_ft_data *data1, + const struct hw_db_inline_km_ft_data *data2) +{ + return data1->cat.raw == data2->cat.raw && data1->km.raw == data2->km.raw && + data1->action_set.raw == data2->action_set.raw; +} + +struct hw_db_km_ft hw_db_inline_km_ft_add(struct flow_nic_dev *ndev, void *db_handle, + const struct
hw_db_inline_km_ft_data *data) +{ + struct hw_db_inline_resource_db *db = (struct hw_db_inline_resource_db *)db_handle; + struct hw_db_inline_resource_db_km_rcp *km_rcp = &db->km[data->km.id1]; + struct hw_db_km_ft idx = { .raw = 0 }; + uint32_t cat_offset = data->cat.ids * db->nb_cat; + int found = 0; + + idx.type = HW_DB_IDX_TYPE_KM_FT; + idx.id2 = data->km.id1; + idx.id3 = data->cat.ids; + + if (km_rcp->data.rcp == 0) { + idx.id1 = 0; + return idx; + } + + for (uint32_t i = 1; i < db->nb_km_ft; ++i) { + const struct hw_db_inline_resource_db_km_ft *km_ft = &km_rcp->ft[cat_offset + i]; + + if (!found && km_ft->ref <= 0) { + found = 1; + idx.id1 = i; + } + + if (km_ft->ref > 0 && hw_db_inline_km_ft_compare(data, &km_ft->data)) { + idx.id1 = i; + hw_db_inline_km_ft_ref(ndev, db, idx); + return idx; + } + } + + if (!found) { + idx.error = 1; + return idx; + } + + memcpy(&km_rcp->ft[cat_offset + idx.id1].data, data, + sizeof(struct hw_db_inline_km_ft_data)); + km_rcp->ft[cat_offset + idx.id1].ref = 1; + + return idx; +} + +void hw_db_inline_km_ft_ref(struct flow_nic_dev *ndev, void *db_handle, struct hw_db_km_ft idx) +{ + (void)ndev; + struct hw_db_inline_resource_db *db = (struct hw_db_inline_resource_db *)db_handle; + + if (!idx.error) { + uint32_t cat_offset = idx.id3 * db->nb_cat; + db->km[idx.id2].ft[cat_offset + idx.id1].ref += 1; + } +} + +void hw_db_inline_km_ft_deref(struct flow_nic_dev *ndev, void *db_handle, struct hw_db_km_ft idx) +{ + (void)ndev; + struct hw_db_inline_resource_db *db = (struct hw_db_inline_resource_db *)db_handle; + struct hw_db_inline_resource_db_km_rcp *km_rcp = &db->km[idx.id2]; + uint32_t cat_offset = idx.id3 * db->nb_cat; + + if (idx.error) + return; + + km_rcp->ft[cat_offset + idx.id1].ref -= 1; + + if (km_rcp->ft[cat_offset + idx.id1].ref <= 0) { + memset(&km_rcp->ft[cat_offset + idx.id1].data, 0x0, + sizeof(struct hw_db_inline_km_ft_data)); + km_rcp->ft[cat_offset + idx.id1].ref = 0; + } +} + +/******************************************************************************/ +/* FLM RCP */ +/******************************************************************************/ + +static int hw_db_inline_flm_compare(const struct hw_db_inline_flm_rcp_data *data1, + const struct hw_db_inline_flm_rcp_data *data2) +{ + if (data1->qw0_dyn != data2->qw0_dyn || data1->qw0_ofs != data2->qw0_ofs || + data1->qw4_dyn != data2->qw4_dyn || data1->qw4_ofs != data2->qw4_ofs || + data1->sw8_dyn != data2->sw8_dyn || data1->sw8_ofs != data2->sw8_ofs || + data1->sw9_dyn != data2->sw9_dyn || data1->sw9_ofs != data2->sw9_ofs || + data1->outer_prot != data2->outer_prot || data1->inner_prot != data2->inner_prot) { + return 0; + } + + for (int i = 0; i < 10; ++i) + if (data1->mask[i] != data2->mask[i]) + return 0; + + return 1; +} + +struct hw_db_flm_idx hw_db_inline_flm_add(struct flow_nic_dev *ndev, void *db_handle, + const struct hw_db_inline_flm_rcp_data *data, int group) +{ + struct hw_db_inline_resource_db *db = (struct hw_db_inline_resource_db *)db_handle; + struct hw_db_flm_idx idx = { .raw = 0 }; + + idx.type = HW_DB_IDX_TYPE_FLM_RCP; + idx.id1 = group; + + if (group == 0) + return idx; + + if (db->flm[idx.id1].ref > 0) { + if (!hw_db_inline_flm_compare(data, &db->flm[idx.id1].data)) { + idx.error = 1; + return idx; + } + + hw_db_inline_flm_ref(ndev, db, idx); + return idx; + } + + db->flm[idx.id1].ref = 1; + memcpy(&db->flm[idx.id1].data, data, sizeof(struct hw_db_inline_flm_rcp_data)); + + { + uint32_t flm_mask[10] = { + data->mask[0], /* SW9 */ + data->mask[1], /* SW8 */ + 
data->mask[5], data->mask[4], data->mask[3], data->mask[2], /* QW4 */ + data->mask[9], data->mask[8], data->mask[7], data->mask[6], /* QW0 */ + }; + + hw_mod_flm_rcp_set(&ndev->be, HW_FLM_RCP_PRESET_ALL, idx.id1, 0x0); + hw_mod_flm_rcp_set(&ndev->be, HW_FLM_RCP_LOOKUP, idx.id1, 1); + + hw_mod_flm_rcp_set(&ndev->be, HW_FLM_RCP_QW0_DYN, idx.id1, data->qw0_dyn); + hw_mod_flm_rcp_set(&ndev->be, HW_FLM_RCP_QW0_OFS, idx.id1, data->qw0_ofs); + hw_mod_flm_rcp_set(&ndev->be, HW_FLM_RCP_QW0_SEL, idx.id1, 0); + hw_mod_flm_rcp_set(&ndev->be, HW_FLM_RCP_QW4_DYN, idx.id1, data->qw4_dyn); + hw_mod_flm_rcp_set(&ndev->be, HW_FLM_RCP_QW4_OFS, idx.id1, data->qw4_ofs); + + hw_mod_flm_rcp_set(&ndev->be, HW_FLM_RCP_SW8_DYN, idx.id1, data->sw8_dyn); + hw_mod_flm_rcp_set(&ndev->be, HW_FLM_RCP_SW8_OFS, idx.id1, data->sw8_ofs); + hw_mod_flm_rcp_set(&ndev->be, HW_FLM_RCP_SW8_SEL, idx.id1, 0); + hw_mod_flm_rcp_set(&ndev->be, HW_FLM_RCP_SW9_DYN, idx.id1, data->sw9_dyn); + hw_mod_flm_rcp_set(&ndev->be, HW_FLM_RCP_SW9_OFS, idx.id1, data->sw9_ofs); + + hw_mod_flm_rcp_set_mask(&ndev->be, HW_FLM_RCP_MASK, idx.id1, flm_mask); + + hw_mod_flm_rcp_set(&ndev->be, HW_FLM_RCP_KID, idx.id1, idx.id1 + 2); + hw_mod_flm_rcp_set(&ndev->be, HW_FLM_RCP_OPN, idx.id1, data->outer_prot ? 1 : 0); + hw_mod_flm_rcp_set(&ndev->be, HW_FLM_RCP_IPN, idx.id1, data->inner_prot ? 1 : 0); + hw_mod_flm_rcp_set(&ndev->be, HW_FLM_RCP_BYT_DYN, idx.id1, 0); + hw_mod_flm_rcp_set(&ndev->be, HW_FLM_RCP_BYT_OFS, idx.id1, -20); + hw_mod_flm_rcp_set(&ndev->be, HW_FLM_RCP_TXPLM, idx.id1, UINT32_MAX); + + hw_mod_flm_rcp_flush(&ndev->be, idx.id1, 1); + } + + return idx; +} + +void hw_db_inline_flm_ref(struct flow_nic_dev *ndev, void *db_handle, struct hw_db_flm_idx idx) +{ + (void)ndev; + struct hw_db_inline_resource_db *db = (struct hw_db_inline_resource_db *)db_handle; + + if (!idx.error) + db->flm[idx.id1].ref += 1; +} + +void hw_db_inline_flm_deref(struct flow_nic_dev *ndev, void *db_handle, struct hw_db_flm_idx idx) +{ + struct hw_db_inline_resource_db *db = (struct hw_db_inline_resource_db *)db_handle; + + if (idx.error) + return; + + if (idx.id1 > 0) { + db->flm[idx.id1].ref -= 1; + + if (db->flm[idx.id1].ref <= 0) { + memset(&db->flm[idx.id1].data, 0x0, + sizeof(struct hw_db_inline_flm_rcp_data)); + db->flm[idx.id1].ref = 0; + + hw_db_inline_setup_default_flm_rcp(ndev, idx.id1); + } + } +} + +/******************************************************************************/ +/* FLM FT */ +/******************************************************************************/ + +static int hw_db_inline_flm_ft_compare(const struct hw_db_inline_flm_ft_data *data1, + const struct hw_db_inline_flm_ft_data *data2) +{ + return data1->is_group_zero == data2->is_group_zero && data1->jump == data2->jump && + data1->action_set.raw == data2->action_set.raw; +} + +struct hw_db_flm_ft hw_db_inline_flm_ft_default(struct flow_nic_dev *ndev, void *db_handle, + const struct hw_db_inline_flm_ft_data *data) +{ + struct hw_db_inline_resource_db *db = (struct hw_db_inline_resource_db *)db_handle; + struct hw_db_inline_resource_db_flm_rcp *flm_rcp = &db->flm[data->jump]; + struct hw_db_flm_ft idx = { .raw = 0 }; + + idx.type = HW_DB_IDX_TYPE_FLM_FT; + idx.id1 = 0; + idx.id2 = data->group & 0xff; + + if (data->is_group_zero) { + idx.error = 1; + return idx; + } + + if (flm_rcp->ft[idx.id1].ref > 0) { + if (!hw_db_inline_flm_ft_compare(data, &flm_rcp->ft[idx.id1].data)) { + idx.error = 1; + return idx; + } + + hw_db_inline_flm_ft_ref(ndev, db, idx); + return idx; + } + + 
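+ /* First use of this group's default flow type (index 0): record it and re-evaluate + * which CFN filters must be active for the group. + */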
memcpy(&flm_rcp->ft[idx.id1].data, data, sizeof(struct hw_db_inline_flm_ft_data)); + flm_rcp->ft[idx.id1].idx.raw = idx.raw; + flm_rcp->ft[idx.id1].ref = 1; + + hw_db_inline_update_active_filters(ndev, db, data->jump); + + return idx; +} + +struct hw_db_flm_ft hw_db_inline_flm_ft_add(struct flow_nic_dev *ndev, void *db_handle, + const struct hw_db_inline_flm_ft_data *data) +{ + struct hw_db_inline_resource_db *db = (struct hw_db_inline_resource_db *)db_handle; + struct hw_db_inline_resource_db_flm_rcp *flm_rcp = &db->flm[data->group]; + struct hw_db_flm_ft idx = { .raw = 0 }; + int found = 0; + + idx.type = HW_DB_IDX_TYPE_FLM_FT; + idx.id1 = 0; + idx.id2 = data->group & 0xff; + + /* RCP 0 always uses FT 1; i.e. use unhandled FT for disabled RCP */ + if (data->group == 0) { + idx.id1 = 1; + return idx; + } + + if (data->is_group_zero) { + idx.id3 = 1; + return idx; + } + + /* FLM_FT records 0, 1 and last (15) are reserved */ + /* NOTE: RES_FLM_FLOW_TYPE resource is global and it cannot be used in _add() and _deref() + * to track usage of FLM_FT recipes which are group specific. + */ + for (uint32_t i = 2; i < db->nb_flm_ft; ++i) { + if (!found && flm_rcp->ft[i].ref <= 0 && + !flow_nic_is_resource_used(ndev, RES_FLM_FLOW_TYPE, i)) { + found = 1; + idx.id1 = i; + } + + if (flm_rcp->ft[i].ref > 0 && + hw_db_inline_flm_ft_compare(data, &flm_rcp->ft[i].data)) { + idx.id1 = i; + hw_db_inline_flm_ft_ref(ndev, db, idx); + return idx; + } + } + + if (!found) { + idx.error = 1; + return idx; + } + + memcpy(&flm_rcp->ft[idx.id1].data, data, sizeof(struct hw_db_inline_flm_ft_data)); + flm_rcp->ft[idx.id1].idx.raw = idx.raw; + flm_rcp->ft[idx.id1].ref = 1; + + hw_db_inline_update_active_filters(ndev, db, data->group); + + return idx; +} + +void hw_db_inline_flm_ft_ref(struct flow_nic_dev *ndev, void *db_handle, struct hw_db_flm_ft idx) +{ + (void)ndev; + struct hw_db_inline_resource_db *db = (struct hw_db_inline_resource_db *)db_handle; + + if (!idx.error && idx.id3 == 0) + db->flm[idx.id2].ft[idx.id1].ref += 1; +} + +void hw_db_inline_flm_ft_deref(struct flow_nic_dev *ndev, void *db_handle, struct hw_db_flm_ft idx) +{ + (void)ndev; + (void)db_handle; + struct hw_db_inline_resource_db *db = (struct hw_db_inline_resource_db *)db_handle; + struct hw_db_inline_resource_db_flm_rcp *flm_rcp; + + if (idx.error || idx.id2 == 0 || idx.id3 > 0) + return; + + flm_rcp = &db->flm[idx.id2]; + + flm_rcp->ft[idx.id1].ref -= 1; + + if (flm_rcp->ft[idx.id1].ref > 0) + return; + + flm_rcp->ft[idx.id1].ref = 0; + hw_db_inline_update_active_filters(ndev, db, idx.id2); + memset(&flm_rcp->ft[idx.id1], 0x0, sizeof(struct hw_db_inline_resource_db_flm_ft)); +} + +/******************************************************************************/ +/* HSH */ +/******************************************************************************/ + +static int hw_db_inline_hsh_compare(const struct hw_db_inline_hsh_data *data1, + const struct hw_db_inline_hsh_data *data2) +{ + for (uint32_t i = 0; i < MAX_RSS_KEY_LEN; ++i) + if (data1->key[i] != data2->key[i]) + return 0; + + return data1->func == data2->func && data1->hash_mask == data2->hash_mask; +} + +struct hw_db_hsh_idx hw_db_inline_hsh_add(struct flow_nic_dev *ndev, void *db_handle, + const struct hw_db_inline_hsh_data *data) +{ + struct hw_db_inline_resource_db *db = (struct hw_db_inline_resource_db *)db_handle; + struct hw_db_hsh_idx idx = { .raw = 0 }; + int found = 0; + + idx.type = HW_DB_IDX_TYPE_HSH; + + /* check if default hash configuration shall be used, i.e. 
rss_hf is not set */ + /* + * NOTE: hsh id 0 is reserved for "default" + * HSH used by port configuration; All ports share the same default hash settings. + */ + if (data->hash_mask == 0) { + idx.ids = 0; + hw_db_inline_hsh_ref(ndev, db, idx); + return idx; + } + + for (uint32_t i = 1; i < db->nb_hsh; ++i) { + int ref = db->hsh[i].ref; + + if (ref > 0 && hw_db_inline_hsh_compare(data, &db->hsh[i].data)) { + idx.ids = i; + hw_db_inline_hsh_ref(ndev, db, idx); + return idx; + } + + if (!found && ref <= 0) { + found = 1; + idx.ids = i; + } + } + + if (!found) { + idx.error = 1; + return idx; + } + + struct nt_eth_rss_conf tmp_rss_conf; + + tmp_rss_conf.rss_hf = data->hash_mask; + memcpy(tmp_rss_conf.rss_key, data->key, MAX_RSS_KEY_LEN); + tmp_rss_conf.algorithm = data->func; + int res = flow_nic_set_hasher_fields(ndev, idx.ids, tmp_rss_conf); + + if (res != 0) { + idx.error = 1; + return idx; + } + + db->hsh[idx.ids].ref = 1; + memcpy(&db->hsh[idx.ids].data, data, sizeof(struct hw_db_inline_hsh_data)); + flow_nic_mark_resource_used(ndev, RES_HSH_RCP, idx.ids); + + hw_mod_hsh_rcp_flush(&ndev->be, idx.ids, 1); + + return idx; +} + +void hw_db_inline_hsh_ref(struct flow_nic_dev *ndev, void *db_handle, struct hw_db_hsh_idx idx) +{ + (void)ndev; + struct hw_db_inline_resource_db *db = (struct hw_db_inline_resource_db *)db_handle; + + if (!idx.error) + db->hsh[idx.ids].ref += 1; +} + +void hw_db_inline_hsh_deref(struct flow_nic_dev *ndev, void *db_handle, struct hw_db_hsh_idx idx) +{ + struct hw_db_inline_resource_db *db = (struct hw_db_inline_resource_db *)db_handle; + + if (idx.error) + return; + + db->hsh[idx.ids].ref -= 1; + + if (db->hsh[idx.ids].ref <= 0) { + /* + * NOTE: hsh id 0 is reserved for "default" HSH used by + * port configuration, so we shall keep it even if + * it is not used by any flow + */ + if (idx.ids > 0) { + hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_PRESET_ALL, idx.ids, 0, 0x0); + hw_mod_hsh_rcp_flush(&ndev->be, idx.ids, 1); + + memset(&db->hsh[idx.ids].data, 0x0, sizeof(struct hw_db_inline_hsh_data)); + flow_nic_free_resource(ndev, RES_HSH_RCP, idx.ids); + } + + db->hsh[idx.ids].ref = 0; + } +} + +/******************************************************************************/ +/* FML SCRUB */ +/******************************************************************************/ + +static int hw_db_inline_scrub_compare(const struct hw_db_inline_scrub_data *data1, + const struct hw_db_inline_scrub_data *data2) +{ + return data1->timeout == data2->timeout; +} + +struct hw_db_flm_scrub_idx hw_db_inline_scrub_add(struct flow_nic_dev *ndev, void *db_handle, + const struct hw_db_inline_scrub_data *data) +{ + struct hw_db_inline_resource_db *db = (struct hw_db_inline_resource_db *)db_handle; + struct hw_db_flm_scrub_idx idx = { .raw = 0 }; + int found = 0; + + idx.type = HW_DB_IDX_TYPE_FLM_SCRUB; + + /* NOTE: scrub id 0 is reserved for "default" timeout 0, i.e. 
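For orientation, a minimal caller-side sketch of the HSH lifecycle implemented above (illustrative only; ndev, db_handle and rss_key stand in for the caller's context, and error handling is reduced to checking the error bit):

struct hw_db_inline_hsh_data hsh_data = {
        .func = RTE_ETH_HASH_FUNCTION_TOEPLITZ,
        .hash_mask = RTE_ETH_RSS_IP,
};
memcpy(hsh_data.key, rss_key, MAX_RSS_KEY_LEN);

struct hw_db_hsh_idx hsh_idx = hw_db_inline_hsh_add(ndev, db_handle, &hsh_data);

if (hsh_idx.error) {
        /* no free HSH recipe, or programming the hasher failed */
} else {
        /* ... reference the recipe from the flow's action set ... */
        hw_db_inline_hsh_deref(ndev, db_handle, hsh_idx);
}

/* A zero hash_mask instead resolves to the shared default recipe 0,
 * which the deref path never tears down.
 */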
flow will never AGE-out */ + if (data->timeout == 0) { + idx.ids = 0; + hw_db_inline_scrub_ref(ndev, db, idx); + return idx; + } + + for (uint32_t i = 1; i < db->nb_scrub; ++i) { + int ref = db->scrub[i].ref; + + if (ref > 0 && hw_db_inline_scrub_compare(data, &db->scrub[i].data)) { + idx.ids = i; + hw_db_inline_scrub_ref(ndev, db, idx); + return idx; + } + + if (!found && ref <= 0) { + found = 1; + idx.ids = i; + } + } + + if (!found) { + idx.error = 1; + return idx; + } + + int res = hw_mod_flm_scrub_set(&ndev->be, HW_FLM_SCRUB_T, idx.ids, data->timeout); + res |= hw_mod_flm_scrub_set(&ndev->be, HW_FLM_SCRUB_R, idx.ids, + NTNIC_SCANNER_TIMEOUT_RESOLUTION); + res |= hw_mod_flm_scrub_set(&ndev->be, HW_FLM_SCRUB_DEL, idx.ids, SCRUB_DEL); + res |= hw_mod_flm_scrub_set(&ndev->be, HW_FLM_SCRUB_INF, idx.ids, SCRUB_INF); + + if (res != 0) { + idx.error = 1; + return idx; + } + + db->scrub[idx.ids].ref = 1; + memcpy(&db->scrub[idx.ids].data, data, sizeof(struct hw_db_inline_scrub_data)); + flow_nic_mark_resource_used(ndev, RES_SCRUB_RCP, idx.ids); + + hw_mod_flm_scrub_flush(&ndev->be, idx.ids, 1); + + return idx; +} + +void hw_db_inline_scrub_ref(struct flow_nic_dev *ndev, void *db_handle, + struct hw_db_flm_scrub_idx idx) +{ + (void)ndev; + + struct hw_db_inline_resource_db *db = (struct hw_db_inline_resource_db *)db_handle; + + if (!idx.error) + db->scrub[idx.ids].ref += 1; +} + +void hw_db_inline_scrub_deref(struct flow_nic_dev *ndev, void *db_handle, + struct hw_db_flm_scrub_idx idx) +{ + struct hw_db_inline_resource_db *db = (struct hw_db_inline_resource_db *)db_handle; + + if (idx.error) + return; + + db->scrub[idx.ids].ref -= 1; + + if (db->scrub[idx.ids].ref <= 0) { + /* NOTE: scrub id 0 is reserved for "default" timeout 0, which shall not be removed + */ + if (idx.ids > 0) { + hw_mod_flm_scrub_set(&ndev->be, HW_FLM_SCRUB_T, idx.ids, 0); + hw_mod_flm_scrub_flush(&ndev->be, idx.ids, 1); + + memset(&db->scrub[idx.ids].data, 0x0, + sizeof(struct hw_db_inline_scrub_data)); + flow_nic_free_resource(ndev, RES_SCRUB_RCP, idx.ids); + } + + db->scrub[idx.ids].ref = 0; + } +} diff --git a/drivers/net/ntnic/nthw/flow_api/profile_inline/flow_api_hw_db_inline.h b/drivers/net/ntnic/nthw/flow_api/profile_inline/flow_api_hw_db_inline.h new file mode 100644 index 0000000000..aa046b68a7 --- /dev/null +++ b/drivers/net/ntnic/nthw/flow_api/profile_inline/flow_api_hw_db_inline.h @@ -0,0 +1,394 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2023 Napatech A/S + */ + +#ifndef _FLOW_API_HW_DB_INLINE_H_ +#define _FLOW_API_HW_DB_INLINE_H_ + +#include + +#include "flow_api.h" + +#define HW_DB_INLINE_MAX_QST_PER_QSL 128 +#define HW_DB_INLINE_MAX_ENCAP_SIZE 128 + +#define HW_DB_IDX \ + union { \ + struct { \ + uint32_t id1 : 8; \ + uint32_t id2 : 8; \ + uint32_t id3 : 8; \ + uint32_t type : 7; \ + uint32_t error : 1; \ + }; \ + struct { \ + uint32_t ids : 24; \ + }; \ + uint32_t raw; \ + } + +/* Strongly typed int types */ +struct hw_db_idx { + HW_DB_IDX; +}; + +struct hw_db_match_set_idx { + HW_DB_IDX; +}; + +struct hw_db_action_set_idx { + HW_DB_IDX; +}; + +struct hw_db_cot_idx { + HW_DB_IDX; +}; + +struct hw_db_qsl_idx { + HW_DB_IDX; +}; + +struct hw_db_cat_idx { + HW_DB_IDX; +}; + +struct hw_db_slc_lr_idx { + HW_DB_IDX; +}; + +struct hw_db_inline_tpe_data { + uint32_t insert_len : 16; + uint32_t new_outer : 1; + uint32_t calc_eth_type_from_inner_ip : 1; + uint32_t ttl_en : 1; + uint32_t ttl_dyn : 5; + uint32_t ttl_ofs : 8; + + struct { + uint32_t en : 1; + uint32_t reader_select : 3; + uint32_t dyn 
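All of the handle types below share the HW_DB_IDX layout; a small stand-alone C11 snippet (illustrative only, assuming the little-endian GCC/Clang bitfield order the driver is built with) shows how the three views of the same 32-bit handle alias:

#include <stdint.h>
#include <stdio.h>

struct demo_idx {
        union {
                struct {
                        uint32_t id1 : 8;
                        uint32_t id2 : 8;
                        uint32_t id3 : 8;
                        uint32_t type : 7;
                        uint32_t error : 1;
                };
                struct {
                        uint32_t ids : 24;
                };
                uint32_t raw;
        };
};

int main(void)
{
        struct demo_idx idx = { .raw = 0 };

        idx.id1 = 5;    /* e.g. FLM FT record */
        idx.id2 = 3;    /* e.g. owning group */
        idx.type = 12;

        /* id1/id2/id3 alias the low 24 bits also exposed as 'ids' */
        printf("ids=0x%06x error=%u\n", (unsigned int)idx.ids, (unsigned int)idx.error);
        return 0;
}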
: 5; + uint32_t ofs : 14; + uint32_t len : 5; + uint32_t padding : 4; + } writer[6]; + + uint32_t len_a_en : 1; + uint32_t len_a_pos_dyn : 5; + uint32_t len_a_pos_ofs : 8; + uint32_t len_a_add_dyn : 5; + uint32_t len_a_add_ofs : 8; + uint32_t len_a_sub_dyn : 5; + + uint32_t len_b_en : 1; + uint32_t len_b_pos_dyn : 5; + uint32_t len_b_pos_ofs : 8; + uint32_t len_b_add_dyn : 5; + uint32_t len_b_add_ofs : 8; + uint32_t len_b_sub_dyn : 5; + + uint32_t len_c_en : 1; + uint32_t len_c_pos_dyn : 5; + uint32_t len_c_pos_ofs : 8; + uint32_t len_c_add_dyn : 5; + uint32_t len_c_add_ofs : 8; + uint32_t len_c_sub_dyn : 5; +}; + +struct hw_db_inline_tpe_ext_data { + uint32_t size; + union { + uint8_t hdr8[HW_DB_INLINE_MAX_ENCAP_SIZE]; + uint32_t hdr32[(HW_DB_INLINE_MAX_ENCAP_SIZE + 3) / 4]; + }; +}; + +struct hw_db_tpe_idx { + HW_DB_IDX; +}; +struct hw_db_tpe_ext_idx { + HW_DB_IDX; +}; + +struct hw_db_flm_idx { + HW_DB_IDX; +}; +struct hw_db_flm_ft { + HW_DB_IDX; +}; + +struct hw_db_flm_scrub_idx { + HW_DB_IDX; +}; + +struct hw_db_km_idx { + HW_DB_IDX; +}; + +struct hw_db_km_ft { + HW_DB_IDX; +}; + +struct hw_db_hsh_idx { + HW_DB_IDX; +}; + +enum hw_db_idx_type { + HW_DB_IDX_TYPE_NONE = 0, + + HW_DB_IDX_TYPE_MATCH_SET, + HW_DB_IDX_TYPE_ACTION_SET, + + HW_DB_IDX_TYPE_COT, + HW_DB_IDX_TYPE_CAT, + HW_DB_IDX_TYPE_QSL, + HW_DB_IDX_TYPE_SLC_LR, + HW_DB_IDX_TYPE_TPE, + HW_DB_IDX_TYPE_TPE_EXT, + + HW_DB_IDX_TYPE_FLM_RCP, + HW_DB_IDX_TYPE_KM_RCP, + HW_DB_IDX_TYPE_FLM_FT, + HW_DB_IDX_TYPE_FLM_SCRUB, + HW_DB_IDX_TYPE_KM_FT, + HW_DB_IDX_TYPE_HSH, +}; + +/* Container types */ +struct hw_db_inline_match_set_data { + struct hw_db_cat_idx cat; + struct hw_db_km_idx km; + struct hw_db_km_ft km_ft; + struct hw_db_action_set_idx action_set; + int jump; + + uint8_t priority; +}; + +struct hw_db_inline_action_set_data { + int contains_jump; + union { + int jump; + struct { + struct hw_db_cot_idx cot; + struct hw_db_qsl_idx qsl; + struct hw_db_slc_lr_idx slc_lr; + struct hw_db_tpe_idx tpe; + struct hw_db_hsh_idx hsh; + struct hw_db_flm_scrub_idx scrub; + }; + }; +}; + +struct hw_db_inline_km_rcp_data { + uint32_t rcp; +}; + +struct hw_db_inline_km_ft_data { + struct hw_db_cat_idx cat; + struct hw_db_km_idx km; + struct hw_db_action_set_idx action_set; +}; + +struct hw_db_inline_flm_ft_data { + /* Group zero flows should set jump. */ + /* Group nonzero flows should set group. 
*/ + int is_group_zero; + union { + int jump; + int group; + }; + + struct hw_db_action_set_idx action_set; +}; + +/* Functionality data types */ +struct hw_db_inline_cat_data { + uint32_t vlan_mask : 4; + uint32_t mac_port_mask : 8; + uint32_t ptc_mask_frag : 4; + uint32_t ptc_mask_l2 : 7; + uint32_t ptc_mask_l3 : 3; + uint32_t ptc_mask_l4 : 5; + uint32_t padding0 : 1; + + uint32_t ptc_mask_tunnel : 11; + uint32_t ptc_mask_l3_tunnel : 3; + uint32_t ptc_mask_l4_tunnel : 5; + uint32_t err_mask_ttl_tunnel : 2; + uint32_t err_mask_ttl : 2; + uint32_t padding1 : 9; + + uint8_t ip_prot; + uint8_t ip_prot_tunnel; +}; + +struct hw_db_inline_flm_rcp_data { + uint64_t qw0_dyn : 5; + uint64_t qw0_ofs : 8; + uint64_t qw4_dyn : 5; + uint64_t qw4_ofs : 8; + uint64_t sw8_dyn : 5; + uint64_t sw8_ofs : 8; + uint64_t sw9_dyn : 5; + uint64_t sw9_ofs : 8; + uint64_t outer_prot : 1; + uint64_t inner_prot : 1; + uint64_t padding : 10; + + uint32_t mask[10]; +}; + +struct hw_db_inline_qsl_data { + uint32_t discard : 1; + uint32_t drop : 1; + uint32_t table_size : 7; + uint32_t retransmit : 1; + uint32_t padding : 22; + + struct { + uint16_t queue : 7; + uint16_t queue_en : 1; + uint16_t tx_port : 3; + uint16_t tx_port_en : 1; + uint16_t padding : 4; + } table[HW_DB_INLINE_MAX_QST_PER_QSL]; +}; + +struct hw_db_inline_cot_data { + uint32_t matcher_color_contrib : 4; + uint32_t frag_rcp : 4; + uint32_t padding : 24; +}; + +struct hw_db_inline_slc_lr_data { + uint32_t head_slice_en : 1; + uint32_t head_slice_dyn : 5; + uint32_t head_slice_ofs : 8; + uint32_t padding : 18; +}; + +struct hw_db_inline_hsh_data { + uint32_t func; + uint64_t hash_mask; + uint8_t key[MAX_RSS_KEY_LEN]; +}; + +struct hw_db_inline_scrub_data { + uint32_t timeout; +}; + +/**/ + +int hw_db_inline_create(struct flow_nic_dev *ndev, void **db_handle); +void hw_db_inline_destroy(void *db_handle); + +void hw_db_inline_deref_idxs(struct flow_nic_dev *ndev, void *db_handle, struct hw_db_idx *idxs, + uint32_t size); +const void *hw_db_inline_find_data(struct flow_nic_dev *ndev, void *db_handle, + enum hw_db_idx_type type, struct hw_db_idx *idxs, uint32_t size); +struct hw_db_idx *hw_db_inline_find_idx(struct flow_nic_dev *ndev, void *db_handle, + enum hw_db_idx_type type, struct hw_db_idx *idxs, uint32_t size); +void hw_db_inline_dump(struct flow_nic_dev *ndev, void *db_handle, const struct hw_db_idx *idxs, + uint32_t size, FILE *file); +void hw_db_inline_dump_cfn(struct flow_nic_dev *ndev, void *db_handle, FILE *file); + +/**/ + +struct hw_db_match_set_idx +hw_db_inline_match_set_add(struct flow_nic_dev *ndev, void *db_handle, + const struct hw_db_inline_match_set_data *data); +void hw_db_inline_match_set_ref(struct flow_nic_dev *ndev, void *db_handle, + struct hw_db_match_set_idx idx); +void hw_db_inline_match_set_deref(struct flow_nic_dev *ndev, void *db_handle, + struct hw_db_match_set_idx idx); + +struct hw_db_action_set_idx +hw_db_inline_action_set_add(struct flow_nic_dev *ndev, void *db_handle, + const struct hw_db_inline_action_set_data *data); +void hw_db_inline_action_set_ref(struct flow_nic_dev *ndev, void *db_handle, + struct hw_db_action_set_idx idx); +void hw_db_inline_action_set_deref(struct flow_nic_dev *ndev, void *db_handle, + struct hw_db_action_set_idx idx); + +/**/ + +struct hw_db_cot_idx hw_db_inline_cot_add(struct flow_nic_dev *ndev, void *db_handle, + const struct hw_db_inline_cot_data *data); +void hw_db_inline_cot_ref(struct flow_nic_dev *ndev, void *db_handle, struct hw_db_cot_idx idx); +void hw_db_inline_cot_deref(struct 
flow_nic_dev *ndev, void *db_handle, struct hw_db_cot_idx idx); + +struct hw_db_qsl_idx hw_db_inline_qsl_add(struct flow_nic_dev *ndev, void *db_handle, + const struct hw_db_inline_qsl_data *data); +void hw_db_inline_qsl_ref(struct flow_nic_dev *ndev, void *db_handle, struct hw_db_qsl_idx idx); +void hw_db_inline_qsl_deref(struct flow_nic_dev *ndev, void *db_handle, struct hw_db_qsl_idx idx); + +struct hw_db_slc_lr_idx hw_db_inline_slc_lr_add(struct flow_nic_dev *ndev, void *db_handle, + const struct hw_db_inline_slc_lr_data *data); +void hw_db_inline_slc_lr_ref(struct flow_nic_dev *ndev, void *db_handle, + struct hw_db_slc_lr_idx idx); +void hw_db_inline_slc_lr_deref(struct flow_nic_dev *ndev, void *db_handle, + struct hw_db_slc_lr_idx idx); + +struct hw_db_tpe_idx hw_db_inline_tpe_add(struct flow_nic_dev *ndev, void *db_handle, + const struct hw_db_inline_tpe_data *data); +void hw_db_inline_tpe_ref(struct flow_nic_dev *ndev, void *db_handle, struct hw_db_tpe_idx idx); +void hw_db_inline_tpe_deref(struct flow_nic_dev *ndev, void *db_handle, struct hw_db_tpe_idx idx); + +struct hw_db_tpe_ext_idx hw_db_inline_tpe_ext_add(struct flow_nic_dev *ndev, void *db_handle, + const struct hw_db_inline_tpe_ext_data *data); +void hw_db_inline_tpe_ext_ref(struct flow_nic_dev *ndev, void *db_handle, + struct hw_db_tpe_ext_idx idx); +void hw_db_inline_tpe_ext_deref(struct flow_nic_dev *ndev, void *db_handle, + struct hw_db_tpe_ext_idx idx); + +struct hw_db_hsh_idx hw_db_inline_hsh_add(struct flow_nic_dev *ndev, void *db_handle, + const struct hw_db_inline_hsh_data *data); +void hw_db_inline_hsh_ref(struct flow_nic_dev *ndev, void *db_handle, struct hw_db_hsh_idx idx); +void hw_db_inline_hsh_deref(struct flow_nic_dev *ndev, void *db_handle, struct hw_db_hsh_idx idx); + +/**/ + +struct hw_db_cat_idx hw_db_inline_cat_add(struct flow_nic_dev *ndev, void *db_handle, + const struct hw_db_inline_cat_data *data); +void hw_db_inline_cat_ref(struct flow_nic_dev *ndev, void *db_handle, struct hw_db_cat_idx idx); +void hw_db_inline_cat_deref(struct flow_nic_dev *ndev, void *db_handle, struct hw_db_cat_idx idx); + +/**/ + +struct hw_db_km_idx hw_db_inline_km_add(struct flow_nic_dev *ndev, void *db_handle, + const struct hw_db_inline_km_rcp_data *data); +void hw_db_inline_km_ref(struct flow_nic_dev *ndev, void *db_handle, struct hw_db_km_idx idx); +void hw_db_inline_km_deref(struct flow_nic_dev *ndev, void *db_handle, struct hw_db_km_idx idx); + +struct hw_db_km_ft hw_db_inline_km_ft_add(struct flow_nic_dev *ndev, void *db_handle, + const struct hw_db_inline_km_ft_data *data); +void hw_db_inline_km_ft_ref(struct flow_nic_dev *ndev, void *db_handle, struct hw_db_km_ft idx); +void hw_db_inline_km_ft_deref(struct flow_nic_dev *ndev, void *db_handle, struct hw_db_km_ft idx); + +/**/ + +struct hw_db_flm_idx hw_db_inline_flm_add(struct flow_nic_dev *ndev, void *db_handle, + const struct hw_db_inline_flm_rcp_data *data, int group); +void hw_db_inline_flm_ref(struct flow_nic_dev *ndev, void *db_handle, struct hw_db_flm_idx idx); +void hw_db_inline_flm_deref(struct flow_nic_dev *ndev, void *db_handle, struct hw_db_flm_idx idx); + +struct hw_db_flm_ft hw_db_inline_flm_ft_default(struct flow_nic_dev *ndev, void *db_handle, + const struct hw_db_inline_flm_ft_data *data); +struct hw_db_flm_ft hw_db_inline_flm_ft_add(struct flow_nic_dev *ndev, void *db_handle, + const struct hw_db_inline_flm_ft_data *data); +void hw_db_inline_flm_ft_ref(struct flow_nic_dev *ndev, void *db_handle, struct hw_db_flm_ft idx); +void 
hw_db_inline_flm_ft_deref(struct flow_nic_dev *ndev, void *db_handle, + struct hw_db_flm_ft idx); + +struct hw_db_flm_scrub_idx hw_db_inline_scrub_add(struct flow_nic_dev *ndev, void *db_handle, + const struct hw_db_inline_scrub_data *data); +void hw_db_inline_scrub_ref(struct flow_nic_dev *ndev, void *db_handle, + struct hw_db_flm_scrub_idx idx); +void hw_db_inline_scrub_deref(struct flow_nic_dev *ndev, void *db_handle, + struct hw_db_flm_scrub_idx idx); + +int hw_db_inline_setup_mbr_filter(struct flow_nic_dev *ndev, uint32_t cat_hw_id, uint32_t ft, + uint32_t qsl_hw_id); + +#endif /* _FLOW_API_HW_DB_INLINE_H_ */ diff --git a/drivers/net/ntnic/nthw/flow_api/profile_inline/flow_api_profile_inline.c b/drivers/net/ntnic/nthw/flow_api/profile_inline/flow_api_profile_inline.c new file mode 100644 index 0000000000..a34839e00c --- /dev/null +++ b/drivers/net/ntnic/nthw/flow_api/profile_inline/flow_api_profile_inline.c @@ -0,0 +1,6082 @@ +/* + * SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2023 Napatech A/S + */ + +#include "ntlog.h" +#include "nt_util.h" + +#include "hw_mod_backend.h" +#include "flm_age_queue.h" +#include "flm_evt_queue.h" +#include "flm_lrn_queue.h" +#include "flow_api.h" +#include "flow_api_engine.h" +#include "flow_api_hw_db_inline.h" +#include "flow_api_profile_inline_config.h" +#include "flow_id_table.h" +#include "rte_flow.h" +#include "stream_binary_flow_api.h" + +#include "flow_api_profile_inline.h" +#include "ntnic_mod_reg.h" +#include +#include + +#define FLM_MTR_PROFILE_SIZE 0x100000 +#define FLM_MTR_STAT_SIZE 0x1000000 +#define UINT64_MSB ((uint64_t)1 << 63) + +#define DMA_BLOCK_SIZE 256 +#define DMA_OVERHEAD 20 +#define WORDS_PER_STA_DATA (sizeof(struct flm_v25_sta_data_s) / sizeof(uint32_t)) +#define MAX_STA_DATA_RECORDS_PER_READ ((DMA_BLOCK_SIZE - DMA_OVERHEAD) / WORDS_PER_STA_DATA) +#define WORDS_PER_INF_DATA (sizeof(struct flm_v25_inf_data_s) / sizeof(uint32_t)) +#define MAX_INF_DATA_RECORDS_PER_READ ((DMA_BLOCK_SIZE - DMA_OVERHEAD) / WORDS_PER_INF_DATA) + +#define NT_FLM_MISS_FLOW_TYPE 0 +#define NT_FLM_UNHANDLED_FLOW_TYPE 1 +#define NT_FLM_OP_UNLEARN 0 +#define NT_FLM_OP_LEARN 1 +#define NT_FLM_OP_RELEARN 2 + +#define NT_FLM_VIOLATING_MBR_FLOW_TYPE 15 +#define NT_VIOLATING_MBR_CFN 0 +#define NT_VIOLATING_MBR_QSL 1 + +#define RTE_ETH_RSS_UDP_COMBINED \ + (RTE_ETH_RSS_NONFRAG_IPV4_UDP | RTE_ETH_RSS_NONFRAG_IPV6_UDP | RTE_ETH_RSS_IPV6_UDP_EX) + +#define RTE_ETH_RSS_TCP_COMBINED \ + (RTE_ETH_RSS_NONFRAG_IPV4_TCP | RTE_ETH_RSS_NONFRAG_IPV6_TCP | RTE_ETH_RSS_IPV6_TCP_EX) + +#define NT_FLM_OP_UNLEARN 0 +#define NT_FLM_OP_LEARN 1 + +#define NT_FLM_MISS_FLOW_TYPE 0 +#define NT_FLM_UNHANDLED_FLOW_TYPE 1 +#define NT_FLM_VIOLATING_MBR_FLOW_TYPE 15 + +#define NT_VIOLATING_MBR_CFN 0 +#define NT_VIOLATING_MBR_QSL 1 + +#define POLICING_PARAMETER_OFFSET 4096 +#define SIZE_CONVERTER 1099.511627776 + +#define CELL_STATUS_UNINITIALIZED 0 +#define CELL_STATUS_INITIALIZING 1 +#define CELL_STATUS_INITIALIZED_TYPE_FLOW 2 +#define CELL_STATUS_INITIALIZED_TYPE_FLM 3 + +struct flm_mtr_stat_s { + struct dual_buckets_s *buckets; + atomic_uint_fast64_t n_pkt; + atomic_uint_fast64_t n_bytes; + uint64_t n_pkt_base; + uint64_t n_bytes_base; + atomic_uint_fast64_t stats_mask; + uint32_t flm_id; +}; + +struct flm_mtr_shared_stats_s { + struct flm_mtr_stat_s *stats; + uint32_t size; + int shared; +}; + +struct flm_flow_mtr_handle_s { + struct dual_buckets_s { + uint16_t rate_a; + uint16_t rate_b; + uint16_t size_a; + uint16_t size_b; + } dual_buckets[FLM_MTR_PROFILE_SIZE]; + + struct 
flm_mtr_shared_stats_s *port_stats[UINT8_MAX]; +}; + +static void *flm_lrn_queue_arr; + +static int flow_mtr_supported(struct flow_eth_dev *dev) +{ + return hw_mod_flm_present(&dev->ndev->be) && dev->ndev->be.flm.nb_variant == 2; +} + +static uint64_t flow_mtr_meter_policy_n_max(void) +{ + return FLM_MTR_PROFILE_SIZE; +} + +static inline uint64_t convert_policing_parameter(uint64_t value) +{ + uint64_t limit = POLICING_PARAMETER_OFFSET; + uint64_t shift = 0; + uint64_t res = value; + + while (shift < 15 && value >= limit) { + limit <<= 1; + ++shift; + } + + if (shift != 0) { + uint64_t tmp = POLICING_PARAMETER_OFFSET * (1 << (shift - 1)); + + if (tmp > value) { + res = 0; + + } else { + tmp = value - tmp; + res = tmp >> (shift - 1); + } + + if (res >= POLICING_PARAMETER_OFFSET) + res = POLICING_PARAMETER_OFFSET - 1; + + res = res | (shift << 12); + } + + return res; +} + +static int flow_mtr_set_profile(struct flow_eth_dev *dev, uint32_t profile_id, + uint64_t bucket_rate_a, uint64_t bucket_size_a, uint64_t bucket_rate_b, + uint64_t bucket_size_b) +{ + struct flow_nic_dev *ndev = dev->ndev; + struct flm_flow_mtr_handle_s *handle = + (struct flm_flow_mtr_handle_s *)ndev->flm_mtr_handle; + struct dual_buckets_s *buckets = &handle->dual_buckets[profile_id]; + + /* Round rates up to nearest 128 bytes/sec and shift to 128 bytes/sec units */ + bucket_rate_a = (bucket_rate_a + 127) >> 7; + bucket_rate_b = (bucket_rate_b + 127) >> 7; + + buckets->rate_a = convert_policing_parameter(bucket_rate_a); + buckets->rate_b = convert_policing_parameter(bucket_rate_b); + + /* Round size down to 38-bit int */ + if (bucket_size_a > 0x3fffffffff) + bucket_size_a = 0x3fffffffff; + + if (bucket_size_b > 0x3fffffffff) + bucket_size_b = 0x3fffffffff; + + /* Convert size to units of 2^40 / 10^9. Output is a 28-bit int. 
*/ + bucket_size_a = bucket_size_a / SIZE_CONVERTER; + bucket_size_b = bucket_size_b / SIZE_CONVERTER; + + buckets->size_a = convert_policing_parameter(bucket_size_a); + buckets->size_b = convert_policing_parameter(bucket_size_b); + + return 0; +} + +static int flow_mtr_set_policy(struct flow_eth_dev *dev, uint32_t policy_id, int drop) +{ + (void)dev; + (void)policy_id; + (void)drop; + return 0; +} + +static uint32_t flow_mtr_meters_supported(struct flow_eth_dev *dev, uint8_t caller_id) +{ + struct flm_flow_mtr_handle_s *handle = dev->ndev->flm_mtr_handle; + return handle->port_stats[caller_id]->size; +} + +static int flow_mtr_create_meter(struct flow_eth_dev *dev, + uint8_t caller_id, + uint32_t mtr_id, + uint32_t profile_id, + uint32_t policy_id, + uint64_t stats_mask) +{ + (void)policy_id; + struct flm_v25_lrn_data_s *learn_record = NULL; + + rte_spinlock_lock(&dev->ndev->mtx); + + learn_record = + (struct flm_v25_lrn_data_s *) + flm_lrn_queue_get_write_buffer(flm_lrn_queue_arr); + + while (learn_record == NULL) { + nt_os_wait_usec(1); + learn_record = + (struct flm_v25_lrn_data_s *) + flm_lrn_queue_get_write_buffer(flm_lrn_queue_arr); + } + + struct flm_flow_mtr_handle_s *handle = dev->ndev->flm_mtr_handle; + + struct dual_buckets_s *buckets = &handle->dual_buckets[profile_id]; + + memset(learn_record, 0x0, sizeof(struct flm_v25_lrn_data_s)); + + union flm_handles flm_h; + flm_h.idx = mtr_id; + uint32_t flm_id = ntnic_id_table_get_id(dev->ndev->id_table_handle, flm_h, caller_id, 2); + + learn_record->sw9 = flm_id; + learn_record->kid = 1; + + learn_record->rate = buckets->rate_a; + learn_record->size = buckets->size_a; + learn_record->fill = buckets->size_a; + + learn_record->ft_mbr = + NT_FLM_VIOLATING_MBR_FLOW_TYPE; /* FT to assign if MBR has been exceeded */ + + learn_record->ent = 1; + learn_record->op = 1; + learn_record->eor = 1; + + learn_record->id = flm_id; + + if (stats_mask) + learn_record->vol_idx = 1; + + flm_lrn_queue_release_write_buffer(flm_lrn_queue_arr); + + struct flm_mtr_stat_s *mtr_stat = handle->port_stats[caller_id]->stats; + mtr_stat[mtr_id].buckets = buckets; + mtr_stat[mtr_id].flm_id = flm_id; + atomic_store(&mtr_stat[mtr_id].stats_mask, stats_mask); + + rte_spinlock_unlock(&dev->ndev->mtx); + + return 0; +} + +static int flow_mtr_probe_meter(struct flow_eth_dev *dev, uint8_t caller_id, uint32_t mtr_id) +{ + struct flm_v25_lrn_data_s *learn_record = NULL; + + rte_spinlock_lock(&dev->ndev->mtx); + + learn_record = + (struct flm_v25_lrn_data_s *) + flm_lrn_queue_get_write_buffer(flm_lrn_queue_arr); + + while (learn_record == NULL) { + nt_os_wait_usec(1); + learn_record = + (struct flm_v25_lrn_data_s *) + flm_lrn_queue_get_write_buffer(flm_lrn_queue_arr); + } + + struct flm_flow_mtr_handle_s *handle = dev->ndev->flm_mtr_handle; + + struct flm_mtr_stat_s *mtr_stat = handle->port_stats[caller_id]->stats; + uint32_t flm_id = mtr_stat[mtr_id].flm_id; + + memset(learn_record, 0x0, sizeof(struct flm_v25_lrn_data_s)); + + learn_record->sw9 = flm_id; + learn_record->kid = 1; + + learn_record->ent = 1; + learn_record->op = 3; + learn_record->eor = 1; + + learn_record->id = flm_id; + + flm_lrn_queue_release_write_buffer(flm_lrn_queue_arr); + + rte_spinlock_unlock(&dev->ndev->mtx); + + return 0; +} + +static int flow_mtr_destroy_meter(struct flow_eth_dev *dev, uint8_t caller_id, uint32_t mtr_id) +{ + struct flm_v25_lrn_data_s *learn_record = NULL; + + rte_spinlock_lock(&dev->ndev->mtx); + + learn_record = + (struct flm_v25_lrn_data_s *) + 
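The rate/size conversion above packs each policing parameter into a 12-bit mantissa plus a 4-bit shift. A stand-alone restatement with a worked value (illustrative; encode_policing is not the driver's exact function):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Values below 4096 are stored verbatim; larger values decode back to
 * roughly (4096 + mantissa) << (shift - 1).
 */
static uint64_t encode_policing(uint64_t value)
{
        uint64_t limit = 4096, shift = 0, res = value;

        while (shift < 15 && value >= limit) {
                limit <<= 1;
                ++shift;
        }

        if (shift != 0) {
                uint64_t base = 4096ULL << (shift - 1);

                res = (value < base) ? 0 : (value - base) >> (shift - 1);

                if (res >= 4096)
                        res = 4095;

                res |= shift << 12;
        }

        return res;
}

int main(void)
{
        /* 10000 encodes as shift=2, mantissa=904: (4096 + 904) << 1 == 10000 */
        uint64_t enc = encode_policing(10000);

        printf("enc=0x%" PRIx64 " shift=%" PRIu64 " mantissa=%" PRIu64 "\n",
               enc, enc >> 12, enc & 0xfff);
        return 0;
}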
flm_lrn_queue_get_write_buffer(flm_lrn_queue_arr); + + while (learn_record == NULL) { + nt_os_wait_usec(1); + learn_record = + (struct flm_v25_lrn_data_s *) + flm_lrn_queue_get_write_buffer(flm_lrn_queue_arr); + } + + struct flm_flow_mtr_handle_s *handle = dev->ndev->flm_mtr_handle; + + struct flm_mtr_stat_s *mtr_stat = handle->port_stats[caller_id]->stats; + uint32_t flm_id = mtr_stat[mtr_id].flm_id; + + memset(learn_record, 0x0, sizeof(struct flm_v25_lrn_data_s)); + + learn_record->sw9 = flm_id; + learn_record->kid = 1; + + learn_record->ent = 1; + learn_record->op = 0; + /* Suppress generation of statistics INF_DATA */ + learn_record->nofi = 1; + learn_record->eor = 1; + + learn_record->id = flm_id; + + /* Clear statistics so stats_mask prevents updates of counters on deleted meters */ + atomic_store(&mtr_stat[mtr_id].stats_mask, 0); + atomic_store(&mtr_stat[mtr_id].n_bytes, 0); + atomic_store(&mtr_stat[mtr_id].n_pkt, 0); + mtr_stat[mtr_id].n_bytes_base = 0; + mtr_stat[mtr_id].n_pkt_base = 0; + mtr_stat[mtr_id].buckets = NULL; + + ntnic_id_table_free_id(dev->ndev->id_table_handle, flm_id); + + flm_lrn_queue_release_write_buffer(flm_lrn_queue_arr); + + rte_spinlock_unlock(&dev->ndev->mtx); + + return 0; +} + +static int flm_mtr_adjust_stats(struct flow_eth_dev *dev, uint8_t caller_id, uint32_t mtr_id, + uint32_t adjust_value) +{ + struct flm_v25_lrn_data_s *learn_record = NULL; + + rte_spinlock_lock(&dev->ndev->mtx); + + learn_record = + (struct flm_v25_lrn_data_s *) + flm_lrn_queue_get_write_buffer(flm_lrn_queue_arr); + + while (learn_record == NULL) { + nt_os_wait_usec(1); + learn_record = + (struct flm_v25_lrn_data_s *) + flm_lrn_queue_get_write_buffer(flm_lrn_queue_arr); + } + + struct flm_flow_mtr_handle_s *handle = dev->ndev->flm_mtr_handle; + + struct flm_mtr_stat_s *mtr_stat = &handle->port_stats[caller_id]->stats[mtr_id]; + + memset(learn_record, 0x0, sizeof(struct flm_v25_lrn_data_s)); + + learn_record->sw9 = mtr_stat->flm_id; + learn_record->kid = 1; + + learn_record->rate = mtr_stat->buckets->rate_a; + learn_record->size = mtr_stat->buckets->size_a; + learn_record->adj = adjust_value; + + learn_record->ft_mbr = NT_FLM_VIOLATING_MBR_FLOW_TYPE; + + learn_record->ent = 1; + learn_record->op = 2; + learn_record->eor = 1; + + if (atomic_load(&mtr_stat->stats_mask)) + learn_record->vol_idx = 1; + + flm_lrn_queue_release_write_buffer(flm_lrn_queue_arr); + + rte_spinlock_unlock(&dev->ndev->mtx); + + return 0; +} + +static void flm_setup_queues(void) +{ + flm_lrn_queue_arr = flm_lrn_queue_create(); + assert(flm_lrn_queue_arr != NULL); +} + +static void flm_free_queues(void) +{ + flm_lrn_queue_free(flm_lrn_queue_arr); +} + +static uint32_t flm_lrn_update(struct flow_eth_dev *dev, uint32_t *inf_word_cnt, + uint32_t *sta_word_cnt) +{ + read_record r = flm_lrn_queue_get_read_buffer(flm_lrn_queue_arr); + + if (r.num) { + uint32_t handled_records = 0; + + if (hw_mod_flm_lrn_data_set_flush(&dev->ndev->be, HW_FLM_FLOW_LRN_DATA, r.p, r.num, + &handled_records, inf_word_cnt, sta_word_cnt)) { + NT_LOG(ERR, FILTER, "Flow programming failed"); + + } else if (handled_records > 0) { + flm_lrn_queue_release_read_buffer(flm_lrn_queue_arr, handled_records); + } + } + + return r.num; +} + +static inline bool is_remote_caller(uint8_t caller_id, uint8_t *port) +{ + if (caller_id < MAX_VDPA_PORTS + 1) { + *port = caller_id; + return true; + } + + *port = caller_id - MAX_VDPA_PORTS - 1; + return false; +} + +static void flm_mtr_read_inf_records(struct flow_eth_dev *dev, uint32_t *data, uint32_t records) +{ 
+ struct flm_flow_mtr_handle_s *handle = dev->ndev->flm_mtr_handle; + + for (uint32_t i = 0; i < records; ++i) { + struct flm_v25_inf_data_s *inf_data = + (struct flm_v25_inf_data_s *)&data[i * WORDS_PER_INF_DATA]; + uint8_t caller_id; + uint8_t type; + union flm_handles flm_h; + ntnic_id_table_find(dev->ndev->id_table_handle, inf_data->id, &flm_h, &caller_id, + &type); + + /* Check that received record hold valid meter statistics */ + if (type == 2) { + uint64_t mtr_id = flm_h.idx; + + if (mtr_id < handle->port_stats[caller_id]->size) { + struct flm_mtr_stat_s *mtr_stat = + handle->port_stats[caller_id]->stats; + + /* Don't update a deleted meter */ + uint64_t stats_mask = atomic_load(&mtr_stat[mtr_id].stats_mask); + + if (stats_mask) { + atomic_store(&mtr_stat[mtr_id].n_pkt, + inf_data->packets | UINT64_MSB); + atomic_store(&mtr_stat[mtr_id].n_bytes, inf_data->bytes); + atomic_store(&mtr_stat[mtr_id].n_pkt, inf_data->packets); + struct flm_info_event_s stat_data; + bool remote_caller; + uint8_t port; + + remote_caller = is_remote_caller(caller_id, &port); + + /* Save stat data to flm stat queue */ + stat_data.bytes = inf_data->bytes; + stat_data.packets = inf_data->packets; + stat_data.id = mtr_id; + stat_data.timestamp = inf_data->ts; + stat_data.cause = inf_data->cause; + flm_inf_queue_put(port, remote_caller, &stat_data); + } + } + + /* Check that received record hold valid flow data */ + + } else if (type == 1) { + switch (inf_data->cause) { + case INF_DATA_CAUSE_TIMEOUT_FLOW_DELETED: + case INF_DATA_CAUSE_TIMEOUT_FLOW_KEPT: { + struct flow_handle *fh = (struct flow_handle *)flm_h.p; + struct flm_age_event_s age_event; + uint8_t port; + + age_event.context = fh->context; + + is_remote_caller(caller_id, &port); + + flm_age_queue_put(caller_id, &age_event); + flm_age_event_set(port); + } + break; + + case INF_DATA_CAUSE_SW_UNLEARN: + case INF_DATA_CAUSE_NA: + case INF_DATA_CAUSE_PERIODIC_FLOW_INFO: + case INF_DATA_CAUSE_SW_PROBE: + default: + break; + } + } + } +} + +static void flm_mtr_read_sta_records(struct flow_eth_dev *dev, uint32_t *data, uint32_t records) +{ + for (uint32_t i = 0; i < records; ++i) { + struct flm_v25_sta_data_s *sta_data = + (struct flm_v25_sta_data_s *)&data[i * WORDS_PER_STA_DATA]; + uint8_t caller_id; + uint8_t type; + union flm_handles flm_h; + ntnic_id_table_find(dev->ndev->id_table_handle, sta_data->id, &flm_h, &caller_id, + &type); + + if (type == 1) { + uint8_t port; + bool remote_caller = is_remote_caller(caller_id, &port); + + rte_spinlock_lock(&dev->ndev->mtx); + ((struct flow_handle *)flm_h.p)->learn_ignored = 1; + rte_spinlock_unlock(&dev->ndev->mtx); + struct flm_status_event_s data = { + .flow = flm_h.p, + .learn_ignore = sta_data->lis, + .learn_failed = sta_data->lfs, + }; + + flm_sta_queue_put(port, remote_caller, &data); + } + } +} + +static uint32_t flm_update(struct flow_eth_dev *dev) +{ + static uint32_t inf_word_cnt; + static uint32_t sta_word_cnt; + + uint32_t inf_data[DMA_BLOCK_SIZE]; + uint32_t sta_data[DMA_BLOCK_SIZE]; + + if (inf_word_cnt >= WORDS_PER_INF_DATA || sta_word_cnt >= WORDS_PER_STA_DATA) { + uint32_t inf_records = inf_word_cnt / WORDS_PER_INF_DATA; + + if (inf_records > MAX_INF_DATA_RECORDS_PER_READ) + inf_records = MAX_INF_DATA_RECORDS_PER_READ; + + uint32_t sta_records = sta_word_cnt / WORDS_PER_STA_DATA; + + if (sta_records > MAX_STA_DATA_RECORDS_PER_READ) + sta_records = MAX_STA_DATA_RECORDS_PER_READ; + + hw_mod_flm_inf_sta_data_update_get(&dev->ndev->be, HW_FLM_FLOW_INF_STA_DATA, + inf_data, inf_records * 
WORDS_PER_INF_DATA, + &inf_word_cnt, sta_data, + sta_records * WORDS_PER_STA_DATA, + &sta_word_cnt); + + if (inf_records > 0) + flm_mtr_read_inf_records(dev, inf_data, inf_records); + + if (sta_records > 0) + flm_mtr_read_sta_records(dev, sta_data, sta_records); + + return 1; + } + + if (flm_lrn_update(dev, &inf_word_cnt, &sta_word_cnt) != 0) + return 1; + + hw_mod_flm_buf_ctrl_update(&dev->ndev->be); + hw_mod_flm_buf_ctrl_get(&dev->ndev->be, HW_FLM_BUF_CTRL_INF_AVAIL, &inf_word_cnt); + hw_mod_flm_buf_ctrl_get(&dev->ndev->be, HW_FLM_BUF_CTRL_STA_AVAIL, &sta_word_cnt); + + return inf_word_cnt + sta_word_cnt; +} + +static void flm_mtr_read_stats(struct flow_eth_dev *dev, + uint8_t caller_id, + uint32_t id, + uint64_t *stats_mask, + uint64_t *green_pkt, + uint64_t *green_bytes, + int clear) +{ + struct flm_flow_mtr_handle_s *handle = dev->ndev->flm_mtr_handle; + struct flm_mtr_stat_s *mtr_stat = handle->port_stats[caller_id]->stats; + *stats_mask = atomic_load(&mtr_stat[id].stats_mask); + + if (*stats_mask) { + uint64_t pkt_1; + uint64_t pkt_2; + uint64_t nb; + + do { + do { + pkt_1 = atomic_load(&mtr_stat[id].n_pkt); + } while (pkt_1 & UINT64_MSB); + + nb = atomic_load(&mtr_stat[id].n_bytes); + pkt_2 = atomic_load(&mtr_stat[id].n_pkt); + } while (pkt_1 != pkt_2); + + *green_pkt = pkt_1 - mtr_stat[id].n_pkt_base; + *green_bytes = nb - mtr_stat[id].n_bytes_base; + + if (clear) { + mtr_stat[id].n_pkt_base = pkt_1; + mtr_stat[id].n_bytes_base = nb; + } + } +} + +static int rx_queue_idx_to_hw_id(const struct flow_eth_dev *dev, int id) +{ + for (int i = 0; i < dev->num_queues; ++i) + if (dev->rx_queue[i].id == id) + return dev->rx_queue[i].hw_id; + + return -1; +} + +/* + * Flow Matcher functionality + */ + +static int flm_sdram_calibrate(struct flow_nic_dev *ndev) +{ + int success = 0; + uint32_t fail_value = 0; + uint32_t value = 0; + + hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_PRESET_ALL, 0x0); + hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_SPLIT_SDRAM_USAGE, 0x10); + hw_mod_flm_control_flush(&ndev->be); + + /* Wait for ddr4 calibration/init done */ + for (uint32_t i = 0; i < 1000000; ++i) { + hw_mod_flm_status_update(&ndev->be); + hw_mod_flm_status_get(&ndev->be, HW_FLM_STATUS_CALIB_SUCCESS, &value); + hw_mod_flm_status_get(&ndev->be, HW_FLM_STATUS_CALIB_FAIL, &fail_value); + + if (value & 0x80000000) { + success = 1; + break; + } + + if (fail_value != 0) + break; + + nt_os_wait_usec(1); + } + + if (!success) { + NT_LOG(ERR, FILTER, "FLM initialization failed - SDRAM calibration failed"); + NT_LOG(ERR, FILTER, + "Calibration status: success 0x%08" PRIx32 " - fail 0x%08" PRIx32, + value, fail_value); + return -1; + } + + return 0; +} + +static int flm_sdram_reset(struct flow_nic_dev *ndev, int enable) +{ + int success = 0; + + /* + * Make sure no lookup is performed during init, i.e. 
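The meter counters read in flm_mtr_read_stats() above (and written in flm_mtr_read_inf_records()) follow a small seqlock-like protocol. A stand-alone sketch of that protocol, assuming a single writer (illustrative names, not the driver's structures):

#include <stdatomic.h>
#include <stdint.h>

#define MSB ((uint64_t)1 << 63)

struct counters {
        atomic_uint_fast64_t n_pkt;
        atomic_uint_fast64_t n_bytes;
};

static void write_counters(struct counters *c, uint64_t pkts, uint64_t bytes)
{
        atomic_store(&c->n_pkt, pkts | MSB);    /* mark update in progress */
        atomic_store(&c->n_bytes, bytes);
        atomic_store(&c->n_pkt, pkts);          /* publish a consistent pair */
}

static void read_counters(struct counters *c, uint64_t *pkts, uint64_t *bytes)
{
        uint64_t p1, p2, b;

        do {
                do {
                        p1 = atomic_load(&c->n_pkt);
                } while (p1 & MSB);             /* writer is mid-update */

                b = atomic_load(&c->n_bytes);
                p2 = atomic_load(&c->n_pkt);
        } while (p1 != p2);                     /* counters changed underneath */

        *pkts = p1;
        *bytes = b;
}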
+ * disable every category and disable FLM + */ + hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_ENABLE, 0x0); + hw_mod_flm_control_flush(&ndev->be); + + for (uint32_t i = 1; i < ndev->be.flm.nb_categories; ++i) + hw_mod_flm_rcp_set(&ndev->be, HW_FLM_RCP_PRESET_ALL, i, 0x0); + + hw_mod_flm_rcp_flush(&ndev->be, 1, ndev->be.flm.nb_categories - 1); + + /* Wait for FLM to enter Idle state */ + for (uint32_t i = 0; i < 1000000; ++i) { + uint32_t value = 0; + hw_mod_flm_status_update(&ndev->be); + hw_mod_flm_status_get(&ndev->be, HW_FLM_STATUS_IDLE, &value); + + if (value) { + success = 1; + break; + } + + nt_os_wait_usec(1); + } + + if (!success) { + NT_LOG(ERR, FILTER, "FLM initialization failed - Never idle"); + return -1; + } + + success = 0; + + /* Start SDRAM initialization */ + hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_INIT, 0x1); + hw_mod_flm_control_flush(&ndev->be); + + for (uint32_t i = 0; i < 1000000; ++i) { + uint32_t value = 0; + hw_mod_flm_status_update(&ndev->be); + hw_mod_flm_status_get(&ndev->be, HW_FLM_STATUS_INITDONE, &value); + + if (value) { + success = 1; + break; + } + + nt_os_wait_usec(1); + } + + if (!success) { + NT_LOG(ERR, FILTER, + "FLM initialization failed - SDRAM initialization incomplete"); + return -1; + } + + /* Set the INIT value back to zero to clear the bit in the SW register cache */ + hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_INIT, 0x0); + hw_mod_flm_control_flush(&ndev->be); + + /* Enable FLM */ + hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_ENABLE, enable); + hw_mod_flm_control_flush(&ndev->be); + + int nb_rpp_per_ps = ndev->be.flm.nb_rpp_clock_in_ps; + int nb_load_aps_max = ndev->be.flm.nb_load_aps_max; + uint32_t scan_i_value = 0; + + if (NTNIC_SCANNER_LOAD > 0) { + scan_i_value = (1 / (nb_rpp_per_ps * 0.000000000001)) / + (nb_load_aps_max * NTNIC_SCANNER_LOAD); + } + + hw_mod_flm_scan_set(&ndev->be, HW_FLM_SCAN_I, scan_i_value); + hw_mod_flm_scan_flush(&ndev->be); + + return 0; +} + + + +struct flm_flow_key_def_s { + union { + struct { + uint64_t qw0_dyn : 7; + uint64_t qw0_ofs : 8; + uint64_t qw4_dyn : 7; + uint64_t qw4_ofs : 8; + uint64_t sw8_dyn : 7; + uint64_t sw8_ofs : 8; + uint64_t sw9_dyn : 7; + uint64_t sw9_ofs : 8; + uint64_t outer_proto : 1; + uint64_t inner_proto : 1; + uint64_t pad : 2; + }; + uint64_t data; + }; + uint32_t mask[10]; +}; + +/* + * Flow Matcher functionality + */ +static inline void set_key_def_qw(struct flm_flow_key_def_s *key_def, unsigned int qw, + unsigned int dyn, unsigned int ofs) +{ + assert(qw < 2); + + if (qw == 0) { + key_def->qw0_dyn = dyn & 0x7f; + key_def->qw0_ofs = ofs & 0xff; + + } else { + key_def->qw4_dyn = dyn & 0x7f; + key_def->qw4_ofs = ofs & 0xff; + } +} + +static inline void set_key_def_sw(struct flm_flow_key_def_s *key_def, unsigned int sw, + unsigned int dyn, unsigned int ofs) +{ + assert(sw < 2); + + if (sw == 0) { + key_def->sw8_dyn = dyn & 0x7f; + key_def->sw8_ofs = ofs & 0xff; + + } else { + key_def->sw9_dyn = dyn & 0x7f; + key_def->sw9_ofs = ofs & 0xff; + } +} + +static inline uint8_t convert_port_to_ifr_mtu_recipe(uint32_t port) +{ + return port + 1; +} + +static uint8_t get_port_from_port_id(const struct flow_nic_dev *ndev, uint32_t port_id) +{ + struct flow_eth_dev *dev = ndev->eth_base; + + while (dev) { + if (dev->port_id == port_id) + return dev->port; + + dev = dev->next; + } + + return UINT8_MAX; +} + +static void nic_insert_flow(struct flow_nic_dev *ndev, struct flow_handle *fh) +{ + rte_spinlock_lock(&ndev->flow_mtx); + + if (ndev->flow_base) + 
ndev->flow_base->prev = fh; + + fh->next = ndev->flow_base; + fh->prev = NULL; + ndev->flow_base = fh; + + rte_spinlock_unlock(&ndev->flow_mtx); +} + +static void nic_remove_flow(struct flow_nic_dev *ndev, struct flow_handle *fh) +{ + struct flow_handle *next = fh->next; + struct flow_handle *prev = fh->prev; + + rte_spinlock_lock(&ndev->flow_mtx); + + if (next && prev) { + prev->next = next; + next->prev = prev; + + } else if (next) { + ndev->flow_base = next; + next->prev = NULL; + + } else if (prev) { + prev->next = NULL; + + } else if (ndev->flow_base == fh) { + ndev->flow_base = NULL; + } + + rte_spinlock_unlock(&ndev->flow_mtx); +} + +static void nic_insert_flow_flm(struct flow_nic_dev *ndev, struct flow_handle *fh) +{ + rte_spinlock_lock(&ndev->flow_mtx); + + if (ndev->flow_base_flm) + ndev->flow_base_flm->prev = fh; + + fh->next = ndev->flow_base_flm; + fh->prev = NULL; + ndev->flow_base_flm = fh; + + rte_spinlock_unlock(&ndev->flow_mtx); +} + +static void nic_remove_flow_flm(struct flow_nic_dev *ndev, struct flow_handle *fh_flm) +{ + struct flow_handle *next = fh_flm->next; + struct flow_handle *prev = fh_flm->prev; + + rte_spinlock_lock(&ndev->flow_mtx); + + if (next && prev) { + prev->next = next; + next->prev = prev; + + } else if (next) { + ndev->flow_base_flm = next; + next->prev = NULL; + + } else if (prev) { + prev->next = NULL; + + } else if (ndev->flow_base_flm == fh_flm) { + ndev->flow_base_flm = NULL; + } + + rte_spinlock_unlock(&ndev->flow_mtx); +} + +static inline struct nic_flow_def *prepare_nic_flow_def(struct nic_flow_def *fd) +{ + if (fd) { + fd->full_offload = -1; + fd->in_port_override = -1; + fd->mark = UINT32_MAX; + fd->jump_to_group = UINT32_MAX; + + memset(fd->mtr_ids, 0xff, sizeof(uint32_t) * MAX_FLM_MTRS_SUPPORTED); + + fd->l2_prot = -1; + fd->l3_prot = -1; + fd->l4_prot = -1; + fd->vlans = 0; + fd->tunnel_prot = -1; + fd->tunnel_l3_prot = -1; + fd->tunnel_l4_prot = -1; + fd->fragmentation = -1; + fd->ip_prot = -1; + fd->tunnel_ip_prot = -1; + + fd->non_empty = -1; + } + + return fd; +} + +static inline struct nic_flow_def *allocate_nic_flow_def(void) +{ + return prepare_nic_flow_def(calloc(1, sizeof(struct nic_flow_def))); +} + +static bool fd_has_empty_pattern(const struct nic_flow_def *fd) +{ + return fd && fd->vlans == 0 && fd->l2_prot < 0 && fd->l3_prot < 0 && fd->l4_prot < 0 && + fd->tunnel_prot < 0 && fd->tunnel_l3_prot < 0 && fd->tunnel_l4_prot < 0 && + fd->ip_prot < 0 && fd->tunnel_ip_prot < 0 && fd->non_empty < 0; +} + +static inline const void *memcpy_mask_if(void *dest, const void *src, const void *mask, + size_t count) +{ + if (mask == NULL) + return src; + + unsigned char *dest_ptr = (unsigned char *)dest; + const unsigned char *src_ptr = (const unsigned char *)src; + const unsigned char *mask_ptr = (const unsigned char *)mask; + + for (size_t i = 0; i < count; ++i) + dest_ptr[i] = src_ptr[i] & mask_ptr[i]; + + return dest; +} + +static int flm_flow_programming(struct flow_handle *fh, uint32_t flm_op) +{ + struct flm_v25_lrn_data_s *learn_record = NULL; + + if (fh->type != FLOW_HANDLE_TYPE_FLM) + return -1; + + if (flm_op == NT_FLM_OP_LEARN) { + union flm_handles flm_h; + flm_h.p = fh; + fh->flm_id = ntnic_id_table_get_id(fh->dev->ndev->id_table_handle, flm_h, + fh->caller_id, 1); + } + + uint32_t flm_id = fh->flm_id; + + if (flm_op == NT_FLM_OP_UNLEARN) { + ntnic_id_table_free_id(fh->dev->ndev->id_table_handle, flm_id); + + if (fh->learn_ignored == 1) + return 0; + } + + learn_record = + (struct flm_v25_lrn_data_s *) + 
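memcpy_mask_if() above is used throughout the action interpretation below to apply an optional action mask on top of the action conf. A small illustrative example (not part of the patch):

struct rte_flow_action_mark mark_conf = { .id = 0x12345678 };
struct rte_flow_action_mark mark_mask = { .id = 0x0000ffff };
struct rte_flow_action_mark tmp;

const struct rte_flow_action_mark *mark =
        memcpy_mask_if(&tmp, &mark_conf, &mark_mask, sizeof(mark_conf));

/* mark->id is 0x00005678 here; with a NULL mask no copy is made and
 * mark simply points at mark_conf.
 */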
flm_lrn_queue_get_write_buffer(flm_lrn_queue_arr); + + while (learn_record == NULL) { + nt_os_wait_usec(1); + learn_record = + (struct flm_v25_lrn_data_s *) + flm_lrn_queue_get_write_buffer(flm_lrn_queue_arr); + } + + memset(learn_record, 0x0, sizeof(struct flm_v25_lrn_data_s)); + + learn_record->id = flm_id; + + learn_record->qw0[0] = fh->flm_data[9]; + learn_record->qw0[1] = fh->flm_data[8]; + learn_record->qw0[2] = fh->flm_data[7]; + learn_record->qw0[3] = fh->flm_data[6]; + learn_record->qw4[0] = fh->flm_data[5]; + learn_record->qw4[1] = fh->flm_data[4]; + learn_record->qw4[2] = fh->flm_data[3]; + learn_record->qw4[3] = fh->flm_data[2]; + learn_record->sw8 = fh->flm_data[1]; + learn_record->sw9 = fh->flm_data[0]; + learn_record->prot = fh->flm_prot; + + learn_record->mbr_idx1 = fh->flm_mtr_ids[0]; + learn_record->mbr_idx2 = fh->flm_mtr_ids[1]; + learn_record->mbr_idx3 = fh->flm_mtr_ids[2]; + learn_record->mbr_idx4 = fh->flm_mtr_ids[3]; + + /* Last non-zero mtr is used for statistics */ + uint8_t mbrs = 0; + + while (mbrs < MAX_FLM_MTRS_SUPPORTED && fh->flm_mtr_ids[mbrs] != 0) + ++mbrs; + + learn_record->vol_idx = mbrs; + + learn_record->nat_ip = fh->flm_nat_ipv4; + learn_record->nat_port = fh->flm_nat_port; + learn_record->nat_en = fh->flm_nat_ipv4 || fh->flm_nat_port ? 1 : 0; + + learn_record->dscp = fh->flm_dscp; + learn_record->teid = fh->flm_teid; + learn_record->qfi = fh->flm_qfi; + learn_record->rqi = fh->flm_rqi; + /* Lower 10 bits used for RPL EXT PTR */ + learn_record->color = fh->flm_rpl_ext_ptr & 0x3ff; + /* Bit [13:10] used for MTU recipe */ + learn_record->color |= (fh->flm_mtu_fragmentation_recipe & 0xf) << 10; + + learn_record->ent = 0; + learn_record->op = flm_op & 0xf; + /* Suppress generation of statistics INF_DATA */ + learn_record->nofi = 1; + learn_record->prio = fh->flm_prio & 0x3; + learn_record->ft = fh->flm_ft; + learn_record->kid = fh->flm_kid; + learn_record->eor = 1; + learn_record->scrub_prof = fh->flm_scrub_prof; + + flm_lrn_queue_release_write_buffer(flm_lrn_queue_arr); + return 0; +} + +static inline const void *memcpy_or(void *dest, const void *src, size_t count) +{ + unsigned char *dest_ptr = (unsigned char *)dest; + const unsigned char *src_ptr = (const unsigned char *)src; + + for (size_t i = 0; i < count; ++i) + dest_ptr[i] |= src_ptr[i]; + + return dest; +} + +/* + * This function must be callable without locking any mutexes + */ +static int interpret_flow_actions(const struct flow_eth_dev *dev, + const struct rte_flow_action action[], + const struct rte_flow_action *action_mask, + struct nic_flow_def *fd, + struct rte_flow_error *error, + uint32_t *num_dest_port, + uint32_t *num_queues) +{ + int mtr_count = 0; + + unsigned int encap_decap_order = 0; + + uint64_t modify_field_use_flags = 0x0; + + *num_dest_port = 0; + *num_queues = 0; + + if (action == NULL) { + flow_nic_set_error(ERR_FAILED, error); + NT_LOG(ERR, FILTER, "Flow actions missing"); + return -1; + } + + /* + * Gather flow match + actions and convert into internal flow definition structure (struct + * nic_flow_def_s) This is the 1st step in the flow creation - validate, convert and + * prepare + */ + for (int aidx = 0; action[aidx].type != RTE_FLOW_ACTION_TYPE_END; ++aidx) { + switch (action[aidx].type) { + case RTE_FLOW_ACTION_TYPE_PORT_ID: + NT_LOG(DBG, FILTER, "Dev:%p: RTE_FLOW_ACTION_TYPE_PORT_ID", dev); + + if (action[aidx].conf) { + struct rte_flow_action_port_id port_id_tmp; + const struct rte_flow_action_port_id *port_id = + memcpy_mask_if(&port_id_tmp, action[aidx].conf, + 
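The learn record's color field above carries two subfields, as noted in the comments. A stand-alone pack/unpack sketch (illustrative helper names):

#include <stdint.h>

/* Bits [9:0] carry the RPL extension pointer, bits [13:10] the MTU /
 * fragmentation recipe, matching the assignments in flm_flow_programming().
 */
static uint32_t color_pack(uint32_t rpl_ext_ptr, uint8_t mtu_recipe)
{
        return (rpl_ext_ptr & 0x3ff) | ((uint32_t)(mtu_recipe & 0xf) << 10);
}

static void color_unpack(uint32_t color, uint32_t *rpl_ext_ptr, uint8_t *mtu_recipe)
{
        *rpl_ext_ptr = color & 0x3ff;
        *mtu_recipe = (color >> 10) & 0xf;
}

/* e.g. color_pack(0x25, 3) == 0xc25 */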
action_mask ? action_mask[aidx].conf : NULL, + sizeof(struct rte_flow_action_port_id)); + + if (*num_dest_port > 0) { + NT_LOG(ERR, FILTER, + "Multiple port_id actions for one flow is not supported"); + flow_nic_set_error(ERR_ACTION_MULTIPLE_PORT_ID_UNSUPPORTED, + error); + return -1; + } + + uint8_t port = get_port_from_port_id(dev->ndev, port_id->id); + + if (fd->dst_num_avail == MAX_OUTPUT_DEST) { + NT_LOG(ERR, FILTER, "Too many output destinations"); + flow_nic_set_error(ERR_OUTPUT_TOO_MANY, error); + return -1; + } + + if (port >= dev->ndev->be.num_phy_ports) { + NT_LOG(ERR, FILTER, "Phy port out of range"); + flow_nic_set_error(ERR_OUTPUT_INVALID, error); + return -1; + } + + /* New destination port to add */ + fd->dst_id[fd->dst_num_avail].owning_port_id = port_id->id; + fd->dst_id[fd->dst_num_avail].type = PORT_PHY; + fd->dst_id[fd->dst_num_avail].id = (int)port; + fd->dst_id[fd->dst_num_avail].active = 1; + fd->dst_num_avail++; + + fd->flm_mtu_fragmentation_recipe = + convert_port_to_ifr_mtu_recipe(port); + + if (fd->full_offload < 0) + fd->full_offload = 1; + + *num_dest_port += 1; + + NT_LOG(DBG, FILTER, "Phy port ID: %i", (int)port); + } + + break; + + case RTE_FLOW_ACTION_TYPE_QUEUE: + NT_LOG(DBG, FILTER, "Dev:%p: RTE_FLOW_ACTION_TYPE_QUEUE", dev); + + if (action[aidx].conf) { + struct rte_flow_action_queue queue_tmp; + const struct rte_flow_action_queue *queue = + memcpy_mask_if(&queue_tmp, action[aidx].conf, + action_mask ? action_mask[aidx].conf : NULL, + sizeof(struct rte_flow_action_queue)); + + int hw_id = rx_queue_idx_to_hw_id(dev, queue->index); + + fd->dst_id[fd->dst_num_avail].owning_port_id = dev->port; + fd->dst_id[fd->dst_num_avail].id = hw_id; + fd->dst_id[fd->dst_num_avail].type = PORT_VIRT; + fd->dst_id[fd->dst_num_avail].active = 1; + fd->dst_num_avail++; + + NT_LOG(DBG, FILTER, + "Dev:%p: RTE_FLOW_ACTION_TYPE_QUEUE port %u, queue index: %u, hw id %u", + dev, dev->port, queue->index, hw_id); + + fd->full_offload = 0; + *num_queues += 1; + } + + break; + + case RTE_FLOW_ACTION_TYPE_RSS: + NT_LOG(DBG, FILTER, "Dev:%p: RTE_FLOW_ACTION_TYPE_RSS", dev); + + if (action[aidx].conf) { + struct rte_flow_action_rss rss_tmp; + const struct rte_flow_action_rss *rss = + memcpy_mask_if(&rss_tmp, action[aidx].conf, + action_mask ? action_mask[aidx].conf : NULL, + sizeof(struct rte_flow_action_rss)); + + if (rss->key_len > MAX_RSS_KEY_LEN) { + NT_LOG(ERR, FILTER, + "ERROR: RSS hash key length %u exceeds maximum value %u", + rss->key_len, MAX_RSS_KEY_LEN); + flow_nic_set_error(ERR_RSS_TOO_LONG_KEY, error); + return -1; + } + + for (uint32_t i = 0; i < rss->queue_num; ++i) { + int hw_id = rx_queue_idx_to_hw_id(dev, rss->queue[i]); + + fd->dst_id[fd->dst_num_avail].owning_port_id = dev->port; + fd->dst_id[fd->dst_num_avail].id = hw_id; + fd->dst_id[fd->dst_num_avail].type = PORT_VIRT; + fd->dst_id[fd->dst_num_avail].active = 1; + fd->dst_num_avail++; + } + + fd->hsh.func = rss->func; + fd->hsh.types = rss->types; + fd->hsh.key = rss->key; + fd->hsh.key_len = rss->key_len; + + NT_LOG(DBG, FILTER, + "Dev:%p: RSS func: %d, types: 0x%" PRIX64 ", key_len: %d", + dev, rss->func, rss->types, rss->key_len); + + fd->full_offload = 0; + *num_queues += rss->queue_num; + } + + break; + + case RTE_FLOW_ACTION_TYPE_MARK: + NT_LOG(DBG, FILTER, "Dev:%p: RTE_FLOW_ACTION_TYPE_MARK", dev); + + if (action[aidx].conf) { + struct rte_flow_action_mark mark_tmp; + const struct rte_flow_action_mark *mark = + memcpy_mask_if(&mark_tmp, action[aidx].conf, + action_mask ? 
action_mask[aidx].conf : NULL, + sizeof(struct rte_flow_action_mark)); + + fd->mark = mark->id; + NT_LOG(DBG, FILTER, "Mark: %i", mark->id); + } + + break; + + case RTE_FLOW_ACTION_TYPE_JUMP: + NT_LOG(DBG, FILTER, "Dev:%p: RTE_FLOW_ACTION_TYPE_JUMP", dev); + + if (action[aidx].conf) { + struct rte_flow_action_jump jump_tmp; + const struct rte_flow_action_jump *jump = + memcpy_mask_if(&jump_tmp, action[aidx].conf, + action_mask ? action_mask[aidx].conf : NULL, + sizeof(struct rte_flow_action_jump)); + + fd->jump_to_group = jump->group; + NT_LOG(DBG, FILTER, "Dev:%p: RTE_FLOW_ACTION_TYPE_JUMP: group %u", + dev, jump->group); + } + + break; + + case RTE_FLOW_ACTION_TYPE_DROP: + NT_LOG(DBG, FILTER, "Dev:%p: RTE_FLOW_ACTION_TYPE_DROP", dev); + + if (action[aidx].conf) { + fd->dst_id[fd->dst_num_avail].owning_port_id = 0; + fd->dst_id[fd->dst_num_avail].id = 0; + fd->dst_id[fd->dst_num_avail].type = PORT_NONE; + fd->dst_num_avail++; + } + + break; + + case RTE_FLOW_ACTION_TYPE_METER: + NT_LOG(DBG, FILTER, "Dev:%p: RTE_FLOW_ACTION_TYPE_METER", dev); + + if (action[aidx].conf) { + struct rte_flow_action_meter meter_tmp; + const struct rte_flow_action_meter *meter = + memcpy_mask_if(&meter_tmp, action[aidx].conf, + action_mask ? action_mask[aidx].conf : NULL, + sizeof(struct rte_flow_action_meter)); + + if (mtr_count >= MAX_FLM_MTRS_SUPPORTED) { + NT_LOG(ERR, FILTER, + "ERROR: - Number of METER actions exceeds %d.", + MAX_FLM_MTRS_SUPPORTED); + flow_nic_set_error(ERR_ACTION_UNSUPPORTED, error); + return -1; + } + + fd->mtr_ids[mtr_count++] = meter->mtr_id; + } + + break; + + case RTE_FLOW_ACTION_TYPE_RAW_ENCAP: + NT_LOG(DBG, FILTER, "Dev:%p: RTE_FLOW_ACTION_TYPE_RAW_ENCAP", dev); + + if (action[aidx].conf) { + const struct flow_action_raw_encap *encap = + (const struct flow_action_raw_encap *)action[aidx].conf; + const struct flow_action_raw_encap *encap_mask = action_mask + ? 
(const struct flow_action_raw_encap *)action_mask[aidx] + .conf + : NULL; + const struct rte_flow_item *items = encap->items; + + if (encap_decap_order != 1) { + NT_LOG(ERR, FILTER, + "ERROR: - RAW_ENCAP must follow RAW_DECAP."); + flow_nic_set_error(ERR_ACTION_UNSUPPORTED, error); + return -1; + } + + if (encap->size == 0 || encap->size > 255 || + encap->item_count < 2) { + NT_LOG(ERR, FILTER, + "ERROR: - RAW_ENCAP data/size invalid."); + flow_nic_set_error(ERR_ACTION_UNSUPPORTED, error); + return -1; + } + + encap_decap_order = 2; + + fd->tun_hdr.len = (uint8_t)encap->size; + + if (encap_mask) { + memcpy_mask_if(fd->tun_hdr.d.hdr8, encap->data, + encap_mask->data, fd->tun_hdr.len); + + } else { + memcpy(fd->tun_hdr.d.hdr8, encap->data, fd->tun_hdr.len); + } + + while (items->type != RTE_FLOW_ITEM_TYPE_END) { + switch (items->type) { + case RTE_FLOW_ITEM_TYPE_ETH: + fd->tun_hdr.l2_len = 14; + break; + + case RTE_FLOW_ITEM_TYPE_VLAN: + fd->tun_hdr.nb_vlans += 1; + fd->tun_hdr.l2_len += 4; + break; + + case RTE_FLOW_ITEM_TYPE_IPV4: + fd->tun_hdr.ip_version = 4; + fd->tun_hdr.l3_len = sizeof(struct rte_ipv4_hdr); + fd->tun_hdr.new_outer = 1; + + /* Patch length */ + fd->tun_hdr.d.hdr8[fd->tun_hdr.l2_len + 2] = 0x07; + fd->tun_hdr.d.hdr8[fd->tun_hdr.l2_len + 3] = 0xfd; + break; + + case RTE_FLOW_ITEM_TYPE_IPV6: + fd->tun_hdr.ip_version = 6; + fd->tun_hdr.l3_len = sizeof(struct rte_ipv6_hdr); + fd->tun_hdr.new_outer = 1; + + /* Patch length */ + fd->tun_hdr.d.hdr8[fd->tun_hdr.l2_len + 4] = 0x07; + fd->tun_hdr.d.hdr8[fd->tun_hdr.l2_len + 5] = 0xfd; + break; + + case RTE_FLOW_ITEM_TYPE_SCTP: + fd->tun_hdr.l4_len = sizeof(struct rte_sctp_hdr); + break; + + case RTE_FLOW_ITEM_TYPE_TCP: + fd->tun_hdr.l4_len = sizeof(struct rte_tcp_hdr); + break; + + case RTE_FLOW_ITEM_TYPE_UDP: + fd->tun_hdr.l4_len = sizeof(struct rte_udp_hdr); + + /* Patch length */ + fd->tun_hdr.d.hdr8[fd->tun_hdr.l2_len + + fd->tun_hdr.l3_len + 4] = 0x07; + fd->tun_hdr.d.hdr8[fd->tun_hdr.l2_len + + fd->tun_hdr.l3_len + 5] = 0xfd; + break; + + case RTE_FLOW_ITEM_TYPE_ICMP: + fd->tun_hdr.l4_len = sizeof(struct rte_icmp_hdr); + break; + + case RTE_FLOW_ITEM_TYPE_ICMP6: + fd->tun_hdr.l4_len = + sizeof(struct rte_flow_item_icmp6); + break; + + case RTE_FLOW_ITEM_TYPE_GTP: + /* Patch length */ + fd->tun_hdr.d.hdr8[fd->tun_hdr.l2_len + + fd->tun_hdr.l3_len + + fd->tun_hdr.l4_len + 2] = 0x07; + fd->tun_hdr.d.hdr8[fd->tun_hdr.l2_len + + fd->tun_hdr.l3_len + + fd->tun_hdr.l4_len + 3] = 0xfd; + break; + + default: + break; + } + + items++; + } + + if (fd->tun_hdr.nb_vlans > 3) { + NT_LOG(ERR, FILTER, + "ERROR: - Encapsulation with %d vlans not supported.", + (int)fd->tun_hdr.nb_vlans); + flow_nic_set_error(ERR_ACTION_UNSUPPORTED, error); + return -1; + } + + /* Convert encap data to 128-bit little endian */ + for (size_t i = 0; i < (encap->size + 15) / 16; ++i) { + uint8_t *data = fd->tun_hdr.d.hdr8 + i * 16; + + for (unsigned int j = 0; j < 8; ++j) { + uint8_t t = data[j]; + data[j] = data[15 - j]; + data[15 - j] = t; + } + } + } + + break; + + case RTE_FLOW_ACTION_TYPE_RAW_DECAP: + NT_LOG(DBG, FILTER, "Dev:%p: RTE_FLOW_ACTION_TYPE_RAW_DECAP", dev); + + if (action[aidx].conf) { + /* Mask is N/A for RAW_DECAP */ + const struct flow_action_raw_decap *decap = + (const struct flow_action_raw_decap *)action[aidx].conf; + + if (encap_decap_order != 0) { + NT_LOG(ERR, FILTER, + "ERROR: - RAW_ENCAP must follow RAW_DECAP."); + flow_nic_set_error(ERR_ACTION_UNSUPPORTED, error); + return -1; + } + + if (decap->item_count < 2) { + NT_LOG(ERR, FILTER, 
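The byte-reversal loop above rewrites the RAW_ENCAP header into the 128-bit little-endian layout expected downstream. A stand-alone equivalent (illustrative; assumes the buffer is padded to a multiple of 16 bytes, as tun_hdr.d.hdr8 is):

#include <stddef.h>
#include <stdint.h>

static void to_128bit_le(uint8_t *hdr, size_t size)
{
        for (size_t i = 0; i < (size + 15) / 16; ++i) {
                uint8_t *block = hdr + i * 16;

                /* Swap byte j with byte 15-j within each 16-byte block */
                for (unsigned int j = 0; j < 8; ++j) {
                        uint8_t t = block[j];

                        block[j] = block[15 - j];
                        block[15 - j] = t;
                }
        }
}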
+ "ERROR: - RAW_DECAP must decap something."); + flow_nic_set_error(ERR_ACTION_UNSUPPORTED, error); + return -1; + } + + encap_decap_order = 1; + + switch (decap->items[decap->item_count - 2].type) { + case RTE_FLOW_ITEM_TYPE_ETH: + case RTE_FLOW_ITEM_TYPE_VLAN: + fd->header_strip_end_dyn = DYN_L3; + fd->header_strip_end_ofs = 0; + break; + + case RTE_FLOW_ITEM_TYPE_IPV4: + case RTE_FLOW_ITEM_TYPE_IPV6: + fd->header_strip_end_dyn = DYN_L4; + fd->header_strip_end_ofs = 0; + break; + + case RTE_FLOW_ITEM_TYPE_SCTP: + case RTE_FLOW_ITEM_TYPE_TCP: + case RTE_FLOW_ITEM_TYPE_UDP: + case RTE_FLOW_ITEM_TYPE_ICMP: + case RTE_FLOW_ITEM_TYPE_ICMP6: + fd->header_strip_end_dyn = DYN_L4_PAYLOAD; + fd->header_strip_end_ofs = 0; + break; + + case RTE_FLOW_ITEM_TYPE_GTP: + fd->header_strip_end_dyn = DYN_TUN_L3; + fd->header_strip_end_ofs = 0; + break; + + default: + fd->header_strip_end_dyn = DYN_L2; + fd->header_strip_end_ofs = 0; + break; + } + } + + break; + + case RTE_FLOW_ACTION_TYPE_MODIFY_FIELD: + NT_LOG(DBG, FILTER, "Dev:%p: RTE_FLOW_ACTION_TYPE_MODIFY_FIELD", dev); + { + /* Note: This copy method will not work for FLOW_FIELD_POINTER */ + struct rte_flow_action_modify_field modify_field_tmp; + const struct rte_flow_action_modify_field *modify_field = + memcpy_mask_if(&modify_field_tmp, action[aidx].conf, + action_mask ? action_mask[aidx].conf : NULL, + sizeof(struct rte_flow_action_modify_field)); + + uint64_t modify_field_use_flag = 0; + + if (modify_field->src.field != RTE_FLOW_FIELD_VALUE) { + NT_LOG(ERR, FILTER, + "MODIFY_FIELD only src type VALUE is supported."); + flow_nic_set_error(ERR_ACTION_UNSUPPORTED, error); + return -1; + } + + if (modify_field->dst.level > 2) { + NT_LOG(ERR, FILTER, + "MODIFY_FIELD only dst level 0, 1, and 2 is supported."); + flow_nic_set_error(ERR_ACTION_UNSUPPORTED, error); + return -1; + } + + if (modify_field->dst.field == RTE_FLOW_FIELD_IPV4_TTL || + modify_field->dst.field == RTE_FLOW_FIELD_IPV6_HOPLIMIT) { + if (modify_field->operation != RTE_FLOW_MODIFY_SUB) { + NT_LOG(ERR, FILTER, + "MODIFY_FIELD only operation SUB is supported for TTL/HOPLIMIT."); + flow_nic_set_error(ERR_ACTION_UNSUPPORTED, error); + return -1; + } + + if (fd->ttl_sub_enable) { + NT_LOG(ERR, FILTER, + "MODIFY_FIELD TTL/HOPLIMIT resource already in use."); + flow_nic_set_error(ERR_ACTION_UNSUPPORTED, error); + return -1; + } + + fd->ttl_sub_enable = 1; + fd->ttl_sub_ipv4 = + (modify_field->dst.field == RTE_FLOW_FIELD_IPV4_TTL) + ? 1 + : 0; + fd->ttl_sub_outer = (modify_field->dst.level <= 1) ? 1 : 0; + + } else { + if (modify_field->operation != RTE_FLOW_MODIFY_SET) { + NT_LOG(ERR, FILTER, + "MODIFY_FIELD only operation SET is supported in general."); + flow_nic_set_error(ERR_ACTION_UNSUPPORTED, error); + return -1; + } + + if (fd->modify_field_count >= + dev->ndev->be.tpe.nb_cpy_writers) { + NT_LOG(ERR, FILTER, + "MODIFY_FIELD exceeded maximum of %u MODIFY_FIELD actions.", + dev->ndev->be.tpe.nb_cpy_writers); + flow_nic_set_error(ERR_ACTION_UNSUPPORTED, error); + return -1; + } + + int mod_outer = modify_field->dst.level <= 1; + + switch (modify_field->dst.field) { + case RTE_FLOW_FIELD_IPV4_DSCP: + fd->modify_field[fd->modify_field_count].select = + CPY_SELECT_DSCP_IPV4; + fd->modify_field[fd->modify_field_count].dyn = + mod_outer ? 
DYN_L3 : DYN_TUN_L3; + fd->modify_field[fd->modify_field_count].ofs = 1; + fd->modify_field[fd->modify_field_count].len = 1; + break; + + case RTE_FLOW_FIELD_IPV6_DSCP: + fd->modify_field[fd->modify_field_count].select = + CPY_SELECT_DSCP_IPV6; + fd->modify_field[fd->modify_field_count].dyn = + mod_outer ? DYN_L3 : DYN_TUN_L3; + fd->modify_field[fd->modify_field_count].ofs = 0; + /* + * len=2 is needed because + * IPv6 DSCP overlaps 2 bytes. + */ + fd->modify_field[fd->modify_field_count].len = 2; + break; + + case RTE_FLOW_FIELD_GTP_PSC_QFI: + fd->modify_field[fd->modify_field_count].select = + CPY_SELECT_RQI_QFI; + fd->modify_field[fd->modify_field_count].dyn = + mod_outer ? DYN_L4_PAYLOAD + : DYN_TUN_L4_PAYLOAD; + fd->modify_field[fd->modify_field_count].ofs = 14; + fd->modify_field[fd->modify_field_count].len = 1; + break; + + case RTE_FLOW_FIELD_IPV4_SRC: + fd->modify_field[fd->modify_field_count].select = + CPY_SELECT_IPV4; + fd->modify_field[fd->modify_field_count].dyn = + mod_outer ? DYN_L3 : DYN_TUN_L3; + fd->modify_field[fd->modify_field_count].ofs = 12; + fd->modify_field[fd->modify_field_count].len = 4; + break; + + case RTE_FLOW_FIELD_IPV4_DST: + fd->modify_field[fd->modify_field_count].select = + CPY_SELECT_IPV4; + fd->modify_field[fd->modify_field_count].dyn = + mod_outer ? DYN_L3 : DYN_TUN_L3; + fd->modify_field[fd->modify_field_count].ofs = 16; + fd->modify_field[fd->modify_field_count].len = 4; + break; + + case RTE_FLOW_FIELD_TCP_PORT_SRC: + case RTE_FLOW_FIELD_UDP_PORT_SRC: + fd->modify_field[fd->modify_field_count].select = + CPY_SELECT_PORT; + fd->modify_field[fd->modify_field_count].dyn = + mod_outer ? DYN_L4 : DYN_TUN_L4; + fd->modify_field[fd->modify_field_count].ofs = 0; + fd->modify_field[fd->modify_field_count].len = 2; + break; + + case RTE_FLOW_FIELD_TCP_PORT_DST: + case RTE_FLOW_FIELD_UDP_PORT_DST: + fd->modify_field[fd->modify_field_count].select = + CPY_SELECT_PORT; + fd->modify_field[fd->modify_field_count].dyn = + mod_outer ? DYN_L4 : DYN_TUN_L4; + fd->modify_field[fd->modify_field_count].ofs = 2; + fd->modify_field[fd->modify_field_count].len = 2; + break; + + case RTE_FLOW_FIELD_GTP_TEID: + fd->modify_field[fd->modify_field_count].select = + CPY_SELECT_TEID; + fd->modify_field[fd->modify_field_count].dyn = + mod_outer ? DYN_L4_PAYLOAD + : DYN_TUN_L4_PAYLOAD; + fd->modify_field[fd->modify_field_count].ofs = 4; + fd->modify_field[fd->modify_field_count].len = 4; + break; + + default: + NT_LOG(ERR, FILTER, + "MODIFY_FIELD dst type is not supported."); + flow_nic_set_error(ERR_ACTION_UNSUPPORTED, error); + return -1; + } + + modify_field_use_flag = 1 + << fd->modify_field[fd->modify_field_count].select; + + if (modify_field_use_flag & modify_field_use_flags) { + NT_LOG(ERR, FILTER, + "MODIFY_FIELD dst type hardware resource already used."); + flow_nic_set_error(ERR_ACTION_UNSUPPORTED, error); + return -1; + } + + memcpy(fd->modify_field[fd->modify_field_count].value8, + modify_field->src.value, 16); + + fd->modify_field[fd->modify_field_count].level = + modify_field->dst.level; + + modify_field_use_flags |= modify_field_use_flag; + fd->modify_field_count += 1; + } + } + + break; + + case RTE_FLOW_ACTION_TYPE_AGE: + NT_LOG(DBG, FILTER, "Dev:%p: RTE_FLOW_ACTION_TYPE_AGE", dev); + + if (action[aidx].conf) { + struct rte_flow_action_age age_tmp; + const struct rte_flow_action_age *age = + memcpy_mask_if(&age_tmp, action[aidx].conf, + action_mask ? 
action_mask[aidx].conf : NULL, + sizeof(struct rte_flow_action_age)); + fd->age.timeout = hw_mod_flm_scrub_timeout_encode(age->timeout); + fd->age.context = age->context; + NT_LOG(DBG, FILTER, + "normalized timeout: %u, original timeout: %u, context: %p", + hw_mod_flm_scrub_timeout_decode(fd->age.timeout), + age->timeout, fd->age.context); + } + + break; + + default: + NT_LOG(ERR, FILTER, "Invalid or unsupported flow action received - %i", + action[aidx].type); + flow_nic_set_error(ERR_ACTION_UNSUPPORTED, error); + return -1; + } + } + + if (!(encap_decap_order == 0 || encap_decap_order == 2)) { + NT_LOG(ERR, FILTER, "Invalid encap/decap actions"); + return -1; + } + + return 0; +} + +static int interpret_flow_elements(const struct flow_eth_dev *dev, + const struct rte_flow_item elem[], + struct nic_flow_def *fd __rte_unused, + struct rte_flow_error *error, + uint16_t implicit_vlan_vid __rte_unused, + uint32_t *in_port_id, + uint32_t *packet_data, + uint32_t *packet_mask, + struct flm_flow_key_def_s *key_def) +{ + uint32_t any_count = 0; + + unsigned int qw_counter = 0; + unsigned int sw_counter = 0; + + *in_port_id = UINT32_MAX; + + memset(packet_data, 0x0, sizeof(uint32_t) * 10); + memset(packet_mask, 0x0, sizeof(uint32_t) * 10); + memset(key_def, 0x0, sizeof(struct flm_flow_key_def_s)); + + if (elem == NULL) { + flow_nic_set_error(ERR_FAILED, error); + NT_LOG(ERR, FILTER, "Flow items missing"); + return -1; + } + + if (implicit_vlan_vid > 0) { + uint32_t *sw_data = &packet_data[1 - sw_counter]; + uint32_t *sw_mask = &packet_mask[1 - sw_counter]; + + sw_mask[0] = 0x0fff; + sw_data[0] = implicit_vlan_vid & sw_mask[0]; + + km_add_match_elem(&fd->km, &sw_data[0], &sw_mask[0], 1, DYN_FIRST_VLAN, 0); + set_key_def_sw(key_def, sw_counter, DYN_FIRST_VLAN, 0); + sw_counter += 1; + + fd->vlans += 1; + } + + int qw_reserved_mac = 0; + int qw_reserved_ipv6 = 0; + + for (int eidx = 0; elem[eidx].type != RTE_FLOW_ITEM_TYPE_END; ++eidx) { + switch (elem[eidx].type) { + case RTE_FLOW_ITEM_TYPE_ETH: { + const struct rte_ether_hdr *eth_spec = + (const struct rte_ether_hdr *)elem[eidx].spec; + const struct rte_ether_hdr *eth_mask = + (const struct rte_ether_hdr *)elem[eidx].mask; + + if (eth_spec != NULL && eth_mask != NULL) { + if (is_non_zero(eth_mask->dst_addr.addr_bytes, 6) || + is_non_zero(eth_mask->src_addr.addr_bytes, 6)) { + qw_reserved_mac += 1; + } + } + } + break; + + case RTE_FLOW_ITEM_TYPE_IPV6: { + const struct rte_flow_item_ipv6 *ipv6_spec = + (const struct rte_flow_item_ipv6 *)elem[eidx].spec; + const struct rte_flow_item_ipv6 *ipv6_mask = + (const struct rte_flow_item_ipv6 *)elem[eidx].mask; + + if (ipv6_spec != NULL && ipv6_mask != NULL) { + if (is_non_zero(&ipv6_spec->hdr.src_addr, 16)) + qw_reserved_ipv6 += 1; + + if (is_non_zero(&ipv6_spec->hdr.dst_addr, 16)) + qw_reserved_ipv6 += 1; + } + } + break; + + default: + break; + } + } + + int qw_free = 2 - qw_reserved_mac - qw_reserved_ipv6; + + if (qw_free < 0) { + NT_LOG(ERR, FILTER, "Key size too big. 
Out of QW resources."); + flow_nic_set_error(ERR_FAILED, error); + return -1; + } + + for (int eidx = 0; elem[eidx].type != RTE_FLOW_ITEM_TYPE_END; ++eidx) { + switch (elem[eidx].type) { + case RTE_FLOW_ITEM_TYPE_ANY: + NT_LOG(DBG, FILTER, "Adap %i, Port %i: RTE_FLOW_ITEM_TYPE_ANY", + dev->ndev->adapter_no, dev->port); + any_count += 1; + break; + + case RTE_FLOW_ITEM_TYPE_ETH: + NT_LOG(DBG, FILTER, "Adap %i, Port %i: RTE_FLOW_ITEM_TYPE_ETH", + dev->ndev->adapter_no, dev->port); + { + const struct rte_ether_hdr *eth_spec = + (const struct rte_ether_hdr *)elem[eidx].spec; + const struct rte_ether_hdr *eth_mask = + (const struct rte_ether_hdr *)elem[eidx].mask; + + if (any_count > 0) { + NT_LOG(ERR, FILTER, + "Tunneled L2 ethernet not supported"); + flow_nic_set_error(ERR_FAILED, error); + return -1; + } + + if (eth_spec == NULL || eth_mask == NULL) { + fd->l2_prot = PROT_L2_ETH2; + break; + } + + int non_zero = is_non_zero(eth_mask->dst_addr.addr_bytes, 6) || + is_non_zero(eth_mask->src_addr.addr_bytes, 6); + + if (non_zero || + (eth_mask->ether_type != 0 && sw_counter >= 2)) { + if (qw_counter >= 2) { + NT_LOG(ERR, FILTER, + "Key size too big. Out of QW resources."); + flow_nic_set_error(ERR_FAILED, error); + return -1; + } + + uint32_t *qw_data = + &packet_data[2 + 4 - qw_counter * 4]; + uint32_t *qw_mask = + &packet_mask[2 + 4 - qw_counter * 4]; + + qw_data[0] = ((eth_spec->dst_addr.addr_bytes[0] & + eth_mask->dst_addr.addr_bytes[0]) << 24) + + ((eth_spec->dst_addr.addr_bytes[1] & + eth_mask->dst_addr.addr_bytes[1]) << 16) + + ((eth_spec->dst_addr.addr_bytes[2] & + eth_mask->dst_addr.addr_bytes[2]) << 8) + + (eth_spec->dst_addr.addr_bytes[3] & + eth_mask->dst_addr.addr_bytes[3]); + + qw_data[1] = ((eth_spec->dst_addr.addr_bytes[4] & + eth_mask->dst_addr.addr_bytes[4]) << 24) + + ((eth_spec->dst_addr.addr_bytes[5] & + eth_mask->dst_addr.addr_bytes[5]) << 16) + + ((eth_spec->src_addr.addr_bytes[0] & + eth_mask->src_addr.addr_bytes[0]) << 8) + + (eth_spec->src_addr.addr_bytes[1] & + eth_mask->src_addr.addr_bytes[1]); + + qw_data[2] = ((eth_spec->src_addr.addr_bytes[2] & + eth_mask->src_addr.addr_bytes[2]) << 24) + + ((eth_spec->src_addr.addr_bytes[3] & + eth_mask->src_addr.addr_bytes[3]) << 16) + + ((eth_spec->src_addr.addr_bytes[4] & + eth_mask->src_addr.addr_bytes[4]) << 8) + + (eth_spec->src_addr.addr_bytes[5] & + eth_mask->src_addr.addr_bytes[5]); + + qw_data[3] = ntohs(eth_spec->ether_type & + eth_mask->ether_type) << 16; + + qw_mask[0] = (eth_mask->dst_addr.addr_bytes[0] << 24) + + (eth_mask->dst_addr.addr_bytes[1] << 16) + + (eth_mask->dst_addr.addr_bytes[2] << 8) + + eth_mask->dst_addr.addr_bytes[3]; + + qw_mask[1] = (eth_mask->dst_addr.addr_bytes[4] << 24) + + (eth_mask->dst_addr.addr_bytes[5] << 16) + + (eth_mask->src_addr.addr_bytes[0] << 8) + + eth_mask->src_addr.addr_bytes[1]; + + qw_mask[2] = (eth_mask->src_addr.addr_bytes[2] << 24) + + (eth_mask->src_addr.addr_bytes[3] << 16) + + (eth_mask->src_addr.addr_bytes[4] << 8) + + eth_mask->src_addr.addr_bytes[5]; + + qw_mask[3] = ntohs(eth_mask->ether_type) << 16; + + km_add_match_elem(&fd->km, + &qw_data[(size_t)(qw_counter * 4)], + &qw_mask[(size_t)(qw_counter * 4)], 4, DYN_L2, 0); + set_key_def_qw(key_def, qw_counter, DYN_L2, 0); + qw_counter += 1; + + if (!non_zero) + qw_free -= 1; + + } else if (eth_mask->ether_type != 0) { + if (sw_counter >= 2) { + NT_LOG(ERR, FILTER, + "Key size too big. 
Out of SW-QW resources."); + flow_nic_set_error(ERR_FAILED, error); + return -1; + } + + uint32_t *sw_data = &packet_data[1 - sw_counter]; + uint32_t *sw_mask = &packet_mask[1 - sw_counter]; + + sw_mask[0] = ntohs(eth_mask->ether_type) << 16; + sw_data[0] = ntohs(eth_spec->ether_type) << 16 & sw_mask[0]; + + km_add_match_elem(&fd->km, &sw_data[0], + &sw_mask[0], 1, DYN_L2, 12); + set_key_def_sw(key_def, sw_counter, DYN_L2, 12); + sw_counter += 1; + } + + fd->l2_prot = PROT_L2_ETH2; + } + + break; + + case RTE_FLOW_ITEM_TYPE_VLAN: + NT_LOG(DBG, FILTER, "Adap %i, Port %i: RTE_FLOW_ITEM_TYPE_VLAN", + dev->ndev->adapter_no, dev->port); + { + const struct rte_vlan_hdr *vlan_spec = + (const struct rte_vlan_hdr *)elem[eidx].spec; + const struct rte_vlan_hdr *vlan_mask = + (const struct rte_vlan_hdr *)elem[eidx].mask; + + if (vlan_spec == NULL || vlan_mask == NULL) { + fd->vlans += 1; + break; + } + + if (!vlan_mask->vlan_tci && !vlan_mask->eth_proto) + break; + + if (implicit_vlan_vid > 0) { + NT_LOG(ERR, FILTER, + "Multiple VLANs not supported for implicit VLAN patterns."); + flow_nic_set_error(ERR_MATCH_INVALID_OR_UNSUPPORTED_ELEM, + error); + return -1; + } + + if (sw_counter < 2) { + uint32_t *sw_data = &packet_data[1 - sw_counter]; + uint32_t *sw_mask = &packet_mask[1 - sw_counter]; + + sw_mask[0] = ntohs(vlan_mask->vlan_tci) << 16 | + ntohs(vlan_mask->eth_proto); + sw_data[0] = ntohs(vlan_spec->vlan_tci) << 16 | + ntohs(vlan_spec->eth_proto); + sw_data[0] &= sw_mask[0]; + + km_add_match_elem(&fd->km, &sw_data[0], &sw_mask[0], 1, + DYN_FIRST_VLAN, 2 + 4 * fd->vlans); + set_key_def_sw(key_def, sw_counter, DYN_FIRST_VLAN, + 2 + 4 * fd->vlans); + sw_counter += 1; + + } else if (qw_counter < 2 && qw_free > 0) { + uint32_t *qw_data = &packet_data[2 + 4 - qw_counter * 4]; + uint32_t *qw_mask = &packet_mask[2 + 4 - qw_counter * 4]; + + qw_data[0] = ntohs(vlan_spec->vlan_tci) << 16 | + ntohs(vlan_spec->eth_proto); + qw_data[1] = 0; + qw_data[2] = 0; + qw_data[3] = 0; + + qw_mask[0] = ntohs(vlan_mask->vlan_tci) << 16 | + ntohs(vlan_mask->eth_proto); + qw_mask[1] = 0; + qw_mask[2] = 0; + qw_mask[3] = 0; + + qw_data[0] &= qw_mask[0]; + qw_data[1] &= qw_mask[1]; + qw_data[2] &= qw_mask[2]; + qw_data[3] &= qw_mask[3]; + + km_add_match_elem(&fd->km, &qw_data[0], &qw_mask[0], 4, + DYN_FIRST_VLAN, 2 + 4 * fd->vlans); + set_key_def_qw(key_def, qw_counter, DYN_FIRST_VLAN, + 2 + 4 * fd->vlans); + qw_counter += 1; + qw_free -= 1; + + } else { + NT_LOG(ERR, FILTER, + "Key size too big. 
Out of SW-QW resources."); + flow_nic_set_error(ERR_FAILED, error); + return -1; + } + + fd->vlans += 1; + } + + break; + + case RTE_FLOW_ITEM_TYPE_IPV4: + NT_LOG(DBG, FILTER, "Adap %i, Port %i: RTE_FLOW_ITEM_TYPE_IPV4", + dev->ndev->adapter_no, dev->port); + { + const struct rte_flow_item_ipv4 *ipv4_spec = + (const struct rte_flow_item_ipv4 *)elem[eidx].spec; + const struct rte_flow_item_ipv4 *ipv4_mask = + (const struct rte_flow_item_ipv4 *)elem[eidx].mask; + + if (ipv4_spec == NULL || ipv4_mask == NULL) { + if (any_count > 0 || fd->l3_prot != -1) + fd->tunnel_l3_prot = PROT_TUN_L3_IPV4; + else + fd->l3_prot = PROT_L3_IPV4; + break; + } + + if (ipv4_mask->hdr.version_ihl != 0 || + ipv4_mask->hdr.type_of_service != 0 || + ipv4_mask->hdr.total_length != 0 || + ipv4_mask->hdr.packet_id != 0 || + (ipv4_mask->hdr.fragment_offset != 0 && + (ipv4_spec->hdr.fragment_offset != 0xffff || + ipv4_mask->hdr.fragment_offset != 0xffff)) || + ipv4_mask->hdr.time_to_live != 0 || + ipv4_mask->hdr.hdr_checksum != 0) { + NT_LOG(ERR, FILTER, + "Requested IPv4 field not support by running SW version."); + flow_nic_set_error(ERR_FAILED, error); + return -1; + } + + if (ipv4_spec->hdr.fragment_offset == 0xffff && + ipv4_mask->hdr.fragment_offset == 0xffff) { + fd->fragmentation = 0xfe; + } + + int match_cnt = (ipv4_mask->hdr.src_addr != 0) + + (ipv4_mask->hdr.dst_addr != 0) + + (ipv4_mask->hdr.next_proto_id != 0); + + if (match_cnt <= 0) { + if (any_count > 0 || fd->l3_prot != -1) + fd->tunnel_l3_prot = PROT_TUN_L3_IPV4; + else + fd->l3_prot = PROT_L3_IPV4; + break; + } + + if (qw_free > 0 && + (match_cnt >= 2 || + (match_cnt == 1 && sw_counter >= 2))) { + if (qw_counter >= 2) { + NT_LOG(ERR, FILTER, + "Key size too big. Out of QW resources."); + flow_nic_set_error(ERR_FAILED, + error); + return -1; + } + + uint32_t *qw_data = &packet_data[2 + 4 - qw_counter * 4]; + uint32_t *qw_mask = &packet_mask[2 + 4 - qw_counter * 4]; + + qw_mask[0] = 0; + qw_data[0] = 0; + + qw_mask[1] = ipv4_mask->hdr.next_proto_id << 16; + qw_data[1] = ipv4_spec->hdr.next_proto_id + << 16 & qw_mask[1]; + + qw_mask[2] = ntohl(ipv4_mask->hdr.src_addr); + qw_mask[3] = ntohl(ipv4_mask->hdr.dst_addr); + + qw_data[2] = ntohl(ipv4_spec->hdr.src_addr) & qw_mask[2]; + qw_data[3] = ntohl(ipv4_spec->hdr.dst_addr) & qw_mask[3]; + + km_add_match_elem(&fd->km, &qw_data[0], &qw_mask[0], 4, + any_count > 0 ? DYN_TUN_L3 : DYN_L3, 4); + set_key_def_qw(key_def, qw_counter, any_count > 0 + ? DYN_TUN_L3 : DYN_L3, 4); + qw_counter += 1; + qw_free -= 1; + + if (any_count > 0 || fd->l3_prot != -1) + fd->tunnel_l3_prot = PROT_TUN_L3_IPV4; + else + fd->l3_prot = PROT_L3_IPV4; + break; + } + + if (ipv4_mask->hdr.src_addr) { + if (sw_counter >= 2) { + NT_LOG(ERR, FILTER, + "Key size too big. Out of SW resources."); + flow_nic_set_error(ERR_FAILED, error); + return -1; + } + + uint32_t *sw_data = &packet_data[1 - sw_counter]; + uint32_t *sw_mask = &packet_mask[1 - sw_counter]; + + sw_mask[0] = ntohl(ipv4_mask->hdr.src_addr); + sw_data[0] = ntohl(ipv4_spec->hdr.src_addr) & sw_mask[0]; + + km_add_match_elem(&fd->km, &sw_data[0], &sw_mask[0], 1, + any_count > 0 ? DYN_TUN_L3 : DYN_L3, 12); + set_key_def_sw(key_def, sw_counter, any_count > 0 + ? DYN_TUN_L3 : DYN_L3, 12); + sw_counter += 1; + } + + if (ipv4_mask->hdr.dst_addr) { + if (sw_counter >= 2) { + NT_LOG(ERR, FILTER, + "Key size too big. 
Out of SW resources."); + flow_nic_set_error(ERR_FAILED, error); + return -1; + } + + uint32_t *sw_data = &packet_data[1 - sw_counter]; + uint32_t *sw_mask = &packet_mask[1 - sw_counter]; + + sw_mask[0] = ntohl(ipv4_mask->hdr.dst_addr); + sw_data[0] = ntohl(ipv4_spec->hdr.dst_addr) & sw_mask[0]; + + km_add_match_elem(&fd->km, &sw_data[0], &sw_mask[0], 1, + any_count > 0 ? DYN_TUN_L3 : DYN_L3, 16); + set_key_def_sw(key_def, sw_counter, any_count > 0 + ? DYN_TUN_L3 : DYN_L3, 16); + sw_counter += 1; + } + + if (ipv4_mask->hdr.next_proto_id) { + if (sw_counter >= 2) { + NT_LOG(ERR, FILTER, + "Key size too big. Out of SW resources."); + flow_nic_set_error(ERR_FAILED, error); + return -1; + } + + uint32_t *sw_data = &packet_data[1 - sw_counter]; + uint32_t *sw_mask = &packet_mask[1 - sw_counter]; + + sw_mask[0] = ipv4_mask->hdr.next_proto_id << 16; + sw_data[0] = ipv4_spec->hdr.next_proto_id + << 16 & sw_mask[0]; + + km_add_match_elem(&fd->km, &sw_data[0], &sw_mask[0], 1, + any_count > 0 ? DYN_TUN_L3 : DYN_L3, 8); + set_key_def_sw(key_def, sw_counter, any_count > 0 + ? DYN_TUN_L3 : DYN_L3, 8); + sw_counter += 1; + } + + if (any_count > 0 || fd->l3_prot != -1) + fd->tunnel_l3_prot = PROT_TUN_L3_IPV4; + + else + fd->l3_prot = PROT_L3_IPV4; + } + + break; + + case RTE_FLOW_ITEM_TYPE_IPV6: + NT_LOG(DBG, FILTER, "Adap %i, Port %i: RTE_FLOW_ITEM_TYPE_IPV6", + dev->ndev->adapter_no, dev->port); + { + const struct rte_flow_item_ipv6 *ipv6_spec = + (const struct rte_flow_item_ipv6 *)elem[eidx].spec; + const struct rte_flow_item_ipv6 *ipv6_mask = + (const struct rte_flow_item_ipv6 *)elem[eidx].mask; + + if (ipv6_spec == NULL || ipv6_mask == NULL) { + if (any_count > 0 || fd->l3_prot != -1) + fd->tunnel_l3_prot = PROT_TUN_L3_IPV6; + else + fd->l3_prot = PROT_L3_IPV6; + break; + } + + if (ipv6_mask->hdr.vtc_flow != 0 || + ipv6_mask->hdr.payload_len != 0 || + ipv6_mask->hdr.hop_limits != 0) { + NT_LOG(ERR, FILTER, + "Requested IPv6 field not support by running SW version"); + flow_nic_set_error(ERR_FAILED, error); + return -1; + } + + if (is_non_zero(&ipv6_spec->hdr.src_addr, 16)) { + if (qw_counter >= 2) { + NT_LOG(ERR, FILTER, + "Key size too big. Out of QW resources."); + flow_nic_set_error(ERR_FAILED, error); + return -1; + } + + uint32_t *qw_data = &packet_data[2 + 4 - qw_counter * 4]; + uint32_t *qw_mask = &packet_mask[2 + 4 - qw_counter * 4]; + + memcpy(&qw_data[0], &ipv6_spec->hdr.src_addr, 16); + memcpy(&qw_mask[0], &ipv6_mask->hdr.src_addr, 16); + + qw_data[0] = ntohl(qw_data[0]); + qw_data[1] = ntohl(qw_data[1]); + qw_data[2] = ntohl(qw_data[2]); + qw_data[3] = ntohl(qw_data[3]); + + qw_mask[0] = ntohl(qw_mask[0]); + qw_mask[1] = ntohl(qw_mask[1]); + qw_mask[2] = ntohl(qw_mask[2]); + qw_mask[3] = ntohl(qw_mask[3]); + + qw_data[0] &= qw_mask[0]; + qw_data[1] &= qw_mask[1]; + qw_data[2] &= qw_mask[2]; + qw_data[3] &= qw_mask[3]; + + km_add_match_elem(&fd->km, &qw_data[0], &qw_mask[0], 4, + any_count > 0 ? DYN_TUN_L3 : DYN_L3, 8); + set_key_def_qw(key_def, qw_counter, any_count > 0 + ? DYN_TUN_L3 : DYN_L3, 8); + qw_counter += 1; + } + + if (is_non_zero(&ipv6_spec->hdr.dst_addr, 16)) { + if (qw_counter >= 2) { + NT_LOG(ERR, FILTER, + "Key size too big. 
Out of QW resources."); + flow_nic_set_error(ERR_FAILED, error); + return -1; + } + + uint32_t *qw_data = &packet_data[2 + 4 - qw_counter * 4]; + uint32_t *qw_mask = &packet_mask[2 + 4 - qw_counter * 4]; + + memcpy(&qw_data[0], &ipv6_spec->hdr.dst_addr, 16); + memcpy(&qw_mask[0], &ipv6_mask->hdr.dst_addr, 16); + + qw_data[0] = ntohl(qw_data[0]); + qw_data[1] = ntohl(qw_data[1]); + qw_data[2] = ntohl(qw_data[2]); + qw_data[3] = ntohl(qw_data[3]); + + qw_mask[0] = ntohl(qw_mask[0]); + qw_mask[1] = ntohl(qw_mask[1]); + qw_mask[2] = ntohl(qw_mask[2]); + qw_mask[3] = ntohl(qw_mask[3]); + + qw_data[0] &= qw_mask[0]; + qw_data[1] &= qw_mask[1]; + qw_data[2] &= qw_mask[2]; + qw_data[3] &= qw_mask[3]; + + km_add_match_elem(&fd->km, &qw_data[0], &qw_mask[0], 4, + any_count > 0 ? DYN_TUN_L3 : DYN_L3, 24); + set_key_def_qw(key_def, qw_counter, any_count > 0 + ? DYN_TUN_L3 : DYN_L3, 24); + qw_counter += 1; + } + + if (ipv6_mask->hdr.proto != 0) { + if (sw_counter < 2) { + uint32_t *sw_data = &packet_data[1 - sw_counter]; + uint32_t *sw_mask = &packet_mask[1 - sw_counter]; + + sw_mask[0] = ipv6_mask->hdr.proto << 8; + sw_data[0] = ipv6_spec->hdr.proto << 8 & sw_mask[0]; + + km_add_match_elem(&fd->km, &sw_data[0], &sw_mask[0], + 1, any_count > 0 ? DYN_TUN_L3 : DYN_L3, 4); + set_key_def_sw(key_def, sw_counter, any_count > 0 + ? DYN_TUN_L3 : DYN_L3, 4); + sw_counter += 1; + + } else if (qw_counter < 2 && qw_free > 0) { + uint32_t *qw_data = + &packet_data[2 + 4 - qw_counter * 4]; + uint32_t *qw_mask = + &packet_mask[2 + 4 - qw_counter * 4]; + + qw_data[0] = 0; + qw_data[1] = ipv6_mask->hdr.proto << 8; + qw_data[2] = 0; + qw_data[3] = 0; + + qw_mask[0] = 0; + qw_mask[1] = ipv6_spec->hdr.proto << 8; + qw_mask[2] = 0; + qw_mask[3] = 0; + + qw_data[0] &= qw_mask[0]; + qw_data[1] &= qw_mask[1]; + qw_data[2] &= qw_mask[2]; + qw_data[3] &= qw_mask[3]; + + km_add_match_elem(&fd->km, &qw_data[0], &qw_mask[0], + 4, any_count > 0 ? DYN_TUN_L3 : DYN_L3, 0); + set_key_def_qw(key_def, qw_counter, any_count > 0 + ? DYN_TUN_L3 : DYN_L3, 0); + qw_counter += 1; + qw_free -= 1; + + } else { + NT_LOG(ERR, FILTER, + "Key size too big. 
Out of SW-QW resources.");
+ flow_nic_set_error(ERR_FAILED, error);
+ return -1;
+ }
+ }
+
+ if (any_count > 0 || fd->l3_prot != -1)
+ fd->tunnel_l3_prot = PROT_TUN_L3_IPV6;
+
+ else
+ fd->l3_prot = PROT_L3_IPV6;
+ }
+
+ break;
+
+ case RTE_FLOW_ITEM_TYPE_UDP:
+ NT_LOG(DBG, FILTER, "Adap %i, Port %i: RTE_FLOW_ITEM_TYPE_UDP",
+ dev->ndev->adapter_no, dev->port);
+ {
+ const struct rte_flow_item_udp *udp_spec =
+ (const struct rte_flow_item_udp *)elem[eidx].spec;
+ const struct rte_flow_item_udp *udp_mask =
+ (const struct rte_flow_item_udp *)elem[eidx].mask;
+
+ if (udp_spec == NULL || udp_mask == NULL) {
+ if (any_count > 0 || fd->l4_prot != -1) {
+ fd->tunnel_l4_prot = PROT_TUN_L4_UDP;
+ key_def->inner_proto = 1;
+ } else {
+ fd->l4_prot = PROT_L4_UDP;
+ key_def->outer_proto = 1;
+ }
+ break;
+ }
+
+ if (udp_mask->hdr.dgram_len != 0 ||
+ udp_mask->hdr.dgram_cksum != 0) {
+ NT_LOG(ERR, FILTER,
+ "Requested UDP field not supported by running SW version");
+ flow_nic_set_error(ERR_FAILED, error);
+ return -1;
+ }
+
+ if (udp_mask->hdr.src_port || udp_mask->hdr.dst_port) {
+ if (sw_counter < 2) {
+ uint32_t *sw_data = &packet_data[1 - sw_counter];
+ uint32_t *sw_mask = &packet_mask[1 - sw_counter];
+
+ sw_mask[0] = (ntohs(udp_mask->hdr.src_port) << 16) |
+ ntohs(udp_mask->hdr.dst_port);
+ sw_data[0] = ((ntohs(udp_spec->hdr.src_port)
+ << 16) | ntohs(udp_spec->hdr.dst_port)) &
+ sw_mask[0];
+
+ km_add_match_elem(&fd->km, &sw_data[0], &sw_mask[0],
+ 1, any_count > 0 ? DYN_TUN_L4 : DYN_L4, 0);
+ set_key_def_sw(key_def, sw_counter, any_count > 0
+ ? DYN_TUN_L4 : DYN_L4, 0);
+ sw_counter += 1;
+
+ } else if (qw_counter < 2 && qw_free > 0) {
+ uint32_t *qw_data =
+ &packet_data[2 + 4 - qw_counter * 4];
+ uint32_t *qw_mask =
+ &packet_mask[2 + 4 - qw_counter * 4];
+
+ qw_data[0] = (ntohs(udp_spec->hdr.src_port)
+ << 16) | ntohs(udp_spec->hdr.dst_port);
+ qw_data[1] = 0;
+ qw_data[2] = 0;
+ qw_data[3] = 0;
+
+ qw_mask[0] = (ntohs(udp_mask->hdr.src_port)
+ << 16) | ntohs(udp_mask->hdr.dst_port);
+ qw_mask[1] = 0;
+ qw_mask[2] = 0;
+ qw_mask[3] = 0;
+
+ qw_data[0] &= qw_mask[0];
+ qw_data[1] &= qw_mask[1];
+ qw_data[2] &= qw_mask[2];
+ qw_data[3] &= qw_mask[3];
+
+ km_add_match_elem(&fd->km, &qw_data[0], &qw_mask[0],
+ 4, any_count > 0 ? DYN_TUN_L4 : DYN_L4, 0);
+ set_key_def_qw(key_def, qw_counter, any_count > 0
+ ? DYN_TUN_L4 : DYN_L4, 0);
+ qw_counter += 1;
+ qw_free -= 1;
+
+ } else {
+ NT_LOG(ERR, FILTER,
+ "Key size too big. Out of SW-QW resources.");
+ flow_nic_set_error(ERR_FAILED, error);
+ return -1;
+ }
+ }
+
+ if (any_count > 0 || fd->l4_prot != -1) {
+ fd->tunnel_l4_prot = PROT_TUN_L4_UDP;
+ key_def->inner_proto = 1;
+
+ } else {
+ fd->l4_prot = PROT_L4_UDP;
+ key_def->outer_proto = 1;
+ }
+ }
+
+ break;
+
+ case RTE_FLOW_ITEM_TYPE_SCTP:
+ NT_LOG(DBG, FILTER, "Adap %i, Port %i: RTE_FLOW_ITEM_TYPE_SCTP",
+ dev->ndev->adapter_no, dev->port);
+ {
+ const struct rte_flow_item_sctp *sctp_spec =
+ (const struct rte_flow_item_sctp *)elem[eidx].spec;
+ const struct rte_flow_item_sctp *sctp_mask =
+ (const struct rte_flow_item_sctp *)elem[eidx].mask;
+
+ if (sctp_spec == NULL || sctp_mask == NULL) {
+ if (any_count > 0 || fd->l4_prot != -1) {
+ fd->tunnel_l4_prot = PROT_TUN_L4_SCTP;
+ key_def->inner_proto = 1;
+ } else {
+ fd->l4_prot = PROT_L4_SCTP;
+ key_def->outer_proto = 1;
+ }
+ break;
+ }
+
+ if (sctp_mask->hdr.tag != 0 || sctp_mask->hdr.cksum != 0) {
+ NT_LOG(ERR, FILTER,
+ "Requested SCTP field not supported by running SW version");
+ flow_nic_set_error(ERR_FAILED, error);
+ return -1;
+ }
+
+ if (sctp_mask->hdr.src_port || sctp_mask->hdr.dst_port) {
+ if (sw_counter < 2) {
+ uint32_t *sw_data = &packet_data[1 - sw_counter];
+ uint32_t *sw_mask = &packet_mask[1 - sw_counter];
+
+ sw_mask[0] = (ntohs(sctp_mask->hdr.src_port)
+ << 16) | ntohs(sctp_mask->hdr.dst_port);
+ sw_data[0] = ((ntohs(sctp_spec->hdr.src_port)
+ << 16) | ntohs(sctp_spec->hdr.dst_port)) &
+ sw_mask[0];
+
+ km_add_match_elem(&fd->km, &sw_data[0], &sw_mask[0],
+ 1, any_count > 0 ? DYN_TUN_L4 : DYN_L4, 0);
+ set_key_def_sw(key_def, sw_counter, any_count > 0
+ ? DYN_TUN_L4 : DYN_L4, 0);
+ sw_counter += 1;
+
+ } else if (qw_counter < 2 && qw_free > 0) {
+ uint32_t *qw_data =
+ &packet_data[2 + 4 - qw_counter * 4];
+ uint32_t *qw_mask =
+ &packet_mask[2 + 4 - qw_counter * 4];
+
+ qw_data[0] = (ntohs(sctp_spec->hdr.src_port)
+ << 16) | ntohs(sctp_spec->hdr.dst_port);
+ qw_data[1] = 0;
+ qw_data[2] = 0;
+ qw_data[3] = 0;
+
+ qw_mask[0] = (ntohs(sctp_mask->hdr.src_port)
+ << 16) | ntohs(sctp_mask->hdr.dst_port);
+ qw_mask[1] = 0;
+ qw_mask[2] = 0;
+ qw_mask[3] = 0;
+
+ qw_data[0] &= qw_mask[0];
+ qw_data[1] &= qw_mask[1];
+ qw_data[2] &= qw_mask[2];
+ qw_data[3] &= qw_mask[3];
+
+ km_add_match_elem(&fd->km, &qw_data[0], &qw_mask[0],
+ 4, any_count > 0 ? DYN_TUN_L4 : DYN_L4, 0);
+ set_key_def_qw(key_def, qw_counter, any_count > 0
+ ? DYN_TUN_L4 : DYN_L4, 0);
+ qw_counter += 1;
+ qw_free -= 1;
+
+ } else {
+ NT_LOG(ERR, FILTER,
+ "Key size too big. 
Out of SW-QW resources."); + flow_nic_set_error(ERR_FAILED, error); + return -1; + } + } + + if (any_count > 0 || fd->l4_prot != -1) { + fd->tunnel_l4_prot = PROT_TUN_L4_SCTP; + key_def->inner_proto = 1; + + } else { + fd->l4_prot = PROT_L4_SCTP; + key_def->outer_proto = 1; + } + } + + break; + + case RTE_FLOW_ITEM_TYPE_ICMP: + NT_LOG(DBG, FILTER, "Adap %i, Port %i: RTE_FLOW_ITEM_TYPE_ICMP", + dev->ndev->adapter_no, dev->port); + { + const struct rte_flow_item_icmp *icmp_spec = + (const struct rte_flow_item_icmp *)elem[eidx].spec; + const struct rte_flow_item_icmp *icmp_mask = + (const struct rte_flow_item_icmp *)elem[eidx].mask; + + if (icmp_spec == NULL || icmp_mask == NULL) { + if (any_count > 0 || fd->l4_prot != -1) { + fd->tunnel_l4_prot = PROT_TUN_L4_ICMP; + fd->tunnel_ip_prot = 1; + key_def->inner_proto = 1; + } else { + fd->l4_prot = PROT_L4_ICMP; + fd->ip_prot = 1; + key_def->outer_proto = 1; + } + break; + } + + if (icmp_mask->hdr.icmp_cksum != 0 || + icmp_mask->hdr.icmp_ident != 0 || + icmp_mask->hdr.icmp_seq_nb != 0) { + NT_LOG(ERR, FILTER, + "Requested ICMP field not supported by running SW version"); + flow_nic_set_error(ERR_FAILED, error); + return -1; + } + + if (icmp_mask->hdr.icmp_type || icmp_mask->hdr.icmp_code) { + if (sw_counter < 2) { + uint32_t *sw_data = &packet_data[1 - sw_counter]; + uint32_t *sw_mask = &packet_mask[1 - sw_counter]; + + sw_mask[0] = icmp_mask->hdr.icmp_type << 24 | + icmp_mask->hdr.icmp_code << 16; + sw_data[0] = icmp_spec->hdr.icmp_type << 24 | + icmp_spec->hdr.icmp_code << 16; + sw_data[0] &= sw_mask[0]; + + km_add_match_elem(&fd->km, &sw_data[0], + &sw_mask[0], 1, any_count > 0 + ? DYN_TUN_L4 : DYN_L4, 0); + set_key_def_sw(key_def, sw_counter, + any_count > 0 ? DYN_TUN_L4 : DYN_L4, 0); + sw_counter += 1; + + } else if (qw_counter < 2 && qw_free > 0) { + uint32_t *qw_data = + &packet_data[2 + 4 - qw_counter * 4]; + uint32_t *qw_mask = + &packet_mask[2 + 4 - qw_counter * 4]; + + qw_data[0] = icmp_spec->hdr.icmp_type << 24 | + icmp_spec->hdr.icmp_code << 16; + qw_data[1] = 0; + qw_data[2] = 0; + qw_data[3] = 0; + + qw_mask[0] = icmp_mask->hdr.icmp_type << 24 | + icmp_mask->hdr.icmp_code << 16; + qw_mask[1] = 0; + qw_mask[2] = 0; + qw_mask[3] = 0; + + qw_data[0] &= qw_mask[0]; + qw_data[1] &= qw_mask[1]; + qw_data[2] &= qw_mask[2]; + qw_data[3] &= qw_mask[3]; + + km_add_match_elem(&fd->km, &qw_data[0], &qw_mask[0], + 4, any_count > 0 ? DYN_TUN_L4 : DYN_L4, 0); + set_key_def_qw(key_def, qw_counter, any_count > 0 + ? DYN_TUN_L4 : DYN_L4, 0); + qw_counter += 1; + qw_free -= 1; + + } else { + NT_LOG(ERR, FILTER, + "Key size too big. 
Out of SW-QW resources."); + flow_nic_set_error(ERR_FAILED, error); + return -1; + } + } + + if (any_count > 0 || fd->l4_prot != -1) { + fd->tunnel_l4_prot = PROT_TUN_L4_ICMP; + fd->tunnel_ip_prot = 1; + key_def->inner_proto = 1; + + } else { + fd->l4_prot = PROT_L4_ICMP; + fd->ip_prot = 1; + key_def->outer_proto = 1; + } + } + + break; + + case RTE_FLOW_ITEM_TYPE_ICMP6: + NT_LOG(DBG, FILTER, "Adap %i, Port %i: RTE_FLOW_ITEM_TYPE_ICMP6", + dev->ndev->adapter_no, dev->port); + { + const struct rte_flow_item_icmp6 *icmp_spec = + (const struct rte_flow_item_icmp6 *)elem[eidx].spec; + const struct rte_flow_item_icmp6 *icmp_mask = + (const struct rte_flow_item_icmp6 *)elem[eidx].mask; + + if (icmp_spec == NULL || icmp_mask == NULL) { + if (any_count > 0 || fd->l4_prot != -1) { + fd->tunnel_l4_prot = PROT_TUN_L4_ICMP; + fd->tunnel_ip_prot = 58; + key_def->inner_proto = 1; + } else { + fd->l4_prot = PROT_L4_ICMP; + fd->ip_prot = 58; + key_def->outer_proto = 1; + } + break; + } + + if (icmp_mask->checksum != 0) { + NT_LOG(ERR, FILTER, + "Requested ICMP6 field not supported by running SW version"); + flow_nic_set_error(ERR_FAILED, error); + return -1; + } + + if (icmp_mask->type || icmp_mask->code) { + if (sw_counter < 2) { + uint32_t *sw_data = &packet_data[1 - sw_counter]; + uint32_t *sw_mask = &packet_mask[1 - sw_counter]; + + sw_mask[0] = icmp_mask->type << 24 | + icmp_mask->code << 16; + sw_data[0] = icmp_spec->type << 24 | + icmp_spec->code << 16; + sw_data[0] &= sw_mask[0]; + + km_add_match_elem(&fd->km, &sw_data[0], &sw_mask[0], + 1, any_count > 0 ? DYN_TUN_L4 : DYN_L4, 0); + + set_key_def_sw(key_def, sw_counter, any_count > 0 + ? DYN_TUN_L4 : DYN_L4, 0); + sw_counter += 1; + + } else if (qw_counter < 2 && qw_free > 0) { + uint32_t *qw_data = + &packet_data[2 + 4 - qw_counter * 4]; + uint32_t *qw_mask = + &packet_mask[2 + 4 - qw_counter * 4]; + + qw_data[0] = icmp_spec->type << 24 | + icmp_spec->code << 16; + qw_data[1] = 0; + qw_data[2] = 0; + qw_data[3] = 0; + + qw_mask[0] = icmp_mask->type << 24 | + icmp_mask->code << 16; + qw_mask[1] = 0; + qw_mask[2] = 0; + qw_mask[3] = 0; + + qw_data[0] &= qw_mask[0]; + qw_data[1] &= qw_mask[1]; + qw_data[2] &= qw_mask[2]; + qw_data[3] &= qw_mask[3]; + + km_add_match_elem(&fd->km, &qw_data[0], &qw_mask[0], + 4, any_count > 0 ? DYN_TUN_L4 : DYN_L4, 0); + set_key_def_qw(key_def, qw_counter, any_count > 0 + ? DYN_TUN_L4 : DYN_L4, 0); + qw_counter += 1; + qw_free -= 1; + + } else { + NT_LOG(ERR, FILTER, + "Key size too big. 
Out of SW-QW resources.");
+ flow_nic_set_error(ERR_FAILED, error);
+ return -1;
+ }
+ }
+
+ if (any_count > 0 || fd->l4_prot != -1) {
+ fd->tunnel_l4_prot = PROT_TUN_L4_ICMP;
+ fd->tunnel_ip_prot = 58;
+ key_def->inner_proto = 1;
+
+ } else {
+ fd->l4_prot = PROT_L4_ICMP;
+ fd->ip_prot = 58;
+ key_def->outer_proto = 1;
+ }
+ }
+
+ break;
+
+ case RTE_FLOW_ITEM_TYPE_TCP:
+ NT_LOG(DBG, FILTER, "Adap %i, Port %i: RTE_FLOW_ITEM_TYPE_TCP",
+ dev->ndev->adapter_no, dev->port);
+ {
+ const struct rte_flow_item_tcp *tcp_spec =
+ (const struct rte_flow_item_tcp *)elem[eidx].spec;
+ const struct rte_flow_item_tcp *tcp_mask =
+ (const struct rte_flow_item_tcp *)elem[eidx].mask;
+
+ if (tcp_spec == NULL || tcp_mask == NULL) {
+ if (any_count > 0 || fd->l4_prot != -1) {
+ fd->tunnel_l4_prot = PROT_TUN_L4_TCP;
+ key_def->inner_proto = 1;
+ } else {
+ fd->l4_prot = PROT_L4_TCP;
+ key_def->outer_proto = 1;
+ }
+ break;
+ }
+
+ if (tcp_mask->hdr.sent_seq != 0 ||
+ tcp_mask->hdr.recv_ack != 0 ||
+ tcp_mask->hdr.data_off != 0 ||
+ tcp_mask->hdr.tcp_flags != 0 ||
+ tcp_mask->hdr.rx_win != 0 ||
+ tcp_mask->hdr.cksum != 0 ||
+ tcp_mask->hdr.tcp_urp != 0) {
+ NT_LOG(ERR, FILTER,
+ "Requested TCP field not supported by running SW version");
+ flow_nic_set_error(ERR_FAILED, error);
+ return -1;
+ }
+
+ if (tcp_mask->hdr.src_port || tcp_mask->hdr.dst_port) {
+ if (sw_counter < 2) {
+ uint32_t *sw_data = &packet_data[1 - sw_counter];
+ uint32_t *sw_mask = &packet_mask[1 - sw_counter];
+
+ sw_mask[0] = (ntohs(tcp_mask->hdr.src_port)
+ << 16) | ntohs(tcp_mask->hdr.dst_port);
+ sw_data[0] =
+ ((ntohs(tcp_spec->hdr.src_port) << 16) |
+ ntohs(tcp_spec->hdr.dst_port)) & sw_mask[0];
+
+ km_add_match_elem(&fd->km, &sw_data[0], &sw_mask[0],
+ 1, any_count > 0 ? DYN_TUN_L4 : DYN_L4, 0);
+ set_key_def_sw(key_def, sw_counter, any_count > 0
+ ? DYN_TUN_L4 : DYN_L4, 0);
+ sw_counter += 1;
+
+ } else if (qw_counter < 2 && qw_free > 0) {
+ uint32_t *qw_data =
+ &packet_data[2 + 4 - qw_counter * 4];
+ uint32_t *qw_mask =
+ &packet_mask[2 + 4 - qw_counter * 4];
+
+ qw_data[0] = (ntohs(tcp_spec->hdr.src_port)
+ << 16) | ntohs(tcp_spec->hdr.dst_port);
+ qw_data[1] = 0;
+ qw_data[2] = 0;
+ qw_data[3] = 0;
+
+ qw_mask[0] = (ntohs(tcp_mask->hdr.src_port)
+ << 16) | ntohs(tcp_mask->hdr.dst_port);
+ qw_mask[1] = 0;
+ qw_mask[2] = 0;
+ qw_mask[3] = 0;
+
+ qw_data[0] &= qw_mask[0];
+ qw_data[1] &= qw_mask[1];
+ qw_data[2] &= qw_mask[2];
+ qw_data[3] &= qw_mask[3];
+
+ km_add_match_elem(&fd->km, &qw_data[0], &qw_mask[0],
+ 4, any_count > 0 ? DYN_TUN_L4 : DYN_L4, 0);
+ set_key_def_qw(key_def, qw_counter, any_count > 0
+ ? DYN_TUN_L4 : DYN_L4, 0);
+ qw_counter += 1;
+ qw_free -= 1;
+
+ } else {
+ NT_LOG(ERR, FILTER,
+ "Key size too big. Out of SW-QW resources.");
+ flow_nic_set_error(ERR_FAILED, error);
+ return -1;
+ }
+ }
+
+ if (any_count > 0 || fd->l4_prot != -1) {
+ fd->tunnel_l4_prot = PROT_TUN_L4_TCP;
+ key_def->inner_proto = 1;
+
+ } else {
+ fd->l4_prot = PROT_L4_TCP;
+ key_def->outer_proto = 1;
+ }
+ }
+
+ break;
+
+ case RTE_FLOW_ITEM_TYPE_GTP:
+ NT_LOG(DBG, FILTER, "Adap %i, Port %i: RTE_FLOW_ITEM_TYPE_GTP",
+ dev->ndev->adapter_no, dev->port);
+ {
+ const struct rte_gtp_hdr *gtp_spec =
+ (const struct rte_gtp_hdr *)elem[eidx].spec;
+ const struct rte_gtp_hdr *gtp_mask =
+ (const struct rte_gtp_hdr *)elem[eidx].mask;
+
+ if (gtp_spec == NULL || gtp_mask == NULL) {
+ fd->tunnel_prot = PROT_TUN_GTPV1U;
+ break;
+ }
+
+ if (gtp_mask->gtp_hdr_info != 0 ||
+ gtp_mask->msg_type != 0 || gtp_mask->plen != 0) {
+ NT_LOG(ERR, FILTER,
+ "Requested GTP field not supported by running SW version");
+ flow_nic_set_error(ERR_FAILED, error);
+ return -1;
+ }
+
+ if (gtp_mask->teid) {
+ if (sw_counter < 2) {
+ uint32_t *sw_data =
+ &packet_data[1 - sw_counter];
+ uint32_t *sw_mask =
+ &packet_mask[1 - sw_counter];
+
+ sw_mask[0] = ntohl(gtp_mask->teid);
+ sw_data[0] =
+ ntohl(gtp_spec->teid) & sw_mask[0];
+
+ km_add_match_elem(&fd->km, &sw_data[0],
+ &sw_mask[0], 1,
+ DYN_L4_PAYLOAD, 4);
+ set_key_def_sw(key_def, sw_counter,
+ DYN_L4_PAYLOAD, 4);
+ sw_counter += 1;
+
+ } else if (qw_counter < 2 && qw_free > 0) {
+ uint32_t *qw_data =
+ &packet_data[2 + 4 -
+ qw_counter * 4];
+ uint32_t *qw_mask =
+ &packet_mask[2 + 4 -
+ qw_counter * 4];
+
+ qw_data[0] = ntohl(gtp_spec->teid);
+ qw_data[1] = 0;
+ qw_data[2] = 0;
+ qw_data[3] = 0;
+
+ qw_mask[0] = ntohl(gtp_mask->teid);
+ qw_mask[1] = 0;
+ qw_mask[2] = 0;
+ qw_mask[3] = 0;
+
+ qw_data[0] &= qw_mask[0];
+ qw_data[1] &= qw_mask[1];
+ qw_data[2] &= qw_mask[2];
+ qw_data[3] &= qw_mask[3];
+
+ km_add_match_elem(&fd->km, &qw_data[0],
+ &qw_mask[0], 4,
+ DYN_L4_PAYLOAD, 4);
+ set_key_def_qw(key_def, qw_counter,
+ DYN_L4_PAYLOAD, 4);
+ qw_counter += 1;
+ qw_free -= 1;
+
+ } else {
+ NT_LOG(ERR, FILTER,
+ "Key size too big. 
Out of SW-QW resources."); + flow_nic_set_error(ERR_FAILED, error); + return -1; + } + } + + fd->tunnel_prot = PROT_TUN_GTPV1U; + } + + break; + + case RTE_FLOW_ITEM_TYPE_GTP_PSC: + NT_LOG(DBG, FILTER, "Adap %i, Port %i: RTE_FLOW_ITEM_TYPE_GTP_PSC", + dev->ndev->adapter_no, dev->port); + { + const struct rte_gtp_psc_generic_hdr *gtp_psc_spec = + (const struct rte_gtp_psc_generic_hdr *)elem[eidx].spec; + const struct rte_gtp_psc_generic_hdr *gtp_psc_mask = + (const struct rte_gtp_psc_generic_hdr *)elem[eidx].mask; + + if (gtp_psc_spec == NULL || gtp_psc_mask == NULL) { + fd->tunnel_prot = PROT_TUN_GTPV1U; + break; + } + + if (gtp_psc_mask->type != 0 || + gtp_psc_mask->ext_hdr_len != 0) { + NT_LOG(ERR, FILTER, + "Requested GTP PSC field is not supported by running SW version"); + flow_nic_set_error(ERR_FAILED, error); + return -1; + } + + if (gtp_psc_mask->qfi) { + if (sw_counter < 2) { + uint32_t *sw_data = + &packet_data[1 - sw_counter]; + uint32_t *sw_mask = + &packet_mask[1 - sw_counter]; + + sw_mask[0] = ntohl(gtp_psc_mask->qfi); + sw_data[0] = ntohl(gtp_psc_spec->qfi) & + sw_mask[0]; + + km_add_match_elem(&fd->km, &sw_data[0], + &sw_mask[0], 1, + DYN_L4_PAYLOAD, 14); + set_key_def_sw(key_def, sw_counter, + DYN_L4_PAYLOAD, 14); + sw_counter += 1; + + } else if (qw_counter < 2 && qw_free > 0) { + uint32_t *qw_data = + &packet_data[2 + 4 - + qw_counter * 4]; + uint32_t *qw_mask = + &packet_mask[2 + 4 - + qw_counter * 4]; + + qw_data[0] = ntohl(gtp_psc_spec->qfi); + qw_data[1] = 0; + qw_data[2] = 0; + qw_data[3] = 0; + + qw_mask[0] = ntohl(gtp_psc_mask->qfi); + qw_mask[1] = 0; + qw_mask[2] = 0; + qw_mask[3] = 0; + + qw_data[0] &= qw_mask[0]; + qw_data[1] &= qw_mask[1]; + qw_data[2] &= qw_mask[2]; + qw_data[3] &= qw_mask[3]; + + km_add_match_elem(&fd->km, &qw_data[0], + &qw_mask[0], 4, + DYN_L4_PAYLOAD, 14); + set_key_def_qw(key_def, qw_counter, + DYN_L4_PAYLOAD, 14); + qw_counter += 1; + qw_free -= 1; + + } else { + NT_LOG(ERR, FILTER, + "Key size too big. Out of SW-QW resources."); + flow_nic_set_error(ERR_FAILED, error); + return -1; + } + } + + fd->tunnel_prot = PROT_TUN_GTPV1U; + } + + break; + + case RTE_FLOW_ITEM_TYPE_PORT_ID: + NT_LOG(DBG, FILTER, "Adap %i, Port %i: RTE_FLOW_ITEM_TYPE_PORT_ID", + dev->ndev->adapter_no, dev->port); + + if (elem[eidx].spec) { + *in_port_id = + ((const struct rte_flow_item_port_id *)elem[eidx].spec)->id; + } + + break; + + case RTE_FLOW_ITEM_TYPE_VOID: + NT_LOG(DBG, FILTER, "Adap %i, Port %i: RTE_FLOW_ITEM_TYPE_VOID", + dev->ndev->adapter_no, dev->port); + break; + + default: + NT_LOG(ERR, FILTER, "Invalid or unsupported flow request: %d", + (int)elem[eidx].type); + flow_nic_set_error(ERR_MATCH_INVALID_OR_UNSUPPORTED_ELEM, error); + return -1; + } + } + + return 0; +} + +static void copy_fd_to_fh_flm(struct flow_handle *fh, const struct nic_flow_def *fd, + const uint32_t *packet_data, uint32_t flm_key_id, uint32_t flm_ft, + uint16_t rpl_ext_ptr, uint32_t flm_scrub __rte_unused, uint32_t priority) +{ + for (int i = 0; i < MAX_FLM_MTRS_SUPPORTED; ++i) { + struct flm_flow_mtr_handle_s *handle = fh->dev->ndev->flm_mtr_handle; + struct flm_mtr_stat_s *mtr_stat = handle->port_stats[fh->caller_id]->stats; + fh->flm_mtr_ids[i] = + fd->mtr_ids[i] == UINT32_MAX ? 
0 : mtr_stat[fd->mtr_ids[i]].flm_id; + } + + switch (fd->l4_prot) { + case PROT_L4_TCP: + fh->flm_prot = 6; + break; + + case PROT_L4_UDP: + fh->flm_prot = 17; + break; + + case PROT_L4_SCTP: + fh->flm_prot = 132; + break; + + case PROT_L4_ICMP: + fh->flm_prot = fd->ip_prot; + break; + + default: + switch (fd->tunnel_l4_prot) { + case PROT_TUN_L4_TCP: + fh->flm_prot = 6; + break; + + case PROT_TUN_L4_UDP: + fh->flm_prot = 17; + break; + + case PROT_TUN_L4_SCTP: + fh->flm_prot = 132; + break; + + case PROT_TUN_L4_ICMP: + fh->flm_prot = fd->tunnel_ip_prot; + break; + + default: + fh->flm_prot = 0; + break; + } + + break; + } + + memcpy(fh->flm_data, packet_data, sizeof(uint32_t) * 10); + + fh->flm_kid = flm_key_id; + fh->flm_rpl_ext_ptr = rpl_ext_ptr; + fh->flm_prio = (uint8_t)priority; + fh->flm_ft = (uint8_t)flm_ft; + fh->flm_scrub_prof = (uint8_t)flm_scrub; + + for (unsigned int i = 0; i < fd->modify_field_count; ++i) { + switch (fd->modify_field[i].select) { + case CPY_SELECT_DSCP_IPV4: + case CPY_SELECT_RQI_QFI: + fh->flm_rqi = (fd->modify_field[i].value8[0] >> 6) & 0x1; + fh->flm_qfi = fd->modify_field[i].value8[0] & 0x3f; + break; + + case CPY_SELECT_IPV4: + fh->flm_nat_ipv4 = ntohl(fd->modify_field[i].value32[0]); + break; + + case CPY_SELECT_PORT: + fh->flm_nat_port = ntohs(fd->modify_field[i].value16[0]); + break; + + case CPY_SELECT_TEID: + fh->flm_teid = ntohl(fd->modify_field[i].value32[0]); + break; + + default: + NT_LOG(DBG, FILTER, "Unknown modify field: %d", + fd->modify_field[i].select); + break; + } + } + + fh->flm_mtu_fragmentation_recipe = fd->flm_mtu_fragmentation_recipe; + fh->context = fd->age.context; +} + +static int convert_fh_to_fh_flm(struct flow_handle *fh, const uint32_t *packet_data, + uint32_t flm_key_id, uint32_t flm_ft, uint16_t rpl_ext_ptr, + uint32_t flm_scrub, uint32_t priority) +{ + struct nic_flow_def *fd; + struct flow_handle fh_copy; + + if (fh->type != FLOW_HANDLE_TYPE_FLOW) + return -1; + + memcpy(&fh_copy, fh, sizeof(struct flow_handle)); + memset(fh, 0x0, sizeof(struct flow_handle)); + fd = fh_copy.fd; + + fh->type = FLOW_HANDLE_TYPE_FLM; + fh->caller_id = fh_copy.caller_id; + fh->dev = fh_copy.dev; + fh->next = fh_copy.next; + fh->prev = fh_copy.prev; + fh->user_data = fh_copy.user_data; + + fh->flm_db_idx_counter = fh_copy.db_idx_counter; + + for (int i = 0; i < RES_COUNT; ++i) + fh->flm_db_idxs[i] = fh_copy.db_idxs[i]; + + copy_fd_to_fh_flm(fh, fd, packet_data, flm_key_id, flm_ft, rpl_ext_ptr, flm_scrub, + priority); + + free(fd); + + return 0; +} + + +static void setup_db_qsl_data(struct nic_flow_def *fd, struct hw_db_inline_qsl_data *qsl_data, + uint32_t num_dest_port, uint32_t num_queues) +{ + memset(qsl_data, 0x0, sizeof(struct hw_db_inline_qsl_data)); + + if (fd->dst_num_avail <= 0) { + qsl_data->drop = 1; + + } else { + assert(fd->dst_num_avail < HW_DB_INLINE_MAX_QST_PER_QSL); + + uint32_t ports[fd->dst_num_avail]; + uint32_t queues[fd->dst_num_avail]; + + uint32_t port_index = 0; + uint32_t queue_index = 0; + uint32_t max = num_dest_port > num_queues ? num_dest_port : num_queues; + + memset(ports, 0, fd->dst_num_avail); + memset(queues, 0, fd->dst_num_avail); + + qsl_data->table_size = max; + qsl_data->retransmit = num_dest_port > 0 ? 
1 : 0; + + for (int i = 0; i < fd->dst_num_avail; ++i) + if (fd->dst_id[i].type == PORT_PHY) + ports[port_index++] = fd->dst_id[i].id; + + else if (fd->dst_id[i].type == PORT_VIRT) + queues[queue_index++] = fd->dst_id[i].id; + + for (uint32_t i = 0; i < max; ++i) { + if (num_dest_port > 0) { + qsl_data->table[i].tx_port = ports[i % num_dest_port]; + qsl_data->table[i].tx_port_en = 1; + } + + if (num_queues > 0) { + qsl_data->table[i].queue = queues[i % num_queues]; + qsl_data->table[i].queue_en = 1; + } + } + } +} + +static void setup_db_hsh_data(struct nic_flow_def *fd, struct hw_db_inline_hsh_data *hsh_data) +{ + memset(hsh_data, 0x0, sizeof(struct hw_db_inline_hsh_data)); + + hsh_data->func = fd->hsh.func; + hsh_data->hash_mask = fd->hsh.types; + + if (fd->hsh.key != NULL) { + /* + * Just a safeguard. Check and error handling of rss_key_len + * shall be done at api layers above. + */ + memcpy(&hsh_data->key, fd->hsh.key, + fd->hsh.key_len < MAX_RSS_KEY_LEN ? fd->hsh.key_len : MAX_RSS_KEY_LEN); + } +} + +static int setup_flow_flm_actions(struct flow_eth_dev *dev, + const struct nic_flow_def *fd, + const struct hw_db_inline_qsl_data *qsl_data, + const struct hw_db_inline_hsh_data *hsh_data, + uint32_t group, + uint32_t local_idxs[], + uint32_t *local_idx_counter, + uint16_t *flm_rpl_ext_ptr, + uint32_t *flm_ft, + uint32_t *flm_scrub __rte_unused, + struct rte_flow_error *error) +{ + const bool empty_pattern = fd_has_empty_pattern(fd); + + /* Setup COT */ + struct hw_db_inline_cot_data cot_data = { + .matcher_color_contrib = empty_pattern ? 0x0 : 0x4, /* FT key C */ + .frag_rcp = empty_pattern ? fd->flm_mtu_fragmentation_recipe : 0, + }; + struct hw_db_cot_idx cot_idx = + hw_db_inline_cot_add(dev->ndev, dev->ndev->hw_db_handle, &cot_data); + local_idxs[(*local_idx_counter)++] = cot_idx.raw; + + if (cot_idx.error) { + NT_LOG(ERR, FILTER, "Could not reference COT resource"); + flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION, error); + return -1; + } + + /* Finalize QSL */ + struct hw_db_qsl_idx qsl_idx = + hw_db_inline_qsl_add(dev->ndev, dev->ndev->hw_db_handle, qsl_data); + local_idxs[(*local_idx_counter)++] = qsl_idx.raw; + + if (qsl_idx.error) { + NT_LOG(ERR, FILTER, "Could not reference QSL resource"); + flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION, error); + return -1; + } + + /* Setup HSH */ + struct hw_db_hsh_idx hsh_idx = + hw_db_inline_hsh_add(dev->ndev, dev->ndev->hw_db_handle, hsh_data); + local_idxs[(*local_idx_counter)++] = hsh_idx.raw; + + if (hsh_idx.error) { + NT_LOG(ERR, FILTER, "Could not reference HSH resource"); + flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION, error); + return -1; + } + + /* Setup SLC LR */ + struct hw_db_slc_lr_idx slc_lr_idx = { .raw = 0 }; + + if (fd->header_strip_end_dyn != 0 || fd->header_strip_end_ofs != 0) { + struct hw_db_inline_slc_lr_data slc_lr_data = { + .head_slice_en = 1, + .head_slice_dyn = fd->header_strip_end_dyn, + .head_slice_ofs = fd->header_strip_end_ofs, + }; + slc_lr_idx = + hw_db_inline_slc_lr_add(dev->ndev, dev->ndev->hw_db_handle, &slc_lr_data); + local_idxs[(*local_idx_counter)++] = slc_lr_idx.raw; + + if (slc_lr_idx.error) { + NT_LOG(ERR, FILTER, "Could not reference SLC LR resource"); + flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION, error); + return -1; + } + } + + /* Setup TPE EXT */ + if (fd->tun_hdr.len > 0) { + assert(fd->tun_hdr.len <= HW_DB_INLINE_MAX_ENCAP_SIZE); + + struct hw_db_inline_tpe_ext_data tpe_ext_data = { + .size = fd->tun_hdr.len, + }; + + memset(tpe_ext_data.hdr8, 0x0, 
HW_DB_INLINE_MAX_ENCAP_SIZE); + memcpy(tpe_ext_data.hdr8, fd->tun_hdr.d.hdr8, (fd->tun_hdr.len + 15) & ~15); + + struct hw_db_tpe_ext_idx tpe_ext_idx = + hw_db_inline_tpe_ext_add(dev->ndev, dev->ndev->hw_db_handle, + &tpe_ext_data); + local_idxs[(*local_idx_counter)++] = tpe_ext_idx.raw; + + if (tpe_ext_idx.error) { + NT_LOG(ERR, FILTER, "Could not reference TPE EXT resource"); + flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION, error); + return -1; + } + + if (flm_rpl_ext_ptr) + *flm_rpl_ext_ptr = tpe_ext_idx.ids; + } + + /* Setup TPE */ + assert(fd->modify_field_count <= 6); + + struct hw_db_inline_tpe_data tpe_data = { + .insert_len = fd->tun_hdr.len, + .new_outer = fd->tun_hdr.new_outer, + .calc_eth_type_from_inner_ip = + !fd->tun_hdr.new_outer && fd->header_strip_end_dyn == DYN_TUN_L3, + .ttl_en = fd->ttl_sub_enable, + .ttl_dyn = fd->ttl_sub_outer ? DYN_L3 : DYN_TUN_L3, + .ttl_ofs = fd->ttl_sub_ipv4 ? 8 : 7, + }; + + for (unsigned int i = 0; i < fd->modify_field_count; ++i) { + tpe_data.writer[i].en = 1; + tpe_data.writer[i].reader_select = fd->modify_field[i].select; + tpe_data.writer[i].dyn = fd->modify_field[i].dyn; + tpe_data.writer[i].ofs = fd->modify_field[i].ofs; + tpe_data.writer[i].len = fd->modify_field[i].len; + } + + if (fd->tun_hdr.new_outer) { + const int fcs_length = 4; + + /* L4 length */ + tpe_data.len_a_en = 1; + tpe_data.len_a_pos_dyn = DYN_L4; + tpe_data.len_a_pos_ofs = 4; + tpe_data.len_a_add_dyn = 18; + tpe_data.len_a_add_ofs = (uint32_t)(-fcs_length) & 0xff; + tpe_data.len_a_sub_dyn = DYN_L4; + + /* L3 length */ + tpe_data.len_b_en = 1; + tpe_data.len_b_pos_dyn = DYN_L3; + tpe_data.len_b_pos_ofs = fd->tun_hdr.ip_version == 4 ? 2 : 4; + tpe_data.len_b_add_dyn = 18; + tpe_data.len_b_add_ofs = (uint32_t)(-fcs_length) & 0xff; + tpe_data.len_b_sub_dyn = DYN_L3; + + /* GTP length */ + tpe_data.len_c_en = 1; + tpe_data.len_c_pos_dyn = DYN_L4_PAYLOAD; + tpe_data.len_c_pos_ofs = 2; + tpe_data.len_c_add_dyn = 18; + tpe_data.len_c_add_ofs = (uint32_t)(-8 - fcs_length) & 0xff; + tpe_data.len_c_sub_dyn = DYN_L4_PAYLOAD; + } + + struct hw_db_tpe_idx tpe_idx = + hw_db_inline_tpe_add(dev->ndev, dev->ndev->hw_db_handle, &tpe_data); + + local_idxs[(*local_idx_counter)++] = tpe_idx.raw; + + if (tpe_idx.error) { + NT_LOG(ERR, FILTER, "Could not reference TPE resource"); + flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION, error); + return -1; + } + + /* Setup SCRUB profile */ + struct hw_db_inline_scrub_data scrub_data = { .timeout = fd->age.timeout }; + struct hw_db_flm_scrub_idx scrub_idx = + hw_db_inline_scrub_add(dev->ndev, dev->ndev->hw_db_handle, &scrub_data); + local_idxs[(*local_idx_counter)++] = scrub_idx.raw; + + if (scrub_idx.error) { + NT_LOG(ERR, FILTER, "Could not reference FLM SCRUB resource"); + flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION, error); + return -1; + } + + if (flm_scrub) + *flm_scrub = scrub_idx.ids; + + /* Setup Action Set */ + struct hw_db_inline_action_set_data action_set_data = { + .contains_jump = 0, + .cot = cot_idx, + .qsl = qsl_idx, + .slc_lr = slc_lr_idx, + .tpe = tpe_idx, + .hsh = hsh_idx, + .scrub = scrub_idx, + }; + struct hw_db_action_set_idx action_set_idx = + hw_db_inline_action_set_add(dev->ndev, dev->ndev->hw_db_handle, &action_set_data); + local_idxs[(*local_idx_counter)++] = action_set_idx.raw; + + if (action_set_idx.error) { + NT_LOG(ERR, FILTER, "Could not reference Action Set resource"); + flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION, error); + return -1; + } + + /* Setup FLM FT */ + struct hw_db_inline_flm_ft_data 
flm_ft_data = { + .is_group_zero = 0, + .group = group, + .action_set = action_set_idx, + }; + struct hw_db_flm_ft flm_ft_idx = empty_pattern + ? hw_db_inline_flm_ft_default(dev->ndev, dev->ndev->hw_db_handle, &flm_ft_data) + : hw_db_inline_flm_ft_add(dev->ndev, dev->ndev->hw_db_handle, &flm_ft_data); + local_idxs[(*local_idx_counter)++] = flm_ft_idx.raw; + + if (flm_ft_idx.error) { + NT_LOG(ERR, FILTER, "Could not reference FLM FT resource"); + flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION, error); + return -1; + } + + if (flm_ft) + *flm_ft = flm_ft_idx.id1; + + return 0; +} + +static struct flow_handle *create_flow_filter(struct flow_eth_dev *dev, struct nic_flow_def *fd, + const struct rte_flow_attr *attr, + uint16_t forced_vlan_vid __rte_unused, uint16_t caller_id, + struct rte_flow_error *error, uint32_t port_id, + uint32_t num_dest_port, uint32_t num_queues, + uint32_t *packet_data, uint32_t *packet_mask, + struct flm_flow_key_def_s *key_def) +{ + struct flow_handle *fh = calloc(1, sizeof(struct flow_handle)); + + fh->type = FLOW_HANDLE_TYPE_FLOW; + fh->port_id = port_id; + fh->dev = dev; + fh->fd = fd; + fh->caller_id = caller_id; + + struct hw_db_inline_qsl_data qsl_data; + setup_db_qsl_data(fd, &qsl_data, num_dest_port, num_queues); + + struct hw_db_inline_hsh_data hsh_data; + setup_db_hsh_data(fd, &hsh_data); + + if (attr->group > 0 && fd_has_empty_pattern(fd)) { + /* + * Default flow for group 1..32 + */ + + if (setup_flow_flm_actions(dev, fd, &qsl_data, &hsh_data, attr->group, fh->db_idxs, + &fh->db_idx_counter, NULL, NULL, NULL, error)) { + goto error_out; + } + + fh->context = fd->age.context; + nic_insert_flow(dev->ndev, fh); + + } else if (attr->group > 0) { + /* + * Flow for group 1..32 + */ + + /* Setup FLM RCP */ + struct hw_db_inline_flm_rcp_data flm_data = { + .qw0_dyn = key_def->qw0_dyn, + .qw0_ofs = key_def->qw0_ofs, + .qw4_dyn = key_def->qw4_dyn, + .qw4_ofs = key_def->qw4_ofs, + .sw8_dyn = key_def->sw8_dyn, + .sw8_ofs = key_def->sw8_ofs, + .sw9_dyn = key_def->sw9_dyn, + .sw9_ofs = key_def->sw9_ofs, + .outer_prot = key_def->outer_proto, + .inner_prot = key_def->inner_proto, + }; + memcpy(flm_data.mask, packet_mask, sizeof(uint32_t) * 10); + struct hw_db_flm_idx flm_idx = + hw_db_inline_flm_add(dev->ndev, dev->ndev->hw_db_handle, &flm_data, + attr->group); + fh->db_idxs[fh->db_idx_counter++] = flm_idx.raw; + + if (flm_idx.error) { + NT_LOG(ERR, FILTER, "Could not reference FLM RPC resource"); + flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION, error); + goto error_out; + } + + /* Setup Actions */ + uint16_t flm_rpl_ext_ptr = 0; + uint32_t flm_ft = 0; + uint32_t flm_scrub = 0; + + if (setup_flow_flm_actions(dev, fd, &qsl_data, &hsh_data, attr->group, fh->db_idxs, + &fh->db_idx_counter, &flm_rpl_ext_ptr, &flm_ft, + &flm_scrub, error)) { + goto error_out; + } + + /* Program flow */ + convert_fh_to_fh_flm(fh, packet_data, flm_idx.id1 + 2, flm_ft, flm_rpl_ext_ptr, + flm_scrub, attr->priority & 0x3); + flm_flow_programming(fh, NT_FLM_OP_LEARN); + + nic_insert_flow_flm(dev->ndev, fh); + + } else { + /* + * Flow for group 0 + */ + int identical_km_entry_ft = -1; + + /* Setup Action Set */ + + /* SCRUB/AGE action is not supported for group 0 */ + if (fd->age.timeout != 0 || fd->age.context != NULL) { + NT_LOG(ERR, FILTER, "Action AGE is not supported for flow in group 0"); + flow_nic_set_error(ERR_ACTION_AGE_UNSUPPORTED_GROUP_0, error); + goto error_out; + } + + /* NOTE: SCRUB record 0 is used by default with timeout 0, i.e. 
flow will never + * AGE-out + */ + struct hw_db_inline_action_set_data action_set_data = { 0 }; + (void)action_set_data; + + if (fd->jump_to_group != UINT32_MAX) { + /* Action Set only contains jump */ + action_set_data.contains_jump = 1; + action_set_data.jump = fd->jump_to_group; + + } else { + /* Action Set doesn't contain jump */ + action_set_data.contains_jump = 0; + + /* Setup COT */ + struct hw_db_inline_cot_data cot_data = { + .matcher_color_contrib = 0, + .frag_rcp = fd->flm_mtu_fragmentation_recipe, + }; + struct hw_db_cot_idx cot_idx = + hw_db_inline_cot_add(dev->ndev, dev->ndev->hw_db_handle, + &cot_data); + fh->db_idxs[fh->db_idx_counter++] = cot_idx.raw; + action_set_data.cot = cot_idx; + + if (cot_idx.error) { + NT_LOG(ERR, FILTER, "Could not reference COT resource"); + flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION, error); + goto error_out; + } + + /* Finalize QSL */ + struct hw_db_qsl_idx qsl_idx = + hw_db_inline_qsl_add(dev->ndev, dev->ndev->hw_db_handle, + &qsl_data); + fh->db_idxs[fh->db_idx_counter++] = qsl_idx.raw; + action_set_data.qsl = qsl_idx; + + if (qsl_idx.error) { + NT_LOG(ERR, FILTER, "Could not reference QSL resource"); + flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION, error); + goto error_out; + } + + /* Setup HSH */ + struct hw_db_hsh_idx hsh_idx = + hw_db_inline_hsh_add(dev->ndev, dev->ndev->hw_db_handle, + &hsh_data); + fh->db_idxs[fh->db_idx_counter++] = hsh_idx.raw; + action_set_data.hsh = hsh_idx; + + if (hsh_idx.error) { + NT_LOG(ERR, FILTER, "Could not reference HSH resource"); + flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION, error); + goto error_out; + } + + /* Setup TPE */ + if (fd->ttl_sub_enable) { + struct hw_db_inline_tpe_data tpe_data = { + .insert_len = fd->tun_hdr.len, + .new_outer = fd->tun_hdr.new_outer, + .calc_eth_type_from_inner_ip = !fd->tun_hdr.new_outer && + fd->header_strip_end_dyn == DYN_TUN_L3, + .ttl_en = fd->ttl_sub_enable, + .ttl_dyn = fd->ttl_sub_outer ? DYN_L3 : DYN_TUN_L3, + .ttl_ofs = fd->ttl_sub_ipv4 ? 8 : 7, + }; + struct hw_db_tpe_idx tpe_idx = + hw_db_inline_tpe_add(dev->ndev, dev->ndev->hw_db_handle, + &tpe_data); + fh->db_idxs[fh->db_idx_counter++] = tpe_idx.raw; + action_set_data.tpe = tpe_idx; + + if (tpe_idx.error) { + NT_LOG(ERR, FILTER, "Could not reference TPE resource"); + flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION, error); + goto error_out; + } + } + } + + struct hw_db_action_set_idx action_set_idx = + hw_db_inline_action_set_add(dev->ndev, dev->ndev->hw_db_handle, + &action_set_data); + + fh->db_idxs[fh->db_idx_counter++] = action_set_idx.raw; + + if (action_set_idx.error) { + NT_LOG(ERR, FILTER, "Could not reference Action Set resource"); + flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION, error); + goto error_out; + } + + /* Setup CAT */ + struct hw_db_inline_cat_data cat_data = { + .vlan_mask = (0xf << fd->vlans) & 0xf, + .mac_port_mask = 1 << fh->port_id, + .ptc_mask_frag = fd->fragmentation, + .ptc_mask_l2 = fd->l2_prot != -1 ? (1 << fd->l2_prot) : -1, + .ptc_mask_l3 = fd->l3_prot != -1 ? (1 << fd->l3_prot) : -1, + .ptc_mask_l4 = fd->l4_prot != -1 ? (1 << fd->l4_prot) : -1, + .err_mask_ttl = (fd->ttl_sub_enable && + fd->ttl_sub_outer) ? -1 : 0x1, + .ptc_mask_tunnel = fd->tunnel_prot != + -1 ? (1 << fd->tunnel_prot) : -1, + .ptc_mask_l3_tunnel = + fd->tunnel_l3_prot != -1 ? (1 << fd->tunnel_l3_prot) : -1, + .ptc_mask_l4_tunnel = + fd->tunnel_l4_prot != -1 ? (1 << fd->tunnel_l4_prot) : -1, + .err_mask_ttl_tunnel = + (fd->ttl_sub_enable && !fd->ttl_sub_outer) ? 
-1 : 0x1, + .ip_prot = fd->ip_prot, + .ip_prot_tunnel = fd->tunnel_ip_prot, + }; + struct hw_db_cat_idx cat_idx = + hw_db_inline_cat_add(dev->ndev, dev->ndev->hw_db_handle, &cat_data); + fh->db_idxs[fh->db_idx_counter++] = cat_idx.raw; + + if (cat_idx.error) { + NT_LOG(ERR, FILTER, "Could not reference CAT resource"); + flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION, error); + goto error_out; + } + + /* Setup KM RCP */ + struct hw_db_inline_km_rcp_data km_rcp_data = { .rcp = 0 }; + + if (fd->km.num_ftype_elem) { + struct flow_handle *flow = dev->ndev->flow_base, *found_flow = NULL; + + if (km_key_create(&fd->km, fh->port_id)) { + NT_LOG(ERR, FILTER, "KM creation failed"); + flow_nic_set_error(ERR_MATCH_FAILED_BY_HW_LIMITS, error); + goto error_out; + } + + fd->km.be = &dev->ndev->be; + + /* Look for existing KM RCPs */ + while (flow) { + if (flow->type == FLOW_HANDLE_TYPE_FLOW && + flow->fd->km.flow_type) { + int res = km_key_compare(&fd->km, &flow->fd->km); + + if (res < 0) { + /* Flow rcp and match data is identical */ + identical_km_entry_ft = flow->fd->km.flow_type; + found_flow = flow; + break; + } + + if (res > 0) { + /* Flow rcp found and match data is different */ + found_flow = flow; + } + } + + flow = flow->next; + } + + km_attach_ndev_resource_management(&fd->km, &dev->ndev->km_res_handle); + + if (found_flow != NULL) { + /* Reuse existing KM RCP */ + const struct hw_db_inline_km_rcp_data *other_km_rcp_data = + hw_db_inline_find_data(dev->ndev, dev->ndev->hw_db_handle, + HW_DB_IDX_TYPE_KM_RCP, + (struct hw_db_idx *) + found_flow->flm_db_idxs, + found_flow->flm_db_idx_counter); + + if (other_km_rcp_data == NULL || + flow_nic_ref_resource(dev->ndev, RES_KM_CATEGORY, + other_km_rcp_data->rcp)) { + NT_LOG(ERR, FILTER, + "Could not reference existing KM RCP resource"); + flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION, error); + goto error_out; + } + + km_rcp_data.rcp = other_km_rcp_data->rcp; + } else { + /* Alloc new KM RCP */ + int rcp = flow_nic_alloc_resource(dev->ndev, RES_KM_CATEGORY, 1); + + if (rcp < 0) { + NT_LOG(ERR, FILTER, + "Could not reference KM RCP resource (flow_nic_alloc)"); + flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION, error); + goto error_out; + } + + km_rcp_set(&fd->km, rcp); + km_rcp_data.rcp = (uint32_t)rcp; + } + } + + struct hw_db_km_idx km_idx = + hw_db_inline_km_add(dev->ndev, dev->ndev->hw_db_handle, &km_rcp_data); + + fh->db_idxs[fh->db_idx_counter++] = km_idx.raw; + + if (km_idx.error) { + NT_LOG(ERR, FILTER, "Could not reference KM RCP resource (db_inline)"); + flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION, error); + goto error_out; + } + + /* Setup KM FT */ + struct hw_db_inline_km_ft_data km_ft_data = { + .cat = cat_idx, + .km = km_idx, + .action_set = action_set_idx, + }; + struct hw_db_km_ft km_ft_idx = + hw_db_inline_km_ft_add(dev->ndev, dev->ndev->hw_db_handle, &km_ft_data); + fh->db_idxs[fh->db_idx_counter++] = km_ft_idx.raw; + + if (km_ft_idx.error) { + NT_LOG(ERR, FILTER, "Could not reference KM FT resource"); + flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION, error); + goto error_out; + } + + /* Finalize KM RCP */ + if (fd->km.num_ftype_elem) { + if (identical_km_entry_ft >= 0 && identical_km_entry_ft != km_ft_idx.id1) { + NT_LOG(ERR, FILTER, + "Identical KM matches cannot have different KM FTs"); + flow_nic_set_error(ERR_MATCH_FAILED_BY_HW_LIMITS, error); + goto error_out; + } + + fd->km.flow_type = km_ft_idx.id1; + + if (fd->km.target == KM_CAM) { + uint32_t ft_a_mask = 0; + hw_mod_km_rcp_get(&dev->ndev->be, HW_KM_RCP_FTM_A, + 
(int)km_rcp_data.rcp, 0, &ft_a_mask); + hw_mod_km_rcp_set(&dev->ndev->be, HW_KM_RCP_FTM_A, + (int)km_rcp_data.rcp, 0, + ft_a_mask | (1 << fd->km.flow_type)); + } + + hw_mod_km_rcp_flush(&dev->ndev->be, (int)km_rcp_data.rcp, 1); + + km_write_data_match_entry(&fd->km, 0); + } + + /* Setup Match Set */ + struct hw_db_inline_match_set_data match_set_data = { + .cat = cat_idx, + .km = km_idx, + .km_ft = km_ft_idx, + .action_set = action_set_idx, + .jump = fd->jump_to_group != UINT32_MAX ? fd->jump_to_group : 0, + .priority = attr->priority & 0xff, + }; + struct hw_db_match_set_idx match_set_idx = + hw_db_inline_match_set_add(dev->ndev, dev->ndev->hw_db_handle, + &match_set_data); + fh->db_idxs[fh->db_idx_counter++] = match_set_idx.raw; + + if (match_set_idx.error) { + NT_LOG(ERR, FILTER, "Could not reference Match Set resource"); + flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION, error); + goto error_out; + } + + /* Setup FLM FT */ + struct hw_db_inline_flm_ft_data flm_ft_data = { + .is_group_zero = 1, + .jump = fd->jump_to_group != UINT32_MAX ? fd->jump_to_group : 0, + .action_set = action_set_idx, + + }; + struct hw_db_flm_ft flm_ft_idx = + hw_db_inline_flm_ft_add(dev->ndev, dev->ndev->hw_db_handle, &flm_ft_data); + fh->db_idxs[fh->db_idx_counter++] = flm_ft_idx.raw; + + if (flm_ft_idx.error) { + NT_LOG(ERR, FILTER, "Could not reference FLM FT resource"); + flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION, error); + goto error_out; + } + + nic_insert_flow(dev->ndev, fh); + } + + return fh; + +error_out: + + if (fh->type == FLOW_HANDLE_TYPE_FLM) { + hw_db_inline_deref_idxs(dev->ndev, dev->ndev->hw_db_handle, + (struct hw_db_idx *)fh->flm_db_idxs, + fh->flm_db_idx_counter); + + } else { + hw_db_inline_deref_idxs(dev->ndev, dev->ndev->hw_db_handle, + (struct hw_db_idx *)fh->db_idxs, fh->db_idx_counter); + } + + free(fh); + + return NULL; +} + +/* + * FPGA uses up to 10 32-bit words (320 bits) for hash calculation + 8 bits for L4 protocol number. + * Hashed data are split between two 128-bit Quad Words (QW) + * and two 32-bit Words (W), which can refer to different header parts. + */ +enum hsh_words_id { + HSH_WORDS_QW0 = 0, + HSH_WORDS_QW4, + HSH_WORDS_W8, + HSH_WORDS_W9, + HSH_WORDS_SIZE, +}; + +/* struct with details about hash QWs & Ws */ +struct hsh_words { + /* + * index of W (word) or index of 1st word of QW (quad word) + * is used for hash mask calculation + */ + uint8_t index; + uint8_t toeplitz_index; /* offset in Bytes of given [Q]W inside Toeplitz RSS key */ + enum hw_hsh_e pe; /* offset to header part, e.g. 
beginning of L4 */ + enum hw_hsh_e ofs; /* relative offset in BYTES to 'pe' header offset above */ + uint16_t bit_len; /* max length of header part in bits to fit into QW/W */ + bool free; /* only free words can be used for hsh calculation */ +}; + +static enum hsh_words_id get_free_word(struct hsh_words *words, uint16_t bit_len) +{ + enum hsh_words_id ret = HSH_WORDS_SIZE; + uint16_t ret_bit_len = UINT16_MAX; + + for (enum hsh_words_id i = HSH_WORDS_QW0; i < HSH_WORDS_SIZE; i++) { + if (words[i].free && bit_len <= words[i].bit_len && + words[i].bit_len < ret_bit_len) { + ret = i; + ret_bit_len = words[i].bit_len; + } + } + + return ret; +} + +static int flow_nic_set_hasher_part_inline(struct flow_nic_dev *ndev, int hsh_idx, + struct hsh_words *words, uint32_t pe, uint32_t ofs, + int bit_len, bool toeplitz) +{ + int res = 0; + + /* check if there is any free word, which can accommodate header part of given 'bit_len' */ + enum hsh_words_id word = get_free_word(words, bit_len); + + if (word == HSH_WORDS_SIZE) { + NT_LOG(ERR, FILTER, "Cannot add additional %d bits into hash", bit_len); + return -1; + } + + words[word].free = false; + + res |= hw_mod_hsh_rcp_set(&ndev->be, words[word].pe, hsh_idx, 0, pe); + NT_LOG(DBG, FILTER, "hw_mod_hsh_rcp_set(&ndev->be, %d, %d, 0, %d)", words[word].pe, + hsh_idx, pe); + res |= hw_mod_hsh_rcp_set(&ndev->be, words[word].ofs, hsh_idx, 0, ofs); + NT_LOG(DBG, FILTER, "hw_mod_hsh_rcp_set(&ndev->be, %d, %d, 0, %d)", words[word].ofs, + hsh_idx, ofs); + + /* set HW_HSH_RCP_WORD_MASK based on used QW/W and given 'bit_len' */ + int mask_bit_len = bit_len; + uint32_t mask = 0x0; + uint32_t mask_be = 0x0; + uint32_t toeplitz_mask[9] = { 0x0 }; + /* iterate through all words of QW */ + uint16_t words_count = words[word].bit_len / 32; + + for (uint16_t mask_off = 1; mask_off <= words_count; mask_off++) { + if (mask_bit_len >= 32) { + mask_bit_len -= 32; + mask = 0xffffffff; + mask_be = mask; + + } else if (mask_bit_len > 0) { + /* keep bits from left to right, i.e. 
little to big endian */ + mask_be = 0xffffffff >> (32 - mask_bit_len); + mask = mask_be << (32 - mask_bit_len); + mask_bit_len = 0; + + } else { + mask = 0x0; + mask_be = 0x0; + } + + /* reorder QW words mask from little to big endian */ + res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, + words[word].index + words_count - mask_off, mask); + NT_LOG(DBG, FILTER, + "hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, %d, %d, 0x%" PRIX32 + ")", + hsh_idx, words[word].index + words_count - mask_off, mask); + toeplitz_mask[words[word].toeplitz_index + mask_off - 1] = mask_be; + } + + if (toeplitz) { + NT_LOG(DBG, FILTER, + "Partial Toeplitz RSS key mask: %08" PRIX32 " %08" PRIX32 " %08" PRIX32 + " %08" PRIX32 " %08" PRIX32 " %08" PRIX32 " %08" PRIX32 " %08" PRIX32 + " %08" PRIX32 "", + toeplitz_mask[8], toeplitz_mask[7], toeplitz_mask[6], toeplitz_mask[5], + toeplitz_mask[4], toeplitz_mask[3], toeplitz_mask[2], toeplitz_mask[1], + toeplitz_mask[0]); + NT_LOG(DBG, FILTER, + " MSB LSB"); + } + + return res; +} + +/* + * Public functions + */ + +int initialize_flow_management_of_ndev_profile_inline(struct flow_nic_dev *ndev) +{ + if (!ndev->flow_mgnt_prepared) { + /* Check static arrays are big enough */ + assert(ndev->be.tpe.nb_cpy_writers <= MAX_CPY_WRITERS_SUPPORTED); + /* KM Flow Type 0 is reserved */ + flow_nic_mark_resource_used(ndev, RES_KM_FLOW_TYPE, 0); + flow_nic_mark_resource_used(ndev, RES_KM_CATEGORY, 0); + + /* Reserved FLM Flow Types */ + flow_nic_mark_resource_used(ndev, RES_FLM_FLOW_TYPE, NT_FLM_MISS_FLOW_TYPE); + flow_nic_mark_resource_used(ndev, RES_FLM_FLOW_TYPE, NT_FLM_UNHANDLED_FLOW_TYPE); + flow_nic_mark_resource_used(ndev, RES_FLM_FLOW_TYPE, + NT_FLM_VIOLATING_MBR_FLOW_TYPE); + flow_nic_mark_resource_used(ndev, RES_FLM_RCP, 0); + + /* COT is locked to CFN. Don't set color for CFN 0 */ + hw_mod_cat_cot_set(&ndev->be, HW_CAT_COT_PRESET_ALL, 0, 0); + + if (hw_mod_cat_cot_flush(&ndev->be, 0, 1) < 0) + goto err_exit0; + + /* Initialize QSL with unmatched recipe index 0 - discard */ + if (hw_mod_qsl_rcp_set(&ndev->be, HW_QSL_RCP_DISCARD, 0, 0x1) < 0) + goto err_exit0; + + if (hw_mod_qsl_rcp_flush(&ndev->be, 0, 1) < 0) + goto err_exit0; + + flow_nic_mark_resource_used(ndev, RES_QSL_RCP, 0); + + /* Initialize QST with default index 0 */ + if (hw_mod_qsl_qst_set(&ndev->be, HW_QSL_QST_PRESET_ALL, 0, 0x0) < 0) + goto err_exit0; + + if (hw_mod_qsl_qst_flush(&ndev->be, 0, 1) < 0) + goto err_exit0; + + flow_nic_mark_resource_used(ndev, RES_QSL_QST, 0); + + /* SLC LR & TPE index 0 were reserved */ + flow_nic_mark_resource_used(ndev, RES_SLC_LR_RCP, 0); + flow_nic_mark_resource_used(ndev, RES_TPE_RCP, 0); + flow_nic_mark_resource_used(ndev, RES_TPE_EXT, 0); + flow_nic_mark_resource_used(ndev, RES_TPE_RPL, 0); + + /* PDB setup Direct Virtio Scatter-Gather descriptor of 12 bytes for its recipe 0 + */ + if (hw_mod_pdb_rcp_set(&ndev->be, HW_PDB_RCP_DESCRIPTOR, 0, 7) < 0) + goto err_exit0; + + if (hw_mod_pdb_rcp_set(&ndev->be, HW_PDB_RCP_DESC_LEN, 0, 6) < 0) + goto err_exit0; + + if (hw_mod_pdb_rcp_flush(&ndev->be, 0, 1) < 0) + goto err_exit0; + + flow_nic_mark_resource_used(ndev, RES_PDB_RCP, 0); + + /* Set default hasher recipe to 5-tuple */ + flow_nic_set_hasher(ndev, 0, HASH_ALGO_5TUPLE); + hw_mod_hsh_rcp_flush(&ndev->be, 0, 1); + + flow_nic_mark_resource_used(ndev, RES_HSH_RCP, 0); + + /* Initialize SCRUB with default index 0, i.e. 
flow will never AGE-out */ + if (hw_mod_flm_scrub_set(&ndev->be, HW_FLM_SCRUB_PRESET_ALL, 0, 0) < 0) + goto err_exit0; + + if (hw_mod_flm_scrub_flush(&ndev->be, 0, 1) < 0) + goto err_exit0; + + flow_nic_mark_resource_used(ndev, RES_SCRUB_RCP, 0); + + /* Setup filter using matching all packets violating traffic policing parameters */ + flow_nic_mark_resource_used(ndev, RES_CAT_CFN, NT_VIOLATING_MBR_CFN); + flow_nic_mark_resource_used(ndev, RES_QSL_RCP, NT_VIOLATING_MBR_QSL); + + if (hw_db_inline_setup_mbr_filter(ndev, NT_VIOLATING_MBR_CFN, + NT_FLM_VIOLATING_MBR_FLOW_TYPE, + NT_VIOLATING_MBR_QSL) < 0) + goto err_exit0; + + /* FLM */ + if (flm_sdram_calibrate(ndev) < 0) + goto err_exit0; + + if (flm_sdram_reset(ndev, 1) < 0) + goto err_exit0; + + /* Learn done status */ + hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_LDS, 0); + /* Learn fail status */ + hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_LFS, 1); + /* Learn ignore status */ + hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_LIS, 1); + /* Unlearn done status */ + hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_UDS, 0); + /* Unlearn ignore status */ + hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_UIS, 0); + /* Relearn done status */ + hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_RDS, 0); + /* Relearn ignore status */ + hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_RIS, 0); + hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_RBL, 4); + hw_mod_flm_control_flush(&ndev->be); + + /* Set the sliding windows size for flm load */ + uint32_t bin = (uint32_t)(((FLM_LOAD_WINDOWS_SIZE * 1000000000000ULL) / + (32ULL * ndev->be.flm.nb_rpp_clock_in_ps)) - + 1ULL); + hw_mod_flm_load_bin_set(&ndev->be, HW_FLM_LOAD_BIN, bin); + hw_mod_flm_load_bin_flush(&ndev->be); + + hw_mod_flm_prio_set(&ndev->be, HW_FLM_PRIO_LIMIT0, + 0); /* Drop at 100% FIFO fill level */ + hw_mod_flm_prio_set(&ndev->be, HW_FLM_PRIO_FT0, 1); + hw_mod_flm_prio_set(&ndev->be, HW_FLM_PRIO_LIMIT1, + 14); /* Drop at 87,5% FIFO fill level */ + hw_mod_flm_prio_set(&ndev->be, HW_FLM_PRIO_FT1, 1); + hw_mod_flm_prio_set(&ndev->be, HW_FLM_PRIO_LIMIT2, + 10); /* Drop at 62,5% FIFO fill level */ + hw_mod_flm_prio_set(&ndev->be, HW_FLM_PRIO_FT2, 1); + hw_mod_flm_prio_set(&ndev->be, HW_FLM_PRIO_LIMIT3, + 6); /* Drop at 37,5% FIFO fill level */ + hw_mod_flm_prio_set(&ndev->be, HW_FLM_PRIO_FT3, 1); + hw_mod_flm_prio_flush(&ndev->be); + + /* TODO How to set and use these limits */ + for (uint32_t i = 0; i < ndev->be.flm.nb_pst_profiles; ++i) { + hw_mod_flm_pst_set(&ndev->be, HW_FLM_PST_BP, i, + NTNIC_FLOW_PERIODIC_STATS_BYTE_LIMIT); + hw_mod_flm_pst_set(&ndev->be, HW_FLM_PST_PP, i, + NTNIC_FLOW_PERIODIC_STATS_PKT_LIMIT); + hw_mod_flm_pst_set(&ndev->be, HW_FLM_PST_TP, i, + NTNIC_FLOW_PERIODIC_STATS_BYTE_TIMEOUT); + } + + hw_mod_flm_pst_flush(&ndev->be, 0, ALL_ENTRIES); + + ndev->id_table_handle = ntnic_id_table_create(); + + if (ndev->id_table_handle == NULL) + goto err_exit0; + + ndev->flm_mtr_handle = calloc(1, sizeof(struct flm_flow_mtr_handle_s)); + struct flm_mtr_shared_stats_s *flm_shared_stats = + calloc(1, sizeof(struct flm_mtr_shared_stats_s)); + struct flm_mtr_stat_s *flm_stats = + calloc(FLM_MTR_STAT_SIZE, sizeof(struct flm_mtr_stat_s)); + + if (ndev->flm_mtr_handle == NULL || flm_shared_stats == NULL || + flm_stats == NULL) { + free(ndev->flm_mtr_handle); + free(flm_shared_stats); + free(flm_stats); + goto err_exit0; + } + + for (uint32_t i = 0; i < UINT8_MAX; ++i) { + ((struct flm_flow_mtr_handle_s *)ndev->flm_mtr_handle)->port_stats[i] = + flm_shared_stats; + } + + 
flm_shared_stats->stats = flm_stats; + flm_shared_stats->size = FLM_MTR_STAT_SIZE; + flm_shared_stats->shared = UINT8_MAX; + + if (flow_group_handle_create(&ndev->group_handle, ndev->be.flm.nb_categories)) + goto err_exit0; + + if (hw_db_inline_create(ndev, &ndev->hw_db_handle)) + goto err_exit0; + + ndev->flow_mgnt_prepared = 1; + } + + return 0; + +err_exit0: + done_flow_management_of_ndev_profile_inline(ndev); + return -1; +} + +int done_flow_management_of_ndev_profile_inline(struct flow_nic_dev *ndev) +{ +#ifdef FLOW_DEBUG + ndev->be.iface->set_debug_mode(ndev->be.be_dev, FLOW_BACKEND_DEBUG_MODE_WRITE); +#endif + + if (ndev->flow_mgnt_prepared) { + flm_sdram_reset(ndev, 0); + + flow_nic_free_resource(ndev, RES_KM_FLOW_TYPE, 0); + flow_nic_free_resource(ndev, RES_KM_CATEGORY, 0); + + hw_mod_flm_rcp_set(&ndev->be, HW_FLM_RCP_PRESET_ALL, 0, 0); + hw_mod_flm_rcp_flush(&ndev->be, 0, 1); + flow_nic_free_resource(ndev, RES_FLM_FLOW_TYPE, 0); + flow_nic_free_resource(ndev, RES_FLM_FLOW_TYPE, 1); + flow_nic_free_resource(ndev, RES_FLM_RCP, 0); + + for (uint32_t i = 0; i < UINT8_MAX; ++i) { + struct flm_flow_mtr_handle_s *handle = ndev->flm_mtr_handle; + handle->port_stats[i]->shared -= 1; + + if (handle->port_stats[i]->shared == 0) { + free(handle->port_stats[i]->stats); + free(handle->port_stats[i]); + } + } + + free(ndev->flm_mtr_handle); + + flow_group_handle_destroy(&ndev->group_handle); + ntnic_id_table_destroy(ndev->id_table_handle); + + hw_mod_cat_cfn_set(&ndev->be, HW_CAT_CFN_PRESET_ALL, 0, 0, 0); + hw_mod_cat_cfn_flush(&ndev->be, 0, 1); + hw_mod_cat_cot_set(&ndev->be, HW_CAT_COT_PRESET_ALL, 0, 0); + hw_mod_cat_cot_flush(&ndev->be, 0, 1); + flow_nic_free_resource(ndev, RES_CAT_CFN, 0); + + hw_mod_qsl_rcp_set(&ndev->be, HW_QSL_RCP_PRESET_ALL, 0, 0); + hw_mod_qsl_rcp_flush(&ndev->be, 0, 1); + flow_nic_free_resource(ndev, RES_QSL_RCP, 0); + + hw_mod_slc_lr_rcp_set(&ndev->be, HW_SLC_LR_RCP_PRESET_ALL, 0, 0); + hw_mod_slc_lr_rcp_flush(&ndev->be, 0, 1); + flow_nic_free_resource(ndev, RES_SLC_LR_RCP, 0); + + hw_mod_tpe_reset(&ndev->be); + flow_nic_free_resource(ndev, RES_TPE_RCP, 0); + flow_nic_free_resource(ndev, RES_TPE_EXT, 0); + flow_nic_free_resource(ndev, RES_TPE_RPL, 0); + + hw_mod_pdb_rcp_set(&ndev->be, HW_PDB_RCP_PRESET_ALL, 0, 0); + hw_mod_pdb_rcp_flush(&ndev->be, 0, 1); + flow_nic_free_resource(ndev, RES_PDB_RCP, 0); + + hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_PRESET_ALL, 0, 0, 0); + hw_mod_hsh_rcp_flush(&ndev->be, 0, 1); + flow_nic_free_resource(ndev, RES_HSH_RCP, 0); + + hw_mod_flm_scrub_set(&ndev->be, HW_FLM_SCRUB_PRESET_ALL, 0, 0); + hw_mod_flm_scrub_flush(&ndev->be, 0, 1); + flow_nic_free_resource(ndev, RES_SCRUB_RCP, 0); + + hw_db_inline_destroy(ndev->hw_db_handle); + +#ifdef FLOW_DEBUG + ndev->be.iface->set_debug_mode(ndev->be.be_dev, FLOW_BACKEND_DEBUG_MODE_NONE); +#endif + + ndev->flow_mgnt_prepared = 0; + } + + return 0; +} + +struct flow_handle *flow_create_profile_inline(struct flow_eth_dev *dev __rte_unused, + const struct rte_flow_attr *attr __rte_unused, + uint16_t forced_vlan_vid __rte_unused, + uint16_t caller_id __rte_unused, + const struct rte_flow_item elem[] __rte_unused, + const struct rte_flow_action action[] __rte_unused, + struct rte_flow_error *error __rte_unused) +{ + struct flow_handle *fh = NULL; + int res; + + uint32_t port_id = UINT32_MAX; + uint32_t num_dest_port; + uint32_t num_queues; + + uint32_t packet_data[10]; + uint32_t packet_mask[10]; + struct flm_flow_key_def_s key_def; + + struct rte_flow_attr attr_local; + memcpy(&attr_local, attr, 
sizeof(struct rte_flow_attr)); + uint16_t forced_vlan_vid_local = forced_vlan_vid; + uint16_t caller_id_local = caller_id; + + if (attr_local.group > 0) + forced_vlan_vid_local = 0; + + flow_nic_set_error(ERR_SUCCESS, error); + + struct nic_flow_def *fd = allocate_nic_flow_def(); + + if (fd == NULL) + goto err_exit0; + + res = interpret_flow_actions(dev, action, NULL, fd, error, &num_dest_port, &num_queues); + + if (res) + goto err_exit0; + + res = interpret_flow_elements(dev, elem, fd, error, forced_vlan_vid_local, &port_id, + packet_data, packet_mask, &key_def); + + if (res) + goto err_exit0; + + rte_spinlock_lock(&dev->ndev->mtx); + + /* Translate group IDs */ + if (fd->jump_to_group != UINT32_MAX && + flow_group_translate_get(dev->ndev->group_handle, caller_id_local, dev->port, + fd->jump_to_group, &fd->jump_to_group)) { + NT_LOG(ERR, FILTER, "ERROR: Could not get group resource"); + flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION, error); + goto err_exit; + } + + if (attr_local.group > 0 && + flow_group_translate_get(dev->ndev->group_handle, caller_id_local, dev->port, + attr_local.group, &attr_local.group)) { + NT_LOG(ERR, FILTER, "ERROR: Could not get group resource"); + flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION, error); + goto err_exit; + } + + if (port_id == UINT32_MAX) + port_id = dev->port_id; + + /* Create and flush filter to NIC */ + fh = create_flow_filter(dev, fd, &attr_local, forced_vlan_vid_local, + caller_id_local, error, port_id, num_dest_port, num_queues, packet_data, + packet_mask, &key_def); + + if (!fh) + goto err_exit; + + NT_LOG(DBG, FILTER, "New FlOW: fh (flow handle) %p, fd (flow definition) %p", fh, fd); + NT_LOG(DBG, FILTER, ">>>>> [Dev %p] Nic %i, Port %i: fh %p fd %p - implementation <<<<<", + dev, dev->ndev->adapter_no, dev->port, fh, fd); + + rte_spinlock_unlock(&dev->ndev->mtx); + + return fh; + +err_exit: + + if (fh) { + flow_destroy_locked_profile_inline(dev, fh, NULL); + fh = NULL; + } else { + free(fd); + fd = NULL; + } + + rte_spinlock_unlock(&dev->ndev->mtx); + +err_exit0: + if (fd) { + free(fd); + fd = NULL; + } + + NT_LOG(ERR, FILTER, "ERR: %s", __func__); + return NULL; +} + +int flow_destroy_locked_profile_inline(struct flow_eth_dev *dev, + struct flow_handle *fh, + struct rte_flow_error *error) +{ + assert(dev); + assert(fh); + + int err = 0; + + flow_nic_set_error(ERR_SUCCESS, error); + + /* take flow out of ndev list - may not have been put there yet */ + if (fh->type == FLOW_HANDLE_TYPE_FLM) + nic_remove_flow_flm(dev->ndev, fh); + + else + nic_remove_flow(dev->ndev, fh); + +#ifdef FLOW_DEBUG + dev->ndev->be.iface->set_debug_mode(dev->ndev->be.be_dev, FLOW_BACKEND_DEBUG_MODE_WRITE); +#endif + + NT_LOG(DBG, FILTER, "removing flow :%p", fh); + if (fh->type == FLOW_HANDLE_TYPE_FLM) { + hw_db_inline_deref_idxs(dev->ndev, dev->ndev->hw_db_handle, + (struct hw_db_idx *)fh->flm_db_idxs, + fh->flm_db_idx_counter); + + flm_flow_programming(fh, NT_FLM_OP_UNLEARN); + + } else { + NT_LOG(DBG, FILTER, "removing flow :%p", fh); + + if (fh->fd->km.num_ftype_elem) { + km_clear_data_match_entry(&fh->fd->km); + + const struct hw_db_inline_km_rcp_data *other_km_rcp_data = + hw_db_inline_find_data(dev->ndev, dev->ndev->hw_db_handle, + HW_DB_IDX_TYPE_KM_RCP, + (struct hw_db_idx *)fh->flm_db_idxs, + fh->flm_db_idx_counter); + + if (other_km_rcp_data != NULL && + flow_nic_deref_resource(dev->ndev, RES_KM_CATEGORY, + (int)other_km_rcp_data->rcp) == 0) { + hw_mod_km_rcp_set(&dev->ndev->be, HW_KM_RCP_PRESET_ALL, + (int)other_km_rcp_data->rcp, 0, 0); + 
hw_mod_km_rcp_flush(&dev->ndev->be, (int)other_km_rcp_data->rcp, + 1); + } + } + + hw_db_inline_deref_idxs(dev->ndev, dev->ndev->hw_db_handle, + (struct hw_db_idx *)fh->db_idxs, fh->db_idx_counter); + free(fh->fd); + fh->fd = NULL; + } + + if (err) { + NT_LOG(ERR, FILTER, "FAILED removing flow: %p", fh); + flow_nic_set_error(ERR_REMOVE_FLOW_FAILED, error); + } + + free(fh); + fh = NULL; + +#ifdef FLOW_DEBUG + dev->ndev->be.iface->set_debug_mode(dev->ndev->be.be_dev, FLOW_BACKEND_DEBUG_MODE_NONE); +#endif + + return err; +} + +int flow_destroy_profile_inline(struct flow_eth_dev *dev, struct flow_handle *flow, + struct rte_flow_error *error) +{ + int err = 0; + + if (flow && flow->type == FLOW_HANDLE_TYPE_FLM && flow->flm_async) + return flow_async_destroy_profile_inline(dev, 0, NULL, flow, NULL, error); + + flow_nic_set_error(ERR_SUCCESS, error); + + if (flow) { + /* Delete this flow */ + rte_spinlock_lock(&dev->ndev->mtx); + err = flow_destroy_locked_profile_inline(dev, flow, error); + rte_spinlock_unlock(&dev->ndev->mtx); + } + + return err; +} + +int flow_flush_profile_inline(struct flow_eth_dev *dev, + uint16_t caller_id, + struct rte_flow_error *error) +{ + int err = 0; + + flow_nic_set_error(ERR_SUCCESS, error); + + /* + * Delete all created FLM flows from this eth device. + * FLM flows must be deleted first because normal flows are their parents. + */ + struct flow_handle *flow = dev->ndev->flow_base_flm; + + while (flow && !err) { + if (flow->dev == dev && flow->caller_id == caller_id) { + struct flow_handle *flow_next = flow->next; + err = flow_destroy_profile_inline(dev, flow, error); + flow = flow_next; + + } else { + flow = flow->next; + } + } + + /* Delete all created flows from this eth device */ + flow = dev->ndev->flow_base; + + while (flow && !err) { + if (flow->dev == dev && flow->caller_id == caller_id) { + struct flow_handle *flow_next = flow->next; + err = flow_destroy_profile_inline(dev, flow, error); + flow = flow_next; + + } else { + flow = flow->next; + } + } + + return err; +} + +int flow_actions_update_profile_inline(struct flow_eth_dev *dev, + struct flow_handle *flow, + const struct rte_flow_action action[], + struct rte_flow_error *error) +{ + assert(dev); + assert(flow); + + uint32_t num_dest_port = 0; + uint32_t num_queues = 0; + + int group = (int)flow->flm_kid - 2; + + flow_nic_set_error(ERR_SUCCESS, error); + + if (flow->type != FLOW_HANDLE_TYPE_FLM) { + NT_LOG(ERR, FILTER, + "Flow actions update not supported for group 0 or default flows"); + flow_nic_set_error(ERR_MATCH_INVALID_OR_UNSUPPORTED_ELEM, error); + return -1; + } + + struct nic_flow_def *fd = allocate_nic_flow_def(); + + if (fd == NULL) { + error->type = RTE_FLOW_ERROR_TYPE_UNSPECIFIED; + error->message = "Failed to allocate nic_flow_def"; + return -1; + } + + fd->non_empty = 1; + + int res = + interpret_flow_actions(dev, action, NULL, fd, error, &num_dest_port, &num_queues); + + if (res) { + free(fd); + return -1; + } + + rte_spinlock_lock(&dev->ndev->mtx); + + /* Setup new actions */ + uint32_t local_idx_counter = 0; + uint32_t local_idxs[RES_COUNT]; + memset(local_idxs, 0x0, sizeof(uint32_t) * RES_COUNT); + + struct hw_db_inline_qsl_data qsl_data; + setup_db_qsl_data(fd, &qsl_data, num_dest_port, num_queues); + + struct hw_db_inline_hsh_data hsh_data; + setup_db_hsh_data(fd, &hsh_data); + + { + uint32_t flm_ft = 0; + uint32_t flm_scrub = 0; + + /* Setup FLM RCP */ + const struct hw_db_inline_flm_rcp_data *flm_data = + hw_db_inline_find_data(dev->ndev, dev->ndev->hw_db_handle, + 
HW_DB_IDX_TYPE_FLM_RCP,
+					       (struct hw_db_idx *)flow->flm_db_idxs,
+					       flow->flm_db_idx_counter);
+
+		if (flm_data == NULL) {
+			NT_LOG(ERR, FILTER, "Could not retrieve FLM RCP resource");
+			flow_nic_set_error(ERR_MATCH_INVALID_OR_UNSUPPORTED_ELEM, error);
+			goto error_out;
+		}
+
+		struct hw_db_flm_idx flm_idx =
+			hw_db_inline_flm_add(dev->ndev, dev->ndev->hw_db_handle, flm_data, group);
+
+		local_idxs[local_idx_counter++] = flm_idx.raw;
+
+		if (flm_idx.error) {
+			NT_LOG(ERR, FILTER, "Could not reference FLM RCP resource");
+			flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION, error);
+			goto error_out;
+		}
+
+		if (setup_flow_flm_actions(dev, fd, &qsl_data, &hsh_data, group, local_idxs,
+					   &local_idx_counter, &flow->flm_rpl_ext_ptr, &flm_ft,
+					   &flm_scrub, error)) {
+			goto error_out;
+		}
+
+		/* Update flow_handle */
+		for (int i = 0; i < MAX_FLM_MTRS_SUPPORTED; ++i) {
+			struct flm_flow_mtr_handle_s *handle = dev->ndev->flm_mtr_handle;
+			struct flm_mtr_stat_s *mtr_stat =
+				handle->port_stats[flow->caller_id]->stats;
+			flow->flm_mtr_ids[i] =
+				fd->mtr_ids[i] == UINT32_MAX ? 0 : mtr_stat[fd->mtr_ids[i]].flm_id;
+		}
+
+		for (unsigned int i = 0; i < fd->modify_field_count; ++i) {
+			switch (fd->modify_field[i].select) {
+			case CPY_SELECT_DSCP_IPV4:
+
+				/* fallthrough */
+			case CPY_SELECT_DSCP_IPV6:
+				flow->flm_dscp = fd->modify_field[i].value8[0];
+				break;
+
+			case CPY_SELECT_RQI_QFI:
+				flow->flm_rqi = (fd->modify_field[i].value8[0] >> 6) & 0x1;
+				flow->flm_qfi = fd->modify_field[i].value8[0] & 0x3f;
+				break;
+
+			case CPY_SELECT_IPV4:
+				flow->flm_nat_ipv4 = ntohl(fd->modify_field[i].value32[0]);
+				break;
+
+			case CPY_SELECT_PORT:
+				flow->flm_nat_port = ntohs(fd->modify_field[i].value16[0]);
+				break;
+
+			case CPY_SELECT_TEID:
+				flow->flm_teid = ntohl(fd->modify_field[i].value32[0]);
+				break;
+
+			default:
+				NT_LOG(DBG, FILTER, "Unknown modify field: %d",
+				       fd->modify_field[i].select);
+				break;
+			}
+		}
+
+		flow->flm_ft = (uint8_t)flm_ft;
+		flow->flm_scrub_prof = (uint8_t)flm_scrub;
+		flow->context = fd->age.context;
+
+		/* Program flow */
+		flm_flow_programming(flow, NT_FLM_OP_RELEARN);
+
+		hw_db_inline_deref_idxs(dev->ndev, dev->ndev->hw_db_handle,
+					(struct hw_db_idx *)flow->flm_db_idxs,
+					flow->flm_db_idx_counter);
+		memset(flow->flm_db_idxs, 0x0, sizeof(struct hw_db_idx) * RES_COUNT);
+
+		flow->flm_db_idx_counter = local_idx_counter;
+
+		for (int i = 0; i < RES_COUNT; ++i)
+			flow->flm_db_idxs[i] = local_idxs[i];
+	}
+
+	rte_spinlock_unlock(&dev->ndev->mtx);
+
+	free(fd);
+	return 0;
+
+error_out:
+	hw_db_inline_deref_idxs(dev->ndev, dev->ndev->hw_db_handle, (struct hw_db_idx *)local_idxs,
+				local_idx_counter);
+
+	rte_spinlock_unlock(&dev->ndev->mtx);
+
+	free(fd);
+	return -1;
+}
+
+static __rte_always_inline bool all_bits_enabled(uint64_t hash_mask, uint64_t hash_bits)
+{
+	return (hash_mask & hash_bits) == hash_bits;
+}
+
+static __rte_always_inline void unset_bits(uint64_t *hash_mask, uint64_t hash_bits)
+{
+	*hash_mask &= ~hash_bits;
+}
+
+static __rte_always_inline void unset_bits_and_log(uint64_t *hash_mask, uint64_t hash_bits)
+{
+	char rss_buffer[4096];
+	uint16_t rss_buffer_len = sizeof(rss_buffer);
+
+	if (sprint_nt_rss_mask(rss_buffer, rss_buffer_len, " ", *hash_mask & hash_bits) == 0)
+		NT_LOG(DBG, FILTER, "Configured RSS types:%s", rss_buffer);
+
+	unset_bits(hash_mask, hash_bits);
+}
+
+static __rte_always_inline void unset_bits_if_all_enabled(uint64_t *hash_mask, uint64_t hash_bits)
+{
+	if (all_bits_enabled(*hash_mask, hash_bits))
+		unset_bits(hash_mask, hash_bits);
+}
+
+int 
flow_nic_set_hasher_fields_inline(struct flow_nic_dev *ndev, int hsh_idx, + struct nt_eth_rss_conf rss_conf) +{ + uint64_t fields = rss_conf.rss_hf; + + char rss_buffer[4096]; + uint16_t rss_buffer_len = sizeof(rss_buffer); + + if (sprint_nt_rss_mask(rss_buffer, rss_buffer_len, " ", fields) == 0) + NT_LOG(DBG, FILTER, "Requested RSS types:%s", rss_buffer); + + /* + * configure all (Q)Words usable for hash calculation + * Hash can be calculated from 4 independent header parts: + * | QW0 | Qw4 | W8| W9| + * word | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | + */ + struct hsh_words words[HSH_WORDS_SIZE] = { + { 0, 5, HW_HSH_RCP_QW0_PE, HW_HSH_RCP_QW0_OFS, 128, true }, + { 4, 1, HW_HSH_RCP_QW4_PE, HW_HSH_RCP_QW4_OFS, 128, true }, + { 8, 0, HW_HSH_RCP_W8_PE, HW_HSH_RCP_W8_OFS, 32, true }, + { + 9, 255, HW_HSH_RCP_W9_PE, HW_HSH_RCP_W9_OFS, 32, + true + }, /* not supported for Toeplitz */ + }; + + int res = 0; + res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_PRESET_ALL, hsh_idx, 0, 0); + /* enable hashing */ + res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_LOAD_DIST_TYPE, hsh_idx, 0, 2); + + /* configure selected hash function and its key */ + bool toeplitz = false; + + switch (rss_conf.algorithm) { + case RTE_ETH_HASH_FUNCTION_DEFAULT: + /* Use default NTH10 hashing algorithm */ + res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_TOEPLITZ, hsh_idx, 0, 0); + /* Use 1st 32-bits from rss_key to configure NTH10 SEED */ + res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_SEED, hsh_idx, 0, + rss_conf.rss_key[0] << 24 | rss_conf.rss_key[1] << 16 | + rss_conf.rss_key[2] << 8 | rss_conf.rss_key[3]); + break; + + case RTE_ETH_HASH_FUNCTION_TOEPLITZ: + toeplitz = true; + res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_TOEPLITZ, hsh_idx, 0, 1); + uint8_t empty_key = 0; + + /* Toeplitz key (always 40B) must be encoded from little to big endian */ + for (uint8_t i = 0; i <= (MAX_RSS_KEY_LEN - 8); i += 8) { + res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_K, hsh_idx, i / 4, + rss_conf.rss_key[i + 4] << 24 | + rss_conf.rss_key[i + 5] << 16 | + rss_conf.rss_key[i + 6] << 8 | + rss_conf.rss_key[i + 7]); + NT_LOG(DBG, FILTER, + "hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_K, %d, %d, 0x%" PRIX32 + ")", + hsh_idx, i / 4, + rss_conf.rss_key[i + 4] << 24 | rss_conf.rss_key[i + 5] << 16 | + rss_conf.rss_key[i + 6] << 8 | rss_conf.rss_key[i + 7]); + res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_K, hsh_idx, i / 4 + 1, + rss_conf.rss_key[i] << 24 | + rss_conf.rss_key[i + 1] << 16 | + rss_conf.rss_key[i + 2] << 8 | + rss_conf.rss_key[i + 3]); + NT_LOG(DBG, FILTER, + "hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_K, %d, %d, 0x%" PRIX32 + ")", + hsh_idx, i / 4 + 1, + rss_conf.rss_key[i] << 24 | rss_conf.rss_key[i + 1] << 16 | + rss_conf.rss_key[i + 2] << 8 | rss_conf.rss_key[i + 3]); + empty_key |= rss_conf.rss_key[i] | rss_conf.rss_key[i + 1] | + rss_conf.rss_key[i + 2] | rss_conf.rss_key[i + 3] | + rss_conf.rss_key[i + 4] | rss_conf.rss_key[i + 5] | + rss_conf.rss_key[i + 6] | rss_conf.rss_key[i + 7]; + } + + if (empty_key == 0) { + NT_LOG(ERR, FILTER, + "Toeplitz key must be configured. 
Key with all bytes set to zero is not allowed."); + return -1; + } + + words[HSH_WORDS_W9].free = false; + NT_LOG(DBG, FILTER, + "Toeplitz hashing is enabled thus W9 and P_MASK cannot be used."); + break; + + default: + NT_LOG(ERR, FILTER, "Unknown hashing function %d requested", rss_conf.algorithm); + return -1; + } + + /* indication that some IPv6 flag is present */ + bool ipv6 = fields & (NT_ETH_RSS_IPV6_MASK); + /* store proto mask for later use at IP and L4 checksum handling */ + uint64_t l4_proto_mask = fields & + (RTE_ETH_RSS_NONFRAG_IPV4_TCP | RTE_ETH_RSS_NONFRAG_IPV4_UDP | + RTE_ETH_RSS_NONFRAG_IPV4_SCTP | RTE_ETH_RSS_NONFRAG_IPV4_OTHER | + RTE_ETH_RSS_NONFRAG_IPV6_TCP | RTE_ETH_RSS_NONFRAG_IPV6_UDP | + RTE_ETH_RSS_NONFRAG_IPV6_SCTP | RTE_ETH_RSS_NONFRAG_IPV6_OTHER | + RTE_ETH_RSS_IPV6_TCP_EX | RTE_ETH_RSS_IPV6_UDP_EX); + + /* outermost headers are used by default, so innermost bit takes precedence if detected */ + bool outer = (fields & RTE_ETH_RSS_LEVEL_INNERMOST) ? false : true; + unset_bits(&fields, RTE_ETH_RSS_LEVEL_MASK); + + if (fields == 0) { + NT_LOG(ERR, FILTER, "RSS hash configuration 0x%" PRIX64 " is not valid.", + rss_conf.rss_hf); + return -1; + } + + /* indication that IPv4 `protocol` or IPv6 `next header` fields shall be part of the hash + */ + bool l4_proto_hash = false; + + /* + * check if SRC_ONLY & DST_ONLY are used simultaneously; + * According to DPDK, we shall behave like none of these bits is set + */ + unset_bits_if_all_enabled(&fields, RTE_ETH_RSS_L2_SRC_ONLY | RTE_ETH_RSS_L2_DST_ONLY); + unset_bits_if_all_enabled(&fields, RTE_ETH_RSS_L3_SRC_ONLY | RTE_ETH_RSS_L3_DST_ONLY); + unset_bits_if_all_enabled(&fields, RTE_ETH_RSS_L4_SRC_ONLY | RTE_ETH_RSS_L4_DST_ONLY); + + /* L2 */ + if (fields & (RTE_ETH_RSS_ETH | RTE_ETH_RSS_L2_SRC_ONLY | RTE_ETH_RSS_L2_DST_ONLY)) { + if (outer) { + if (fields & RTE_ETH_RSS_L2_SRC_ONLY) { + NT_LOG(DBG, FILTER, "Set outer src MAC hasher."); + res |= flow_nic_set_hasher_part_inline(ndev, hsh_idx, words, + DYN_L2, 6, 48, toeplitz); + + } else if (fields & RTE_ETH_RSS_L2_DST_ONLY) { + NT_LOG(DBG, FILTER, "Set outer dst MAC hasher."); + res |= flow_nic_set_hasher_part_inline(ndev, hsh_idx, words, + DYN_L2, 0, 48, toeplitz); + + } else { + NT_LOG(DBG, FILTER, "Set outer src & dst MAC hasher."); + res |= flow_nic_set_hasher_part_inline(ndev, hsh_idx, words, + DYN_L2, 0, 96, toeplitz); + } + + } else if (fields & RTE_ETH_RSS_L2_SRC_ONLY) { + NT_LOG(DBG, FILTER, "Set inner src MAC hasher."); + res |= flow_nic_set_hasher_part_inline(ndev, hsh_idx, words, DYN_TUN_L2, 6, + 48, toeplitz); + + } else if (fields & RTE_ETH_RSS_L2_DST_ONLY) { + NT_LOG(DBG, FILTER, "Set inner dst MAC hasher."); + res |= flow_nic_set_hasher_part_inline(ndev, hsh_idx, words, DYN_TUN_L2, 0, + 48, toeplitz); + + } else { + NT_LOG(DBG, FILTER, "Set inner src & dst MAC hasher."); + res |= flow_nic_set_hasher_part_inline(ndev, hsh_idx, words, DYN_TUN_L2, 0, + 96, toeplitz); + } + + unset_bits_and_log(&fields, + RTE_ETH_RSS_ETH | RTE_ETH_RSS_L2_SRC_ONLY | + RTE_ETH_RSS_L2_DST_ONLY); + } + + /* + * VLAN support of multiple VLAN headers, + * where S-VLAN is the first and C-VLAN the last VLAN header + */ + if (fields & RTE_ETH_RSS_C_VLAN) { + /* + * use MPLS protocol offset, which points just after ethertype with relative + * offset -6 (i.e. 
2 bytes + * of ethertype & size + 4 bytes of VLAN header field) to access last vlan header + */ + if (outer) { + NT_LOG(DBG, FILTER, "Set outer C-VLAN hasher."); + /* + * use whole 32-bit 802.1a tag - backward compatible + * with VSWITCH implementation + */ + res |= flow_nic_set_hasher_part_inline(ndev, hsh_idx, words, DYN_MPLS, -6, + 32, toeplitz); + + } else { + NT_LOG(DBG, FILTER, "Set inner C-VLAN hasher."); + /* + * use whole 32-bit 802.1a tag - backward compatible + * with VSWITCH implementation + */ + res |= flow_nic_set_hasher_part_inline(ndev, hsh_idx, words, DYN_TUN_MPLS, + -6, 32, toeplitz); + } + + unset_bits_and_log(&fields, RTE_ETH_RSS_C_VLAN); + } + + if (fields & RTE_ETH_RSS_S_VLAN) { + if (outer) { + NT_LOG(DBG, FILTER, "Set outer S-VLAN hasher."); + /* + * use whole 32-bit 802.1a tag - backward compatible + * with VSWITCH implementation + */ + res |= flow_nic_set_hasher_part_inline(ndev, hsh_idx, words, + DYN_FIRST_VLAN, 0, 32, toeplitz); + + } else { + NT_LOG(DBG, FILTER, "Set inner S-VLAN hasher."); + /* + * use whole 32-bit 802.1a tag - backward compatible + * with VSWITCH implementation + */ + res |= flow_nic_set_hasher_part_inline(ndev, hsh_idx, words, DYN_TUN_VLAN, + 0, 32, toeplitz); + } + + unset_bits_and_log(&fields, RTE_ETH_RSS_S_VLAN); + } + /* L2 payload */ + /* calculate hash of 128-bits of l2 payload; Use MPLS protocol offset to address the + * beginning of L2 payload even if MPLS header is not present + */ + if (fields & RTE_ETH_RSS_L2_PAYLOAD) { + uint64_t outer_fields_enabled = 0; + + if (outer) { + NT_LOG(DBG, FILTER, "Set outer L2 payload hasher."); + res |= flow_nic_set_hasher_part_inline(ndev, hsh_idx, words, DYN_MPLS, 0, + 128, toeplitz); + + } else { + NT_LOG(DBG, FILTER, "Set inner L2 payload hasher."); + res |= flow_nic_set_hasher_part_inline(ndev, hsh_idx, words, DYN_TUN_MPLS, + 0, 128, toeplitz); + outer_fields_enabled = fields & RTE_ETH_RSS_GTPU; + } + + /* + * L2 PAYLOAD hashing overrides all L3 & L4 RSS flags. + * Thus we can clear all remaining (supported) + * RSS flags... + */ + unset_bits_and_log(&fields, NT_ETH_RSS_OFFLOAD_MASK); + /* + * ...but in case of INNER L2 PAYLOAD we must process + * "always outer" GTPU field if enabled + */ + fields |= outer_fields_enabled; + } + + /* L3 + L4 protocol number */ + if (fields & RTE_ETH_RSS_IPV4_CHKSUM) { + /* only IPv4 checksum is supported by DPDK RTE_ETH_RSS_* types */ + if (ipv6) { + NT_LOG(ERR, FILTER, + "RSS: IPv4 checksum requested with IPv6 header hashing!"); + res = 1; + + } else if (outer) { + NT_LOG(DBG, FILTER, "Set outer IPv4 checksum hasher."); + res |= flow_nic_set_hasher_part_inline(ndev, hsh_idx, words, DYN_L3, 10, + 16, toeplitz); + + } else { + NT_LOG(DBG, FILTER, "Set inner IPv4 checksum hasher."); + res |= flow_nic_set_hasher_part_inline(ndev, hsh_idx, words, DYN_TUN_L3, + 10, 16, toeplitz); + } + + /* + * L3 checksum is made from whole L3 header, i.e. 
no need to process other + * L3 hashing flags + */ + unset_bits_and_log(&fields, RTE_ETH_RSS_IPV4_CHKSUM | NT_ETH_RSS_IP_MASK); + } + + if (fields & NT_ETH_RSS_IP_MASK) { + if (ipv6) { + if (outer) { + if (fields & RTE_ETH_RSS_L3_SRC_ONLY) { + NT_LOG(DBG, FILTER, "Set outer IPv6/IPv4 src hasher."); + res |= flow_nic_set_hasher_part_inline(ndev, hsh_idx, + words, + DYN_FINAL_IP_DST, + -16, 128, toeplitz); + + } else if (fields & RTE_ETH_RSS_L3_DST_ONLY) { + NT_LOG(DBG, FILTER, "Set outer IPv6/IPv4 dst hasher."); + res |= flow_nic_set_hasher_part_inline(ndev, hsh_idx, + words, + DYN_FINAL_IP_DST, 0, + 128, toeplitz); + + } else { + NT_LOG(DBG, FILTER, + "Set outer IPv6/IPv4 src & dst hasher."); + res |= flow_nic_set_hasher_part_inline(ndev, hsh_idx, + words, + DYN_FINAL_IP_DST, + -16, 128, toeplitz); + res |= flow_nic_set_hasher_part_inline(ndev, hsh_idx, + words, + DYN_FINAL_IP_DST, 0, + 128, toeplitz); + } + + } else if (fields & RTE_ETH_RSS_L3_SRC_ONLY) { + NT_LOG(DBG, FILTER, "Set inner IPv6/IPv4 src hasher."); + res |= flow_nic_set_hasher_part_inline(ndev, hsh_idx, words, + DYN_TUN_FINAL_IP_DST, -16, + 128, toeplitz); + + } else if (fields & RTE_ETH_RSS_L3_DST_ONLY) { + NT_LOG(DBG, FILTER, "Set inner IPv6/IPv4 dst hasher."); + res |= flow_nic_set_hasher_part_inline(ndev, hsh_idx, words, + DYN_TUN_FINAL_IP_DST, 0, + 128, toeplitz); + + } else { + NT_LOG(DBG, FILTER, "Set inner IPv6/IPv4 src & dst hasher."); + res |= flow_nic_set_hasher_part_inline(ndev, hsh_idx, words, + DYN_TUN_FINAL_IP_DST, -16, + 128, toeplitz); + res |= flow_nic_set_hasher_part_inline(ndev, hsh_idx, words, + DYN_TUN_FINAL_IP_DST, 0, + 128, toeplitz); + } + + /* check if fragment ID shall be part of hash */ + if (fields & (RTE_ETH_RSS_FRAG_IPV4 | RTE_ETH_RSS_FRAG_IPV6)) { + if (outer) { + NT_LOG(DBG, FILTER, + "Set outer IPv6/IPv4 fragment ID hasher."); + res |= flow_nic_set_hasher_part_inline(ndev, hsh_idx, + words, + DYN_ID_IPV4_6, 0, + 32, toeplitz); + + } else { + NT_LOG(DBG, FILTER, + "Set inner IPv6/IPv4 fragment ID hasher."); + res |= flow_nic_set_hasher_part_inline(ndev, hsh_idx, + words, + DYN_TUN_ID_IPV4_6, + 0, 32, toeplitz); + } + } + + res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_AUTO_IPV4_MASK, hsh_idx, 0, + 1); + + } else { + /* IPv4 */ + if (outer) { + if (fields & RTE_ETH_RSS_L3_SRC_ONLY) { + NT_LOG(DBG, FILTER, "Set outer IPv4 src only hasher."); + res |= flow_nic_set_hasher_part_inline(ndev, hsh_idx, + words, DYN_L3, 12, + 32, toeplitz); + + } else if (fields & RTE_ETH_RSS_L3_DST_ONLY) { + NT_LOG(DBG, FILTER, "Set outer IPv4 dst only hasher."); + res |= flow_nic_set_hasher_part_inline(ndev, hsh_idx, + words, DYN_L3, 16, + 32, toeplitz); + + } else { + NT_LOG(DBG, FILTER, "Set outer IPv4 src & dst hasher."); + res |= flow_nic_set_hasher_part_inline(ndev, hsh_idx, + words, DYN_L3, 12, + 64, toeplitz); + } + + } else if (fields & RTE_ETH_RSS_L3_SRC_ONLY) { + NT_LOG(DBG, FILTER, "Set inner IPv4 src only hasher."); + res |= flow_nic_set_hasher_part_inline(ndev, hsh_idx, words, + DYN_TUN_L3, 12, 32, + toeplitz); + + } else if (fields & RTE_ETH_RSS_L3_DST_ONLY) { + NT_LOG(DBG, FILTER, "Set inner IPv4 dst only hasher."); + res |= flow_nic_set_hasher_part_inline(ndev, hsh_idx, words, + DYN_TUN_L3, 16, 32, + toeplitz); + + } else { + NT_LOG(DBG, FILTER, "Set inner IPv4 src & dst hasher."); + res |= flow_nic_set_hasher_part_inline(ndev, hsh_idx, words, + DYN_TUN_L3, 12, 64, + toeplitz); + } + + /* check if fragment ID shall be part of hash */ + if (fields & RTE_ETH_RSS_FRAG_IPV4) { + if (outer) { + 
NT_LOG(DBG, FILTER, + "Set outer IPv4 fragment ID hasher."); + res |= flow_nic_set_hasher_part_inline(ndev, hsh_idx, + words, + DYN_ID_IPV4_6, 0, + 16, toeplitz); + + } else { + NT_LOG(DBG, FILTER, + "Set inner IPv4 fragment ID hasher."); + res |= flow_nic_set_hasher_part_inline(ndev, hsh_idx, + words, + DYN_TUN_ID_IPV4_6, + 0, 16, toeplitz); + } + } + } + + /* check if L4 protocol type shall be part of hash */ + if (l4_proto_mask) + l4_proto_hash = true; + + unset_bits_and_log(&fields, NT_ETH_RSS_IP_MASK); + } + + /* L4 */ + if (fields & (RTE_ETH_RSS_PORT | RTE_ETH_RSS_L4_SRC_ONLY | RTE_ETH_RSS_L4_DST_ONLY)) { + if (outer) { + if (fields & RTE_ETH_RSS_L4_SRC_ONLY) { + NT_LOG(DBG, FILTER, "Set outer L4 src hasher."); + res |= flow_nic_set_hasher_part_inline(ndev, hsh_idx, words, + DYN_L4, 0, 16, toeplitz); + + } else if (fields & RTE_ETH_RSS_L4_DST_ONLY) { + NT_LOG(DBG, FILTER, "Set outer L4 dst hasher."); + res |= flow_nic_set_hasher_part_inline(ndev, hsh_idx, words, + DYN_L4, 2, 16, toeplitz); + + } else { + NT_LOG(DBG, FILTER, "Set outer L4 src & dst hasher."); + res |= flow_nic_set_hasher_part_inline(ndev, hsh_idx, words, + DYN_L4, 0, 32, toeplitz); + } + + } else if (fields & RTE_ETH_RSS_L4_SRC_ONLY) { + NT_LOG(DBG, FILTER, "Set inner L4 src hasher."); + res |= flow_nic_set_hasher_part_inline(ndev, hsh_idx, words, DYN_TUN_L4, 0, + 16, toeplitz); + + } else if (fields & RTE_ETH_RSS_L4_DST_ONLY) { + NT_LOG(DBG, FILTER, "Set inner L4 dst hasher."); + res |= flow_nic_set_hasher_part_inline(ndev, hsh_idx, words, DYN_TUN_L4, 2, + 16, toeplitz); + + } else { + NT_LOG(DBG, FILTER, "Set inner L4 src & dst hasher."); + res |= flow_nic_set_hasher_part_inline(ndev, hsh_idx, words, DYN_TUN_L4, 0, + 32, toeplitz); + } + + l4_proto_hash = true; + unset_bits_and_log(&fields, + RTE_ETH_RSS_PORT | RTE_ETH_RSS_L4_SRC_ONLY | + RTE_ETH_RSS_L4_DST_ONLY); + } + + /* IPv4 protocol / IPv6 next header fields */ + if (l4_proto_hash) { + /* NOTE: HW_HSH_RCP_P_MASK is not supported for Toeplitz and thus one of SW0, SW4 + * or W8 must be used to hash on `protocol` field of IPv4 or `next header` field of + * IPv6 header. 
+ */ + if (outer) { + NT_LOG(DBG, FILTER, "Set outer L4 protocol type / next header hasher."); + + if (toeplitz) { + if (ipv6) { + res |= flow_nic_set_hasher_part_inline(ndev, hsh_idx, + words, DYN_L3, 6, 8, + toeplitz); + + } else { + res |= flow_nic_set_hasher_part_inline(ndev, hsh_idx, + words, DYN_L3, 9, 8, + toeplitz); + } + + } else { + res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_P_MASK, hsh_idx, 0, + 1); + res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_TNL_P, hsh_idx, 0, + 0); + } + + } else { + NT_LOG(DBG, FILTER, "Set inner L4 protocol type / next header hasher."); + + if (toeplitz) { + if (ipv6) { + res |= flow_nic_set_hasher_part_inline(ndev, hsh_idx, + words, DYN_TUN_L3, + 6, 8, toeplitz); + + } else { + res |= flow_nic_set_hasher_part_inline(ndev, hsh_idx, + words, DYN_TUN_L3, + 9, 8, toeplitz); + } + + } else { + res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_P_MASK, hsh_idx, 0, + 1); + res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_TNL_P, hsh_idx, 0, + 1); + } + } + + l4_proto_hash = false; + } + + /* + * GTPU - for UPF use cases we always use TEID from outermost GTPU header + * even if other headers are innermost + */ + if (fields & RTE_ETH_RSS_GTPU) { + NT_LOG(DBG, FILTER, "Set outer GTPU TEID hasher."); + res |= flow_nic_set_hasher_part_inline(ndev, hsh_idx, words, DYN_L4_PAYLOAD, 4, 32, + toeplitz); + unset_bits_and_log(&fields, RTE_ETH_RSS_GTPU); + } + + /* Checksums */ + /* only UDP, TCP and SCTP checksums are supported */ + if (fields & RTE_ETH_RSS_L4_CHKSUM) { + switch (l4_proto_mask) { + case RTE_ETH_RSS_NONFRAG_IPV4_UDP: + case RTE_ETH_RSS_NONFRAG_IPV6_UDP: + case RTE_ETH_RSS_IPV6_UDP_EX: + case RTE_ETH_RSS_NONFRAG_IPV4_UDP | RTE_ETH_RSS_NONFRAG_IPV6_UDP: + case RTE_ETH_RSS_NONFRAG_IPV4_UDP | RTE_ETH_RSS_IPV6_UDP_EX: + case RTE_ETH_RSS_NONFRAG_IPV6_UDP | RTE_ETH_RSS_IPV6_UDP_EX: + case RTE_ETH_RSS_UDP_COMBINED: + if (outer) { + NT_LOG(DBG, FILTER, "Set outer UDP checksum hasher."); + res |= flow_nic_set_hasher_part_inline(ndev, hsh_idx, words, + DYN_L4, 6, 16, toeplitz); + + } else { + NT_LOG(DBG, FILTER, "Set inner UDP checksum hasher."); + res |= flow_nic_set_hasher_part_inline(ndev, hsh_idx, words, + DYN_TUN_L4, 6, 16, + toeplitz); + } + + unset_bits_and_log(&fields, RTE_ETH_RSS_L4_CHKSUM | l4_proto_mask); + break; + + case RTE_ETH_RSS_NONFRAG_IPV4_TCP: + case RTE_ETH_RSS_NONFRAG_IPV6_TCP: + case RTE_ETH_RSS_IPV6_TCP_EX: + case RTE_ETH_RSS_NONFRAG_IPV4_TCP | RTE_ETH_RSS_NONFRAG_IPV6_TCP: + case RTE_ETH_RSS_NONFRAG_IPV4_TCP | RTE_ETH_RSS_IPV6_TCP_EX: + case RTE_ETH_RSS_NONFRAG_IPV6_TCP | RTE_ETH_RSS_IPV6_TCP_EX: + case RTE_ETH_RSS_TCP_COMBINED: + if (outer) { + NT_LOG(DBG, FILTER, "Set outer TCP checksum hasher."); + res |= flow_nic_set_hasher_part_inline(ndev, hsh_idx, words, + DYN_L4, 16, 16, toeplitz); + + } else { + NT_LOG(DBG, FILTER, "Set inner TCP checksum hasher."); + res |= flow_nic_set_hasher_part_inline(ndev, hsh_idx, words, + DYN_TUN_L4, 16, 16, + toeplitz); + } + + unset_bits_and_log(&fields, RTE_ETH_RSS_L4_CHKSUM | l4_proto_mask); + break; + + case RTE_ETH_RSS_NONFRAG_IPV4_SCTP: + case RTE_ETH_RSS_NONFRAG_IPV6_SCTP: + case RTE_ETH_RSS_NONFRAG_IPV4_SCTP | RTE_ETH_RSS_NONFRAG_IPV6_SCTP: + if (outer) { + NT_LOG(DBG, FILTER, "Set outer SCTP checksum hasher."); + res |= flow_nic_set_hasher_part_inline(ndev, hsh_idx, words, + DYN_L4, 8, 32, toeplitz); + + } else { + NT_LOG(DBG, FILTER, "Set inner SCTP checksum hasher."); + res |= flow_nic_set_hasher_part_inline(ndev, hsh_idx, words, + DYN_TUN_L4, 8, 32, + toeplitz); + } + + 
unset_bits_and_log(&fields, RTE_ETH_RSS_L4_CHKSUM | l4_proto_mask); + break; + + case RTE_ETH_RSS_NONFRAG_IPV4_OTHER: + case RTE_ETH_RSS_NONFRAG_IPV6_OTHER: + + /* none or unsupported protocol was chosen */ + case 0: + NT_LOG(ERR, FILTER, + "L4 checksum hashing is supported only for UDP, TCP and SCTP protocols"); + res = -1; + break; + + /* multiple L4 protocols were selected */ + default: + NT_LOG(ERR, FILTER, + "L4 checksum hashing can be enabled just for one of UDP, TCP or SCTP protocols"); + res = -1; + break; + } + } + + if (fields || res != 0) { + hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_PRESET_ALL, hsh_idx, 0, 0); + + if (sprint_nt_rss_mask(rss_buffer, rss_buffer_len, " ", rss_conf.rss_hf) == 0) { + NT_LOG(ERR, FILTER, + "RSS configuration%s is not supported for hash func %s.", + rss_buffer, + (enum rte_eth_hash_function)toeplitz ? "Toeplitz" : "NTH10"); + + } else { + NT_LOG(ERR, FILTER, + "RSS configuration 0x%" PRIX64 + " is not supported for hash func %s.", + rss_conf.rss_hf, + (enum rte_eth_hash_function)toeplitz ? "Toeplitz" : "NTH10"); + } + + return -1; + } + + return res; +} + +static void dump_flm_data(const uint32_t *data, FILE *file) +{ + for (unsigned int i = 0; i < 10; ++i) { + fprintf(file, "%s%02X %02X %02X %02X%s", i % 2 ? "" : " ", + (data[i] >> 24) & 0xff, (data[i] >> 16) & 0xff, (data[i] >> 8) & 0xff, + data[i] & 0xff, i % 2 ? "\n" : " "); + } +} + +int flow_get_aged_flows_profile_inline(struct flow_eth_dev *dev, + uint16_t caller_id, + void **context, + uint32_t nb_contexts, + struct rte_flow_error *error) +{ + (void)dev; + flow_nic_set_error(ERR_SUCCESS, error); + + unsigned int queue_size = flm_age_queue_get_size(caller_id); + + if (queue_size == 0) { + error->type = RTE_FLOW_ERROR_TYPE_UNSPECIFIED; + error->message = "Aged queue size is not configured"; + return -1; + } + + unsigned int queue_count = flm_age_queue_count(caller_id); + + if (context == NULL) + return queue_count; + + if (queue_count < nb_contexts) { + error->type = RTE_FLOW_ERROR_TYPE_UNSPECIFIED; + error->message = "Aged queue size contains fewer records than the expected output"; + return -1; + } + + if (queue_size < nb_contexts) { + error->type = RTE_FLOW_ERROR_TYPE_UNSPECIFIED; + error->message = "Defined aged queue size is smaller than the expected output"; + return -1; + } + + uint32_t idx; + + for (idx = 0; idx < nb_contexts; ++idx) { + struct flm_age_event_s obj; + int ret = flm_age_queue_get(caller_id, &obj); + + if (ret != 0) + break; + + context[idx] = obj.context; + } + + return idx; +} + +int flow_dev_dump_profile_inline(struct flow_eth_dev *dev, + struct flow_handle *flow, + uint16_t caller_id, + FILE *file, + struct rte_flow_error *error) +{ + flow_nic_set_error(ERR_SUCCESS, error); + + rte_spinlock_lock(&dev->ndev->mtx); + + if (flow != NULL) { + if (flow->type == FLOW_HANDLE_TYPE_FLM) { + fprintf(file, "Port %d, caller %d, flow type FLM\n", (int)dev->port_id, + (int)flow->caller_id); + fprintf(file, " FLM_DATA:\n"); + dump_flm_data(flow->flm_data, file); + hw_db_inline_dump(dev->ndev, dev->ndev->hw_db_handle, + (struct hw_db_idx *)flow->flm_db_idxs, + flow->flm_db_idx_counter, file); + fprintf(file, " Context: %p\n", flow->context); + + } else { + fprintf(file, "Port %d, caller %d, flow type FLOW\n", (int)dev->port_id, + (int)flow->caller_id); + hw_db_inline_dump(dev->ndev, dev->ndev->hw_db_handle, + (struct hw_db_idx *)flow->db_idxs, flow->db_idx_counter, + file); + } + + } else { + int max_flm_count = 1000; + + hw_db_inline_dump_cfn(dev->ndev, dev->ndev->hw_db_handle, file); 
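+		/*
+		 * CFN state is dumped above; below, walk the regular flows and then the
+		 * FLM flows that belong to this caller (the FLM dump is capped by
+		 * max_flm_count).
+		 */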
+ + flow = dev->ndev->flow_base; + + while (flow) { + if (flow->caller_id == caller_id) { + fprintf(file, "Port %d, caller %d, flow type FLOW\n", + (int)dev->port_id, (int)flow->caller_id); + hw_db_inline_dump(dev->ndev, dev->ndev->hw_db_handle, + (struct hw_db_idx *)flow->db_idxs, + flow->db_idx_counter, file); + } + + flow = flow->next; + } + + flow = dev->ndev->flow_base_flm; + + while (flow && max_flm_count >= 0) { + if (flow->caller_id == caller_id) { + fprintf(file, "Port %d, caller %d, flow type FLM\n", + (int)dev->port_id, (int)flow->caller_id); + fprintf(file, " FLM_DATA:\n"); + dump_flm_data(flow->flm_data, file); + hw_db_inline_dump(dev->ndev, dev->ndev->hw_db_handle, + (struct hw_db_idx *)flow->flm_db_idxs, + flow->flm_db_idx_counter, file); + fprintf(file, " Context: %p\n", flow->context); + max_flm_count -= 1; + } + + flow = flow->next; + } + } + + rte_spinlock_unlock(&dev->ndev->mtx); + + return 0; +} + +int flow_get_flm_stats_profile_inline(struct flow_nic_dev *ndev, uint64_t *data, uint64_t size) +{ + const enum hw_flm_e fields[] = { + HW_FLM_STAT_FLOWS, HW_FLM_STAT_LRN_DONE, HW_FLM_STAT_LRN_IGNORE, + HW_FLM_STAT_LRN_FAIL, HW_FLM_STAT_UNL_DONE, HW_FLM_STAT_UNL_IGNORE, + HW_FLM_STAT_AUL_DONE, HW_FLM_STAT_AUL_IGNORE, HW_FLM_STAT_AUL_FAIL, + HW_FLM_STAT_TUL_DONE, HW_FLM_STAT_REL_DONE, HW_FLM_STAT_REL_IGNORE, + HW_FLM_STAT_PRB_DONE, HW_FLM_STAT_PRB_IGNORE, + + HW_FLM_STAT_STA_DONE, HW_FLM_STAT_INF_DONE, HW_FLM_STAT_INF_SKIP, + HW_FLM_STAT_PCK_HIT, HW_FLM_STAT_PCK_MISS, HW_FLM_STAT_PCK_UNH, + HW_FLM_STAT_PCK_DIS, HW_FLM_STAT_CSH_HIT, HW_FLM_STAT_CSH_MISS, + HW_FLM_STAT_CSH_UNH, HW_FLM_STAT_CUC_START, HW_FLM_STAT_CUC_MOVE, + + HW_FLM_LOAD_LPS, HW_FLM_LOAD_APS, + }; + + const uint64_t fields_cnt = sizeof(fields) / sizeof(enum hw_flm_e); + + if (!ndev->flow_mgnt_prepared) + return 0; + + if (size < fields_cnt) + return -1; + + hw_mod_flm_stat_update(&ndev->be); + + for (uint64_t i = 0; i < fields_cnt; ++i) { + uint32_t value = 0; + hw_mod_flm_stat_get(&ndev->be, fields[i], &value); + data[i] = (fields[i] == HW_FLM_STAT_FLOWS || fields[i] == HW_FLM_LOAD_LPS || + fields[i] == HW_FLM_LOAD_APS) + ? 
value + : data[i] + value; + + if (ndev->be.flm.ver < 18 && fields[i] == HW_FLM_STAT_PRB_IGNORE) + break; + } + + return 0; +} + +int flow_set_mtu_inline(struct flow_eth_dev *dev, uint32_t port, uint16_t mtu) +{ + if (port >= 255) + return -1; + + uint32_t ipv4_en_frag; + uint32_t ipv4_action; + uint32_t ipv6_en_frag; + uint32_t ipv6_action; + + if (port == 0) { + ipv4_en_frag = PORT_0_IPV4_FRAGMENTATION; + ipv4_action = PORT_0_IPV4_DF_ACTION; + ipv6_en_frag = PORT_0_IPV6_FRAGMENTATION; + ipv6_action = PORT_0_IPV6_ACTION; + + } else if (port == 1) { + ipv4_en_frag = PORT_1_IPV4_FRAGMENTATION; + ipv4_action = PORT_1_IPV4_DF_ACTION; + ipv6_en_frag = PORT_1_IPV6_FRAGMENTATION; + ipv6_action = PORT_1_IPV6_ACTION; + + } else { + ipv4_en_frag = DISABLE_FRAGMENTATION; + ipv4_action = IPV4_DF_DROP; + ipv6_en_frag = DISABLE_FRAGMENTATION; + ipv6_action = IPV6_DROP; + } + + int err = 0; + uint8_t ifr_mtu_recipe = convert_port_to_ifr_mtu_recipe(port); + struct flow_nic_dev *ndev = dev->ndev; + + err |= hw_mod_tpe_rpp_ifr_rcp_set(&ndev->be, HW_TPE_IFR_RCP_IPV4_EN, ifr_mtu_recipe, + ipv4_en_frag); + err |= hw_mod_tpe_rpp_ifr_rcp_set(&ndev->be, HW_TPE_IFR_RCP_IPV6_EN, ifr_mtu_recipe, + ipv6_en_frag); + err |= hw_mod_tpe_rpp_ifr_rcp_set(&ndev->be, HW_TPE_IFR_RCP_MTU, ifr_mtu_recipe, mtu); + err |= hw_mod_tpe_rpp_ifr_rcp_set(&ndev->be, HW_TPE_IFR_RCP_IPV4_DF_DROP, ifr_mtu_recipe, + ipv4_action); + err |= hw_mod_tpe_rpp_ifr_rcp_set(&ndev->be, HW_TPE_IFR_RCP_IPV6_DROP, ifr_mtu_recipe, + ipv6_action); + + err |= hw_mod_tpe_ifr_rcp_set(&ndev->be, HW_TPE_IFR_RCP_IPV4_EN, ifr_mtu_recipe, + ipv4_en_frag); + err |= hw_mod_tpe_ifr_rcp_set(&ndev->be, HW_TPE_IFR_RCP_IPV6_EN, ifr_mtu_recipe, + ipv6_en_frag); + err |= hw_mod_tpe_ifr_rcp_set(&ndev->be, HW_TPE_IFR_RCP_MTU, ifr_mtu_recipe, mtu); + err |= hw_mod_tpe_ifr_rcp_set(&ndev->be, HW_TPE_IFR_RCP_IPV4_DF_DROP, ifr_mtu_recipe, + ipv4_action); + err |= hw_mod_tpe_ifr_rcp_set(&ndev->be, HW_TPE_IFR_RCP_IPV6_DROP, ifr_mtu_recipe, + ipv6_action); + + if (err == 0) { + err |= hw_mod_tpe_rpp_ifr_rcp_flush(&ndev->be, ifr_mtu_recipe, 1); + err |= hw_mod_tpe_ifr_rcp_flush(&ndev->be, ifr_mtu_recipe, 1); + } + + return err; +} + +int flow_info_get_profile_inline(struct flow_eth_dev *dev, uint8_t caller_id, + struct rte_flow_port_info *port_info, + struct rte_flow_queue_info *queue_info, struct rte_flow_error *error) +{ + (void)queue_info; + (void)caller_id; + int res = 0; + + flow_nic_set_error(ERR_SUCCESS, error); + memset(port_info, 0, sizeof(struct rte_flow_port_info)); + + port_info->max_nb_aging_objects = dev->nb_aging_objects; + + struct flm_flow_mtr_handle_s *mtr_handle = dev->ndev->flm_mtr_handle; + + if (mtr_handle) + port_info->max_nb_meters = mtr_handle->port_stats[caller_id]->size; + + return res; +} + +int flow_configure_profile_inline(struct flow_eth_dev *dev, uint8_t caller_id, + const struct rte_flow_port_attr *port_attr, uint16_t nb_queue, + const struct rte_flow_queue_attr *queue_attr[], + struct rte_flow_error *error) +{ + (void)nb_queue; + (void)queue_attr; + int res = 0; + + flow_nic_set_error(ERR_SUCCESS, error); + + if (port_attr->nb_aging_objects > 0) { + if (dev->nb_aging_objects > 0) { + flm_age_queue_free(dev->port_id, caller_id); + dev->nb_aging_objects = 0; + } + + struct rte_ring *age_queue = + flm_age_queue_create(dev->port_id, caller_id, port_attr->nb_aging_objects); + + if (age_queue == NULL) { + error->message = "Failed to allocate aging objects"; + goto error_out; + } + + dev->nb_aging_objects = port_attr->nb_aging_objects; + } + + if 
(port_attr->nb_meters > 0) {
+		struct flm_flow_mtr_handle_s *mtr_handle = dev->ndev->flm_mtr_handle;
+
+		if (mtr_handle->port_stats[caller_id]->shared == 1) {
+			/* Resize the shared meter statistics array; keep the old array
+			 * and report failure if reallocation fails.
+			 */
+			struct flm_mtr_stat_s *stats =
+				realloc(mtr_handle->port_stats[caller_id]->stats,
+					port_attr->nb_meters * sizeof(struct flm_mtr_stat_s));
+
+			if (stats == NULL) {
+				res = -1;
+
+			} else {
+				mtr_handle->port_stats[caller_id]->stats = stats;
+				mtr_handle->port_stats[caller_id]->size = port_attr->nb_meters;
+				res = 0;
+			}
+
+		} else {
+			mtr_handle->port_stats[caller_id] =
+				calloc(1, sizeof(struct flm_mtr_shared_stats_s));
+			struct flm_mtr_stat_s *stats =
+				calloc(port_attr->nb_meters, sizeof(struct flm_mtr_stat_s));
+
+			if (mtr_handle->port_stats[caller_id] == NULL || stats == NULL) {
+				free(mtr_handle->port_stats[caller_id]);
+				free(stats);
+				error->message = "Failed to allocate meter actions";
+				goto error_out;
+			}
+
+			mtr_handle->port_stats[caller_id]->stats = stats;
+			mtr_handle->port_stats[caller_id]->size = port_attr->nb_meters;
+			mtr_handle->port_stats[caller_id]->shared = 1;
+		}
+	}
+
+	return res;
+
+error_out:
+	error->type = RTE_FLOW_ERROR_TYPE_UNSPECIFIED;
+
+	if (port_attr->nb_aging_objects > 0) {
+		flm_age_queue_free(dev->port_id, caller_id);
+		dev->nb_aging_objects = 0;
+	}
+
+	return -1;
+}
+
+struct flow_pattern_template *flow_pattern_template_create_profile_inline(struct flow_eth_dev *dev,
+	const struct rte_flow_pattern_template_attr *template_attr, uint16_t caller_id,
+	const struct rte_flow_item pattern[], struct rte_flow_error *error)
+{
+	(void)template_attr;
+	(void)caller_id;
+	uint32_t port_id = 0;
+	uint32_t packet_data[10];
+	uint32_t packet_mask[10];
+	struct flm_flow_key_def_s key_def;
+
+	struct nic_flow_def *fd = allocate_nic_flow_def();
+
+	flow_nic_set_error(ERR_SUCCESS, error);
+
+	if (fd == NULL) {
+		error->type = RTE_FLOW_ERROR_TYPE_UNSPECIFIED;
+		error->message = "Failed to allocate flow_def";
+		return NULL;
+	}
+
+	/* Note that forced_vlan_vid is unavailable at this point in time */
+	int res = interpret_flow_elements(dev, pattern, fd, error, 0, &port_id, packet_data,
+		packet_mask, &key_def);
+
+	if (res) {
+		free(fd);
+		return NULL;
+	}
+
+	struct flow_pattern_template *template = calloc(1, sizeof(struct flow_pattern_template));
+
+	if (template == NULL) {
+		error->type = RTE_FLOW_ERROR_TYPE_UNSPECIFIED;
+		error->message = "Failed to allocate pattern_template";
+		free(fd);
+		return NULL;
+	}
+
+	template->fd = fd;
+
+	return template;
+}
+
+int flow_pattern_template_destroy_profile_inline(struct flow_eth_dev *dev,
+	struct flow_pattern_template *pattern_template,
+	struct rte_flow_error *error)
+{
+	(void)dev;
+	flow_nic_set_error(ERR_SUCCESS, error);
+
+	free(pattern_template->fd);
+	free(pattern_template);
+
+	return 0;
+}
+
+struct flow_actions_template *
+flow_actions_template_create_profile_inline(struct flow_eth_dev *dev,
+	const struct rte_flow_actions_template_attr *template_attr, uint16_t caller_id,
+	const struct rte_flow_action actions[],
+	const struct rte_flow_action masks[],
+	struct rte_flow_error *error)
+{
+	(void)template_attr;
+	int res;
+
+	uint32_t num_dest_port = 0;
+	uint32_t num_queues = 0;
+
+	struct nic_flow_def *fd = allocate_nic_flow_def();
+
+	flow_nic_set_error(ERR_SUCCESS, error);
+
+	if (fd == NULL) {
+		error->type = RTE_FLOW_ERROR_TYPE_UNSPECIFIED;
+		error->message = "Failed to allocate flow_def";
+		return NULL;
+	}
+
+	res = interpret_flow_actions(dev, actions, masks, fd, error, &num_dest_port, &num_queues);
+
+	if (res) {
+		free(fd);
+		return NULL;
+	}
+
+	/* Translate group IDs */
+	if (fd->jump_to_group != UINT32_MAX) {
+		rte_spinlock_lock(&dev->ndev->mtx);
+		res = flow_group_translate_get(dev->ndev->group_handle, caller_id,
+			dev->port, fd->jump_to_group, &fd->jump_to_group);
+		rte_spinlock_unlock(&dev->ndev->mtx);
+
+		if (res) {
+			NT_LOG(ERR, FILTER, "ERROR: Could not 
get group resource"); + flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION, error); + free(fd); + return NULL; + } + } + + struct flow_actions_template *template = calloc(1, sizeof(struct flow_actions_template)); + + template->fd = fd; + template->num_dest_port = num_dest_port; + template->num_queues = num_queues; + + return template; +} + +int flow_actions_template_destroy_profile_inline(struct flow_eth_dev *dev, + struct flow_actions_template *actions_template, + struct rte_flow_error *error) +{ + (void)dev; + flow_nic_set_error(ERR_SUCCESS, error); + + free(actions_template->fd); + free(actions_template); + + return 0; +} + +struct flow_template_table *flow_template_table_create_profile_inline(struct flow_eth_dev *dev, + const struct rte_flow_template_table_attr *table_attr, uint16_t forced_vlan_vid, + uint16_t caller_id, + struct flow_pattern_template *pattern_templates[], uint8_t nb_pattern_templates, + struct flow_actions_template *actions_templates[], uint8_t nb_actions_templates, + struct rte_flow_error *error) +{ + flow_nic_set_error(ERR_SUCCESS, error); + + struct flow_template_table *template_table = calloc(1, sizeof(struct flow_template_table)); + + if (template_table == NULL) { + error->type = RTE_FLOW_ERROR_TYPE_UNSPECIFIED; + error->message = "Failed to allocate template_table"; + goto error_out; + } + + template_table->pattern_templates = + malloc(sizeof(struct flow_pattern_template *) * nb_pattern_templates); + template_table->actions_templates = + malloc(sizeof(struct flow_actions_template *) * nb_actions_templates); + template_table->pattern_action_pairs = + calloc((uint32_t)nb_pattern_templates * nb_actions_templates, + sizeof(struct flow_template_table_cell)); + + if (template_table->pattern_templates == NULL || + template_table->actions_templates == NULL || + template_table->pattern_action_pairs == NULL) { + error->type = RTE_FLOW_ERROR_TYPE_UNSPECIFIED; + error->message = "Failed to allocate template_table variables"; + goto error_out; + } + + template_table->attr.priority = table_attr->flow_attr.priority; + template_table->attr.group = table_attr->flow_attr.group; + template_table->forced_vlan_vid = forced_vlan_vid; + template_table->caller_id = caller_id; + + template_table->nb_pattern_templates = nb_pattern_templates; + template_table->nb_actions_templates = nb_actions_templates; + + memcpy(template_table->pattern_templates, pattern_templates, + sizeof(struct flow_pattern_template *) * nb_pattern_templates); + memcpy(template_table->actions_templates, actions_templates, + sizeof(struct rte_flow_actions_template *) * nb_actions_templates); + + rte_spinlock_lock(&dev->ndev->mtx); + int res = + flow_group_translate_get(dev->ndev->group_handle, caller_id, dev->port, + template_table->attr.group, &template_table->attr.group); + rte_spinlock_unlock(&dev->ndev->mtx); + + /* Translate group IDs */ + if (res) { + NT_LOG(ERR, FILTER, "ERROR: Could not get group resource"); + flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION, error); + goto error_out; + } + + return template_table; + +error_out: + + if (template_table) { + free(template_table->pattern_templates); + free(template_table->actions_templates); + free(template_table->pattern_action_pairs); + free(template_table); + } + + return NULL; +} + +int flow_template_table_destroy_profile_inline(struct flow_eth_dev *dev, + struct flow_template_table *template_table, + struct rte_flow_error *error) +{ + flow_nic_set_error(ERR_SUCCESS, error); + + const uint32_t nb_cells = + template_table->nb_pattern_templates * 
template_table->nb_actions_templates; + + for (uint32_t i = 0; i < nb_cells; ++i) { + struct flow_template_table_cell *cell = &template_table->pattern_action_pairs[i]; + + if (cell->flm_db_idx_counter > 0) { + hw_db_inline_deref_idxs(dev->ndev, dev->ndev->hw_db_handle, + (struct hw_db_idx *)cell->flm_db_idxs, + cell->flm_db_idx_counter); + } + } + + free(template_table->pattern_templates); + free(template_table->actions_templates); + free(template_table->pattern_action_pairs); + free(template_table); + + return 0; +} + +struct flow_handle *flow_async_create_profile_inline(struct flow_eth_dev *dev, + uint32_t queue_id, + const struct rte_flow_op_attr *op_attr, + struct flow_template_table *template_table, + const struct rte_flow_item pattern[], + uint8_t pattern_template_index, + const struct rte_flow_action actions[], + uint8_t actions_template_index, + void *user_data, + struct rte_flow_error *error) +{ + (void)queue_id; + (void)op_attr; + struct flow_handle *fh = NULL; + int res, status; + + const uint32_t pattern_action_index = + (uint32_t)template_table->nb_actions_templates * pattern_template_index + + actions_template_index; + struct flow_template_table_cell *pattern_action_pair = + &template_table->pattern_action_pairs[pattern_action_index]; + + uint32_t num_dest_port = + template_table->actions_templates[actions_template_index]->num_dest_port; + uint32_t num_queues = + template_table->actions_templates[actions_template_index]->num_queues; + + uint32_t port_id = UINT32_MAX; + uint32_t packet_data[10]; + uint32_t packet_mask[10]; + struct flm_flow_key_def_s key_def; + + flow_nic_set_error(ERR_SUCCESS, error); + + struct nic_flow_def *fd = malloc(sizeof(struct nic_flow_def)); + + if (fd == NULL) { + error->type = RTE_FLOW_ERROR_TYPE_UNSPECIFIED; + error->message = "Failed to allocate flow_def"; + goto err_exit; + } + + memcpy(fd, template_table->actions_templates[actions_template_index]->fd, + sizeof(struct nic_flow_def)); + + res = interpret_flow_elements(dev, pattern, fd, error, + template_table->forced_vlan_vid, &port_id, packet_data, + packet_mask, &key_def); + + if (res) + goto err_exit; + + if (port_id == UINT32_MAX) + port_id = dev->port_id; + + { + uint32_t num_dest_port_tmp = 0; + uint32_t num_queues_tmp = 0; + + struct nic_flow_def action_fd = { 0 }; + prepare_nic_flow_def(&action_fd); + + res = interpret_flow_actions(dev, actions, NULL, &action_fd, error, + &num_dest_port_tmp, &num_queues_tmp); + + if (res) + goto err_exit; + + /* Copy FLM unique actions: modify_field, meter, encap/decap and age */ + memcpy_or(fd->mtr_ids, action_fd.mtr_ids, sizeof(action_fd.mtr_ids)); + memcpy_or(&fd->tun_hdr, &action_fd.tun_hdr, sizeof(struct tunnel_header_s)); + memcpy_or(fd->modify_field, action_fd.modify_field, + sizeof(action_fd.modify_field)); + fd->modify_field_count = action_fd.modify_field_count; + memcpy_or(&fd->age, &action_fd.age, sizeof(struct rte_flow_action_age)); + } + + status = atomic_load(&pattern_action_pair->status); + + /* Initializing template entry */ + if (status < CELL_STATUS_INITIALIZED_TYPE_FLOW) { + if (status == CELL_STATUS_UNINITIALIZED && + atomic_compare_exchange_strong(&pattern_action_pair->status, &status, + CELL_STATUS_INITIALIZING)) { + rte_spinlock_lock(&dev->ndev->mtx); + + fh = create_flow_filter(dev, fd, &template_table->attr, + template_table->forced_vlan_vid, template_table->caller_id, + error, port_id, num_dest_port, num_queues, packet_data, + packet_mask, &key_def); + + rte_spinlock_unlock(&dev->ndev->mtx); + + if (fh == NULL) { + /* reset 
status to CELL_STATUS_UNINITIALIZED to avoid a deadlock */ + atomic_store(&pattern_action_pair->status, + CELL_STATUS_UNINITIALIZED); + goto err_exit; + } + + if (fh->type == FLOW_HANDLE_TYPE_FLM) { + rte_spinlock_lock(&dev->ndev->mtx); + + struct hw_db_idx *flm_ft_idx = + hw_db_inline_find_idx(dev->ndev, dev->ndev->hw_db_handle, + HW_DB_IDX_TYPE_FLM_FT, + (struct hw_db_idx *)fh->flm_db_idxs, + fh->flm_db_idx_counter); + + rte_spinlock_unlock(&dev->ndev->mtx); + + pattern_action_pair->flm_db_idx_counter = fh->flm_db_idx_counter; + memcpy(pattern_action_pair->flm_db_idxs, fh->flm_db_idxs, + sizeof(struct hw_db_idx) * fh->flm_db_idx_counter); + + pattern_action_pair->flm_key_id = fh->flm_kid; + pattern_action_pair->flm_ft = flm_ft_idx->id1; + + pattern_action_pair->flm_rpl_ext_ptr = fh->flm_rpl_ext_ptr; + pattern_action_pair->flm_scrub_prof = fh->flm_scrub_prof; + + atomic_store(&pattern_action_pair->status, + CELL_STATUS_INITIALIZED_TYPE_FLM); + + /* increment template table cell reference */ + atomic_fetch_add(&pattern_action_pair->counter, 1); + fh->template_table_cell = pattern_action_pair; + fh->flm_async = true; + + } else { + atomic_store(&pattern_action_pair->status, + CELL_STATUS_INITIALIZED_TYPE_FLOW); + } + + } else { + do { + nt_os_wait_usec(1); + status = atomic_load(&pattern_action_pair->status); + } while (status == CELL_STATUS_INITIALIZING); + + /* error handling in case that create_flow_filter() will fail in the other + * thread + */ + if (status == CELL_STATUS_UNINITIALIZED) + goto err_exit; + } + } + + /* FLM learn */ + if (fh == NULL && status == CELL_STATUS_INITIALIZED_TYPE_FLM) { + fh = calloc(1, sizeof(struct flow_handle)); + + fh->type = FLOW_HANDLE_TYPE_FLM; + fh->dev = dev; + fh->caller_id = template_table->caller_id; + fh->user_data = user_data; + + copy_fd_to_fh_flm(fh, fd, packet_data, pattern_action_pair->flm_key_id, + pattern_action_pair->flm_ft, + pattern_action_pair->flm_rpl_ext_ptr, + pattern_action_pair->flm_scrub_prof, + template_table->attr.priority & 0x3); + + free(fd); + + flm_flow_programming(fh, NT_FLM_OP_LEARN); + + nic_insert_flow_flm(dev->ndev, fh); + + /* increment template table cell reference */ + atomic_fetch_add(&pattern_action_pair->counter, 1); + fh->template_table_cell = pattern_action_pair; + fh->flm_async = true; + + } else if (fh == NULL) { + rte_spinlock_lock(&dev->ndev->mtx); + + fh = create_flow_filter(dev, fd, &template_table->attr, + template_table->forced_vlan_vid, template_table->caller_id, + error, port_id, num_dest_port, num_queues, packet_data, + packet_mask, &key_def); + + rte_spinlock_unlock(&dev->ndev->mtx); + + if (fh == NULL) + goto err_exit; + } + + if (fh) { + fh->caller_id = template_table->caller_id; + fh->user_data = user_data; + } + + return fh; + +err_exit: + free(fd); + free(fh); + + return NULL; +} + +int flow_async_destroy_profile_inline(struct flow_eth_dev *dev, uint32_t queue_id, + const struct rte_flow_op_attr *op_attr, struct flow_handle *flow, + void *user_data, struct rte_flow_error *error) +{ + (void)queue_id; + (void)op_attr; + (void)user_data; + + if (flow->type == FLOW_HANDLE_TYPE_FLOW) + return flow_destroy_profile_inline(dev, flow, error); + + if (flm_flow_programming(flow, NT_FLM_OP_UNLEARN)) { + NT_LOG(ERR, FILTER, "FAILED to destroy flow: %p", flow); + flow_nic_set_error(ERR_REMOVE_FLOW_FAILED, error); + return -1; + } + + nic_remove_flow_flm(dev->ndev, flow); + + free(flow); + + return 0; +} + +static const struct profile_inline_ops ops = { + /* + * Management + */ + 
.done_flow_management_of_ndev_profile_inline = done_flow_management_of_ndev_profile_inline, + .initialize_flow_management_of_ndev_profile_inline = + initialize_flow_management_of_ndev_profile_inline, + .flow_dev_dump_profile_inline = flow_dev_dump_profile_inline, + /* + * Flow functionality + */ + .flow_destroy_locked_profile_inline = flow_destroy_locked_profile_inline, + .flow_create_profile_inline = flow_create_profile_inline, + .flow_destroy_profile_inline = flow_destroy_profile_inline, + .flow_flush_profile_inline = flow_flush_profile_inline, + .flow_actions_update_profile_inline = flow_actions_update_profile_inline, + .flow_nic_set_hasher_fields_inline = flow_nic_set_hasher_fields_inline, + .flow_get_aged_flows_profile_inline = flow_get_aged_flows_profile_inline, + /* + * Stats + */ + .flow_get_flm_stats_profile_inline = flow_get_flm_stats_profile_inline, + .flow_info_get_profile_inline = flow_info_get_profile_inline, + .flow_configure_profile_inline = flow_configure_profile_inline, + .flow_pattern_template_create_profile_inline = flow_pattern_template_create_profile_inline, + .flow_pattern_template_destroy_profile_inline = + flow_pattern_template_destroy_profile_inline, + .flow_actions_template_create_profile_inline = flow_actions_template_create_profile_inline, + .flow_actions_template_destroy_profile_inline = + flow_actions_template_destroy_profile_inline, + .flow_template_table_create_profile_inline = flow_template_table_create_profile_inline, + .flow_template_table_destroy_profile_inline = flow_template_table_destroy_profile_inline, + .flow_async_create_profile_inline = flow_async_create_profile_inline, + .flow_async_destroy_profile_inline = flow_async_destroy_profile_inline, + /* + * NT Flow FLM Meter API + */ + .flow_mtr_supported = flow_mtr_supported, + .flow_mtr_meter_policy_n_max = flow_mtr_meter_policy_n_max, + .flow_mtr_set_profile = flow_mtr_set_profile, + .flow_mtr_set_policy = flow_mtr_set_policy, + .flow_mtr_create_meter = flow_mtr_create_meter, + .flow_mtr_probe_meter = flow_mtr_probe_meter, + .flow_mtr_destroy_meter = flow_mtr_destroy_meter, + .flm_mtr_adjust_stats = flm_mtr_adjust_stats, + .flow_mtr_meters_supported = flow_mtr_meters_supported, + .flm_setup_queues = flm_setup_queues, + .flm_free_queues = flm_free_queues, + .flm_mtr_read_stats = flm_mtr_read_stats, + .flm_update = flm_update, + + /* + * Config API + */ + .flow_set_mtu_inline = flow_set_mtu_inline, +}; + +void profile_inline_init(void) +{ + register_profile_inline_ops(&ops); +} diff --git a/drivers/net/ntnic/nthw/flow_api/profile_inline/flow_api_profile_inline.h b/drivers/net/ntnic/nthw/flow_api/profile_inline/flow_api_profile_inline.h new file mode 100644 index 0000000000..ce1a0669ee --- /dev/null +++ b/drivers/net/ntnic/nthw/flow_api/profile_inline/flow_api_profile_inline.h @@ -0,0 +1,132 @@ +/* + * SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2023 Napatech A/S + */ + +#ifndef _FLOW_API_PROFILE_INLINE_H_ +#define _FLOW_API_PROFILE_INLINE_H_ + +#include + +#include "flow_api.h" +#include "stream_binary_flow_api.h" + +#define DISABLE_FRAGMENTATION 0 +#define IPV4_DF_DROP 1 +#define IPV6_DROP 1 + +/* + * Management + */ + +int done_flow_management_of_ndev_profile_inline(struct flow_nic_dev *ndev); + +int initialize_flow_management_of_ndev_profile_inline(struct flow_nic_dev *ndev); + +/* + * Flow functionality + */ +int flow_destroy_locked_profile_inline(struct flow_eth_dev *dev, + struct flow_handle *fh, + struct rte_flow_error *error); + +struct flow_handle *flow_create_profile_inline(struct 
flow_eth_dev *dev, + const struct rte_flow_attr *attr, + uint16_t forced_vlan_vid, + uint16_t caller_id, + const struct rte_flow_item elem[], + const struct rte_flow_action action[], + struct rte_flow_error *error); + +int flow_destroy_profile_inline(struct flow_eth_dev *dev, + struct flow_handle *flow, + struct rte_flow_error *error); + +int flow_flush_profile_inline(struct flow_eth_dev *dev, + uint16_t caller_id, + struct rte_flow_error *error); + +int flow_actions_update_profile_inline(struct flow_eth_dev *dev, + struct flow_handle *flow, + const struct rte_flow_action action[], + struct rte_flow_error *error); + +int flow_dev_dump_profile_inline(struct flow_eth_dev *dev, + struct flow_handle *flow, + uint16_t caller_id, + FILE *file, + struct rte_flow_error *error); + +int flow_get_aged_flows_profile_inline(struct flow_eth_dev *dev, + uint16_t caller_id, + void **context, + uint32_t nb_contexts, + struct rte_flow_error *error); + +int flow_nic_set_hasher_fields_inline(struct flow_nic_dev *ndev, + int hsh_idx, + struct nt_eth_rss_conf rss_conf); + +/* + * Stats + */ + +int flow_get_flm_stats_profile_inline(struct flow_nic_dev *ndev, uint64_t *data, uint64_t size); + +/* + * RTE flow asynchronous operations functions + */ + +struct flow_pattern_template *flow_pattern_template_create_profile_inline(struct flow_eth_dev *dev, + const struct rte_flow_pattern_template_attr *template_attr, uint16_t caller_id, + const struct rte_flow_item pattern[], struct rte_flow_error *error); + +int flow_pattern_template_destroy_profile_inline(struct flow_eth_dev *dev, + struct flow_pattern_template *pattern_template, + struct rte_flow_error *error); + +struct flow_actions_template *flow_actions_template_create_profile_inline(struct flow_eth_dev *dev, + const struct rte_flow_actions_template_attr *template_attr, uint16_t caller_id, + const struct rte_flow_action actions[], const struct rte_flow_action masks[], + struct rte_flow_error *error); + +int flow_actions_template_destroy_profile_inline(struct flow_eth_dev *dev, + struct flow_actions_template *actions_template, + struct rte_flow_error *error); + +struct flow_template_table *flow_template_table_create_profile_inline(struct flow_eth_dev *dev, + const struct rte_flow_template_table_attr *table_attr, uint16_t forced_vlan_vid, + uint16_t caller_id, + struct flow_pattern_template *pattern_templates[], uint8_t nb_pattern_templates, + struct flow_actions_template *actions_templates[], uint8_t nb_actions_templates, + struct rte_flow_error *error); + +int flow_template_table_destroy_profile_inline(struct flow_eth_dev *dev, + struct flow_template_table *template_table, + struct rte_flow_error *error); + +struct flow_handle *flow_async_create_profile_inline(struct flow_eth_dev *dev, uint32_t queue_id, + const struct rte_flow_op_attr *op_attr, + struct flow_template_table *template_table, const struct rte_flow_item pattern[], + uint8_t pattern_template_index, const struct rte_flow_action actions[], + uint8_t actions_template_index, void *user_data, struct rte_flow_error *error); + +int flow_async_destroy_profile_inline(struct flow_eth_dev *dev, uint32_t queue_id, + const struct rte_flow_op_attr *op_attr, struct flow_handle *flow, + void *user_data, struct rte_flow_error *error); + +int flow_info_get_profile_inline(struct flow_eth_dev *dev, uint8_t caller_id, + struct rte_flow_port_info *port_info, + struct rte_flow_queue_info *queue_info, struct rte_flow_error *error); + +int flow_configure_profile_inline(struct flow_eth_dev *dev, uint8_t caller_id, + const 
struct rte_flow_port_attr *port_attr, uint16_t nb_queue, + const struct rte_flow_queue_attr *queue_attr[], + struct rte_flow_error *error); + +/* + * Config API + */ +int flow_set_mtu_inline(struct flow_eth_dev *dev, uint32_t port, uint16_t mtu); + +#endif /* _FLOW_API_PROFILE_INLINE_H_ */ diff --git a/drivers/net/ntnic/nthw/flow_api/profile_inline/flow_api_profile_inline_config.h b/drivers/net/ntnic/nthw/flow_api/profile_inline/flow_api_profile_inline_config.h new file mode 100644 index 0000000000..c665cab16a --- /dev/null +++ b/drivers/net/ntnic/nthw/flow_api/profile_inline/flow_api_profile_inline_config.h @@ -0,0 +1,127 @@ +/* + * SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2023 Napatech A/S + */ + +#ifndef _FLOW_API_PROFILE_INLINE_CONFIG_H_ +#define _FLOW_API_PROFILE_INLINE_CONFIG_H_ + +/* + * Per port configuration for IPv4 fragmentation and DF flag handling + * + * ||-------------------------------------||-------------------------||----------|| + * || Configuration || Egress packet type || || + * ||-------------------------------------||-------------------------|| Action || + * || IPV4_FRAGMENTATION | IPV4_DF_ACTION || Exceeding MTU | DF flag || || + * ||-------------------------------------||-------------------------||----------|| + * || DISABLE | - || - | - || Forward || + * ||-------------------------------------||-------------------------||----------|| + * || ENABLE | DF_DROP || no | - || Forward || + * || | || yes | 0 || Fragment || + * || | || yes | 1 || Drop || + * ||-------------------------------------||-------------------------||----------|| + * || ENABLE | DF_FORWARD || no | - || Forward || + * || | || yes | 0 || Fragment || + * || | || yes | 1 || Forward || + * ||-------------------------------------||-------------------------||----------|| + */ + +#define PORT_0_IPV4_FRAGMENTATION DISABLE_FRAGMENTATION +#define PORT_0_IPV4_DF_ACTION IPV4_DF_DROP + +#define PORT_1_IPV4_FRAGMENTATION DISABLE_FRAGMENTATION +#define PORT_1_IPV4_DF_ACTION IPV4_DF_DROP + +/* + * Per port configuration for IPv6 fragmentation + * + * ||-------------------------------------||-------------------------||----------|| + * || Configuration || Egress packet type || || + * ||-------------------------------------||-------------------------|| Action || + * || IPV6_FRAGMENTATION | IPV6_ACTION || Exceeding MTU || || + * ||-------------------------------------||-------------------------||----------|| + * || DISABLE | - || - || Forward || + * ||-------------------------------------||-------------------------||----------|| + * || ENABLE | DROP || no || Forward || + * || | || yes || Drop || + * ||-------------------------------------||-------------------------||----------|| + * || ENABLE | FRAGMENT || no || Forward || + * || | || yes || Fragment || + * ||-------------------------------------||-------------------------||----------|| + */ + +#define PORT_0_IPV6_FRAGMENTATION DISABLE_FRAGMENTATION +#define PORT_0_IPV6_ACTION IPV6_DROP + +#define PORT_1_IPV6_FRAGMENTATION DISABLE_FRAGMENTATION +#define PORT_1_IPV6_ACTION IPV6_DROP + +/* + * Statistics are generated each time the byte counter crosses a limit. + * If BYTE_LIMIT is zero then the byte counter does not trigger statistics + * generation. + * + * Format: 2^(BYTE_LIMIT + 15) bytes + * Valid range: 0 to 31 + * + * Example: 2^(8 + 15) = 2^23 ~~ 8MB + */ +#define NTNIC_FLOW_PERIODIC_STATS_BYTE_LIMIT 8 + +/* + * Statistics are generated each time the packet counter crosses a limit. 
+ * If PKT_LIMIT is zero then the packet counter does not trigger statistics + * generation. + * + * Format: 2^(PKT_LIMIT + 11) pkts + * Valid range: 0 to 31 + * + * Example: 2^(5 + 11) = 2^16 pkts ~~ 64K pkts + */ +#define NTNIC_FLOW_PERIODIC_STATS_PKT_LIMIT 5 + +/* + * Statistics are generated each time flow time (measured in ns) crosses a + * limit. + * If BYTE_TIMEOUT is zero then the flow time does not trigger statistics + * generation. + * + * Format: 2^(BYTE_TIMEOUT + 15) ns + * Valid range: 0 to 31 + * + * Example: 2^(23 + 15) = 2^38 ns ~~ 275 sec + */ +#define NTNIC_FLOW_PERIODIC_STATS_BYTE_TIMEOUT 23 + +/* + * This define sets the percentage of the full processing capacity + * being reserved for scan operations. The scanner is responsible + * for detecting aged out flows and meters with statistics timeout. + * + * A high scanner load percentage will make this detection more precise + * but will also give lower packet processing capacity. + * + * The percentage is given as a decimal number, e.g. 0.01 for 1%, which is the recommended value. + */ +#define NTNIC_SCANNER_LOAD 0.01 + +/* + * This define sets the timeout resolution of aged flow scanner (scrubber). + * + * The timeout resolution feature is provided in order to reduce the number of + * write-back operations for flows without attached meter. If the resolution + * is disabled (set to 0) and flow timeout is enabled via age action, then a write-back + * occurs every time the flow is evicted from the flow cache, essentially causing the + * lookup performance to drop to that of a flow with meter. By setting the timeout + * resolution (>0), write-back for flows happens only when the difference between + * the last recorded time for the flow and the current time exceeds the chosen resolution. + * + * The parameter value is a power of 2 in units of 2^28 nanoseconds. It means that the value 8 sets + * the timeout resolution to: 2^8 * 2^28 / 1e9 = 68.7 seconds + * + * NOTE: This parameter has a significant impact on flow lookup performance, especially + * if full scanner timeout resolution (=0) is configured. + */ +#define NTNIC_SCANNER_TIMEOUT_RESOLUTION 8 + +#endif /* _FLOW_API_PROFILE_INLINE_CONFIG_H_ */ diff --git a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_flm.c b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_flm.c index 6f3b381a17..8855978349 100644 --- a/drivers/net/ntnic/nthw/flow_filter/flow_nthw_flm.c +++ b/drivers/net/ntnic/nthw/flow_filter/flow_nthw_flm.c @@ -678,11 +678,13 @@ int flm_nthw_buf_ctrl_update(const struct flm_nthw *p, uint32_t *lrn_free, uint3 uint32_t address_bufctrl = nthw_register_get_address(p->mp_buf_ctrl); nthw_rab_bus_id_t bus_id = 1; struct dma_buf_ptr bc_buf; - ret = nthw_rac_rab_dma_begin(rac); + rte_spinlock_lock(&rac->m_mutex); + ret = !rac->m_dma_active ? nthw_rac_rab_dma_begin(rac) : -1; if (ret == 0) { nthw_rac_rab_read32_dma(rac, bus_id, address_bufctrl, 2, &bc_buf); - ret = nthw_rac_rab_dma_commit(rac); + ret = rac->m_dma_active ? 
nthw_rac_rab_dma_commit(rac) : (assert(0), -1); + rte_spinlock_unlock(&rac->m_mutex); if (ret != 0) return ret; @@ -692,6 +694,13 @@ int flm_nthw_buf_ctrl_update(const struct flm_nthw *p, uint32_t *lrn_free, uint3 *lrn_free = bc_buf.base[bc_index & bc_mask] & 0xffff; *inf_avail = (bc_buf.base[bc_index & bc_mask] >> 16) & 0xffff; *sta_avail = bc_buf.base[(bc_index + 1) & bc_mask] & 0xffff; + } else { + rte_spinlock_unlock(&rac->m_mutex); + const struct fpga_info_s *const p_fpga_info = p->mp_fpga->p_fpga_info; + const char *const p_adapter_id_str = p_fpga_info->mp_adapter_id_str; + NT_LOG(ERR, NTHW, + "%s: DMA begin requested, but a DMA transaction is already active", + p_adapter_id_str); } return ret; @@ -716,8 +725,10 @@ int flm_nthw_lrn_data_flush(const struct flm_nthw *p, const uint32_t *data, uint *handled_records = 0; int max_tries = 10000; - while (*inf_avail == 0 && *sta_avail == 0 && records != 0 && --max_tries > 0) - if (nthw_rac_rab_dma_begin(rac) == 0) { + while (*inf_avail == 0 && *sta_avail == 0 && records != 0 && --max_tries > 0) { + rte_spinlock_lock(&rac->m_mutex); + int ret = !rac->m_dma_active ? nthw_rac_rab_dma_begin(rac) : -1; + if (ret == 0) { uint32_t dma_free = nthw_rac_rab_get_free(rac); if (dma_free != RAB_DMA_BUF_CNT) { @@ -770,7 +781,11 @@ int flm_nthw_lrn_data_flush(const struct flm_nthw *p, const uint32_t *data, uint /* Read buf ctrl */ nthw_rac_rab_read32_dma(rac, bus_id, address_bufctrl, 2, &bc_buf); - if (nthw_rac_rab_dma_commit(rac) != 0) + int ret = rac->m_dma_active ? + nthw_rac_rab_dma_commit(rac) : + (assert(0), -1); + rte_spinlock_unlock(&rac->m_mutex); + if (ret != 0) return -1; uint32_t bc_mask = bc_buf.size - 1; @@ -778,8 +793,15 @@ int flm_nthw_lrn_data_flush(const struct flm_nthw *p, const uint32_t *data, uint *lrn_free = bc_buf.base[bc_index & bc_mask] & 0xffff; *inf_avail = (bc_buf.base[bc_index & bc_mask] >> 16) & 0xffff; *sta_avail = bc_buf.base[(bc_index + 1) & bc_mask] & 0xffff; + } else { + rte_spinlock_unlock(&rac->m_mutex); + const struct fpga_info_s *const p_fpga_info = p->mp_fpga->p_fpga_info; + const char *const p_adapter_id_str = p_fpga_info->mp_adapter_id_str; + NT_LOG(ERR, NTHW, + "%s: DMA begin requested, but a DMA transaction is already active", + p_adapter_id_str); } - + } return 0; } @@ -801,7 +823,8 @@ int flm_nthw_inf_sta_data_update(const struct flm_nthw *p, uint32_t *inf_data, uint32_t mask; uint32_t index; - ret = nthw_rac_rab_dma_begin(rac); + rte_spinlock_lock(&rac->m_mutex); + ret = !rac->m_dma_active ? nthw_rac_rab_dma_begin(rac) : -1; if (ret == 0) { /* Announce the number of words to read from INF_DATA */ @@ -821,7 +844,8 @@ int flm_nthw_inf_sta_data_update(const struct flm_nthw *p, uint32_t *inf_data, } nthw_rac_rab_read32_dma(rac, bus_id, address_bufctrl, 2, &bc_buf); - ret = nthw_rac_rab_dma_commit(rac); + ret = rac->m_dma_active ? 
nthw_rac_rab_dma_commit(rac) : (assert(0), -1); + rte_spinlock_unlock(&rac->m_mutex); if (ret != 0) return ret; @@ -847,6 +871,13 @@ int flm_nthw_inf_sta_data_update(const struct flm_nthw *p, uint32_t *inf_data, *lrn_free = bc_buf.base[index & mask] & 0xffff; *inf_avail = (bc_buf.base[index & mask] >> 16) & 0xffff; *sta_avail = bc_buf.base[(index + 1) & mask] & 0xffff; + } else { + rte_spinlock_unlock(&rac->m_mutex); + const struct fpga_info_s *const p_fpga_info = p->mp_fpga->p_fpga_info; + const char *const p_adapter_id_str = p_fpga_info->mp_adapter_id_str; + NT_LOG(ERR, NTHW, + "%s: DMA begin requested, but a DMA transaction is already active", + p_adapter_id_str); } return ret; diff --git a/drivers/net/ntnic/nthw/model/nthw_fpga_model.c b/drivers/net/ntnic/nthw/model/nthw_fpga_model.c index 4d495f5b96..9eaaeb550d 100644 --- a/drivers/net/ntnic/nthw/model/nthw_fpga_model.c +++ b/drivers/net/ntnic/nthw/model/nthw_fpga_model.c @@ -1050,6 +1050,18 @@ uint32_t nthw_field_get_val32(const nthw_field_t *p) return val; } +int32_t nthw_field_get_signed(const nthw_field_t *p) +{ + uint32_t val; + + nthw_field_get_val(p, &val, 1); + + if (val & (1U << nthw_field_get_bit_pos_high(p))) /* check sign */ + val = val | ~nthw_field_get_mask(p); /* sign extension */ + + return (int32_t)val; /* cast to signed value */ +} + uint32_t nthw_field_get_updated(const nthw_field_t *p) { uint32_t val; diff --git a/drivers/net/ntnic/nthw/model/nthw_fpga_model.h b/drivers/net/ntnic/nthw/model/nthw_fpga_model.h index 7956f0689e..d4e7ab3edd 100644 --- a/drivers/net/ntnic/nthw/model/nthw_fpga_model.h +++ b/drivers/net/ntnic/nthw/model/nthw_fpga_model.h @@ -227,6 +227,7 @@ void nthw_field_get_val(const nthw_field_t *p, uint32_t *p_data, uint32_t len); void nthw_field_set_val(const nthw_field_t *p, const uint32_t *p_data, uint32_t len); void nthw_field_set_val_flush(const nthw_field_t *p, const uint32_t *p_data, uint32_t len); uint32_t nthw_field_get_val32(const nthw_field_t *p); +int32_t nthw_field_get_signed(const nthw_field_t *p); uint32_t nthw_field_get_updated(const nthw_field_t *p); void nthw_field_update_register(const nthw_field_t *p); void nthw_field_flush_register(const nthw_field_t *p); diff --git a/drivers/net/ntnic/nthw/nthw_rac.c b/drivers/net/ntnic/nthw/nthw_rac.c index 461da8e104..ca6aba6db2 100644 --- a/drivers/net/ntnic/nthw/nthw_rac.c +++ b/drivers/net/ntnic/nthw/nthw_rac.c @@ -3,6 +3,7 @@ * Copyright(c) 2023 Napatech A/S */ +#include "rte_spinlock.h" #include "nt_util.h" #include "ntlog.h" @@ -10,8 +11,6 @@ #include "nthw_register.h" #include "nthw_rac.h" -#include - #define RAB_DMA_WAIT (1000000) #define RAB_READ (0x01) @@ -217,7 +216,7 @@ int nthw_rac_init(nthw_rac_t *p, nthw_fpga_t *p_fpga, struct fpga_info_s *p_fpga } } - pthread_mutex_init(&p->m_mutex, NULL); + rte_spinlock_init(&p->m_mutex); return 0; } @@ -389,19 +388,6 @@ void nthw_rac_bar0_write32(const struct fpga_info_s *p_fpga_info, uint32_t reg_a int nthw_rac_rab_dma_begin(nthw_rac_t *p) { - const struct fpga_info_s *const p_fpga_info = p->mp_fpga->p_fpga_info; - const char *const p_adapter_id_str = p_fpga_info->mp_adapter_id_str; - - pthread_mutex_lock(&p->m_mutex); - - if (p->m_dma_active) { - pthread_mutex_unlock(&p->m_mutex); - NT_LOG(ERR, NTHW, - "%s: DMA begin requested, but a DMA transaction is already active", - p_adapter_id_str); - return -1; - } - p->m_dma_active = true; return 0; @@ -454,19 +440,11 @@ int nthw_rac_rab_dma_commit(nthw_rac_t *p) { int ret; - if (!p->m_dma_active) { - /* Expecting mutex not to be locked! 
*/ - assert(0); /* alert developer that something is wrong */ - return -1; - } - nthw_rac_rab_dma_activate(p); ret = nthw_rac_rab_dma_wait(p); p->m_dma_active = false; - pthread_mutex_unlock(&p->m_mutex); - return ret; } @@ -602,7 +580,7 @@ int nthw_rac_rab_write32(nthw_rac_t *p, bool trc, nthw_rab_bus_id_t bus_id, uint return -1; } - pthread_mutex_lock(&p->m_mutex); + rte_spinlock_lock(&p->m_mutex); if (p->m_dma_active) { NT_LOG(ERR, NTHW, "%s: RAB: Illegal operation: DMA enabled", p_adapter_id_str); @@ -748,7 +726,7 @@ int nthw_rac_rab_write32(nthw_rac_t *p, bool trc, nthw_rab_bus_id_t bus_id, uint } exit_unlock_res: - pthread_mutex_unlock(&p->m_mutex); + rte_spinlock_unlock(&p->m_mutex); return res; } @@ -763,7 +741,7 @@ int nthw_rac_rab_read32(nthw_rac_t *p, bool trc, nthw_rab_bus_id_t bus_id, uint3 uint32_t out_buf_free; int res = 0; - pthread_mutex_lock(&p->m_mutex); + rte_spinlock_lock(&p->m_mutex); if (address > (1 << RAB_ADDR_BW)) { NT_LOG(ERR, NTHW, "%s: RAB: Illegal address: value too large %d - max %d", @@ -923,7 +901,7 @@ int nthw_rac_rab_read32(nthw_rac_t *p, bool trc, nthw_rab_bus_id_t bus_id, uint3 } exit_unlock_res: - pthread_mutex_unlock(&p->m_mutex); + rte_spinlock_unlock(&p->m_mutex); return res; } @@ -935,7 +913,7 @@ int nthw_rac_rab_flush(nthw_rac_t *p) uint32_t retry; int res = 0; - pthread_mutex_lock(&p->m_mutex); + rte_spinlock_lock(&p->m_mutex); /* Set the flush bit */ nthw_rac_reg_write32(p_fpga_info, p->RAC_RAB_BUF_USED_ADDR, @@ -960,6 +938,6 @@ int nthw_rac_rab_flush(nthw_rac_t *p) /* Clear flush bit when done */ nthw_rac_reg_write32(p_fpga_info, p->RAC_RAB_BUF_USED_ADDR, 0x0); - pthread_mutex_unlock(&p->m_mutex); + rte_spinlock_unlock(&p->m_mutex); return res; } diff --git a/drivers/net/ntnic/nthw/nthw_rac.h b/drivers/net/ntnic/nthw/nthw_rac.h index c64dac9da9..df92b487af 100644 --- a/drivers/net/ntnic/nthw/nthw_rac.h +++ b/drivers/net/ntnic/nthw/nthw_rac.h @@ -16,7 +16,7 @@ struct nthw_rac { nthw_fpga_t *mp_fpga; nthw_module_t *mp_mod_rac; - pthread_mutex_t m_mutex; + rte_spinlock_t m_mutex; int mn_param_rac_rab_interfaces; int mn_param_rac_rab_ob_update; diff --git a/drivers/net/ntnic/nthw/ntnic_meter/ntnic_meter.c b/drivers/net/ntnic/nthw/ntnic_meter/ntnic_meter.c new file mode 100644 index 0000000000..33593927a4 --- /dev/null +++ b/drivers/net/ntnic/nthw/ntnic_meter/ntnic_meter.c @@ -0,0 +1,483 @@ +/* + * SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2023 Napatech A/S + */ + +#include + +#include +#include +#include +#include +#include + +#include "ntos_drv.h" +#include "ntlog.h" +#include "nt_util.h" +#include "ntos_system.h" +#include "ntnic_mod_reg.h" + +static inline uint8_t get_caller_id(uint16_t port) +{ + return MAX_VDPA_PORTS + (uint8_t)(port & 0x7f) + 1; +} + +struct qos_integer_fractional { + uint32_t integer; + uint32_t fractional; /* 1/1024 */ +}; + +/* + * Inline FLM metering + */ + +static int eth_mtr_capabilities_get_inline(struct rte_eth_dev *eth_dev, + struct rte_mtr_capabilities *cap, + struct rte_mtr_error *error) +{ + const struct profile_inline_ops *profile_inline_ops = get_profile_inline_ops(); + + if (profile_inline_ops == NULL) { + NT_LOG(ERR, NTHW, "profile_inline module uninitialized"); + return -1; + } + + struct pmd_internals *internals = eth_dev->data->dev_private; + + uint8_t caller_id = get_caller_id(eth_dev->data->port_id); + + if (!profile_inline_ops->flow_mtr_supported(internals->flw_dev)) { + return -rte_mtr_error_set(error, EINVAL, RTE_MTR_ERROR_TYPE_UNSPECIFIED, NULL, + "Ethernet device does not support 
metering"); + } + + memset(cap, 0x0, sizeof(struct rte_mtr_capabilities)); + + /* MBR records use 28-bit integers */ + cap->n_max = profile_inline_ops->flow_mtr_meters_supported(internals->flw_dev, + caller_id); + cap->n_shared_max = cap->n_max; + + cap->identical = 0; + cap->shared_identical = 0; + + cap->shared_n_flows_per_mtr_max = UINT32_MAX; + + /* Limited by number of MBR record ids per FLM learn record */ + cap->chaining_n_mtrs_per_flow_max = 4; + + cap->chaining_use_prev_mtr_color_supported = 0; + cap->chaining_use_prev_mtr_color_enforced = 0; + + cap->meter_rate_max = (uint64_t)(0xfff << 0xf) * 1099; + + cap->stats_mask = RTE_MTR_STATS_N_PKTS_GREEN | RTE_MTR_STATS_N_BYTES_GREEN; + + /* Only color-blind mode is supported */ + cap->color_aware_srtcm_rfc2697_supported = 0; + cap->color_aware_trtcm_rfc2698_supported = 0; + cap->color_aware_trtcm_rfc4115_supported = 0; + + /* Focused on RFC2698 for now */ + cap->meter_srtcm_rfc2697_n_max = 0; + cap->meter_trtcm_rfc2698_n_max = cap->n_max; + cap->meter_trtcm_rfc4115_n_max = 0; + + cap->meter_policy_n_max = profile_inline_ops->flow_mtr_meter_policy_n_max(); + + /* Byte mode is supported */ + cap->srtcm_rfc2697_byte_mode_supported = 0; + cap->trtcm_rfc2698_byte_mode_supported = 1; + cap->trtcm_rfc4115_byte_mode_supported = 0; + + /* Packet mode not supported */ + cap->srtcm_rfc2697_packet_mode_supported = 0; + cap->trtcm_rfc2698_packet_mode_supported = 0; + cap->trtcm_rfc4115_packet_mode_supported = 0; + + return 0; +} + +static int eth_mtr_meter_profile_add_inline(struct rte_eth_dev *eth_dev, + uint32_t meter_profile_id, + struct rte_mtr_meter_profile *profile, + struct rte_mtr_error *error __rte_unused) +{ + const struct profile_inline_ops *profile_inline_ops = get_profile_inline_ops(); + + if (profile_inline_ops == NULL) { + NT_LOG(ERR, NTHW, "profile_inline module uninitialized"); + return -1; + } + + struct pmd_internals *internals = eth_dev->data->dev_private; + + if (meter_profile_id >= profile_inline_ops->flow_mtr_meter_policy_n_max()) + return -rte_mtr_error_set(error, EINVAL, RTE_MTR_ERROR_TYPE_METER_PROFILE_ID, NULL, + "Profile id out of range"); + + if (profile->packet_mode != 0) { + return -rte_mtr_error_set(error, EINVAL, + RTE_MTR_ERROR_TYPE_METER_PROFILE_PACKET_MODE, NULL, + "Profile packet mode not supported"); + } + + if (profile->alg == RTE_MTR_SRTCM_RFC2697) { + return -rte_mtr_error_set(error, EINVAL, RTE_MTR_ERROR_TYPE_METER_PROFILE, NULL, + "RFC 2697 not supported"); + } + + if (profile->alg == RTE_MTR_TRTCM_RFC4115) { + return -rte_mtr_error_set(error, EINVAL, RTE_MTR_ERROR_TYPE_METER_PROFILE, NULL, + "RFC 4115 not supported"); + } + + if (profile->trtcm_rfc2698.cir != profile->trtcm_rfc2698.pir || + profile->trtcm_rfc2698.cbs != profile->trtcm_rfc2698.pbs) { + return -rte_mtr_error_set(error, EINVAL, RTE_MTR_ERROR_TYPE_METER_PROFILE, NULL, + "Profile committed and peak rates must be equal"); + } + + int res = profile_inline_ops->flow_mtr_set_profile(internals->flw_dev, meter_profile_id, + profile->trtcm_rfc2698.cir, + profile->trtcm_rfc2698.cbs, 0, 0); + + if (res) { + return -rte_mtr_error_set(error, EINVAL, RTE_MTR_ERROR_TYPE_METER_PROFILE, NULL, + "Profile could not be added."); + } + + return 0; +} + +static int eth_mtr_meter_profile_delete_inline(struct rte_eth_dev *eth_dev, + uint32_t meter_profile_id, + struct rte_mtr_error *error __rte_unused) +{ + const struct profile_inline_ops *profile_inline_ops = get_profile_inline_ops(); + + if (profile_inline_ops == NULL) { + NT_LOG(ERR, NTHW, "profile_inline module 
uninitialized"); + return -1; + } + + struct pmd_internals *internals = eth_dev->data->dev_private; + + if (meter_profile_id >= profile_inline_ops->flow_mtr_meter_policy_n_max()) + return -rte_mtr_error_set(error, EINVAL, RTE_MTR_ERROR_TYPE_METER_PROFILE_ID, NULL, + "Profile id out of range"); + + profile_inline_ops->flow_mtr_set_profile(internals->flw_dev, meter_profile_id, 0, 0, 0, 0); + + return 0; +} + +static int eth_mtr_meter_policy_add_inline(struct rte_eth_dev *eth_dev, + uint32_t policy_id, + struct rte_mtr_meter_policy_params *policy, + struct rte_mtr_error *error) +{ + const struct profile_inline_ops *profile_inline_ops = get_profile_inline_ops(); + + if (profile_inline_ops == NULL) { + NT_LOG(ERR, NTHW, "profile_inline module uninitialized"); + return -1; + } + + struct pmd_internals *internals = eth_dev->data->dev_private; + + if (policy_id >= profile_inline_ops->flow_mtr_meter_policy_n_max()) + return -rte_mtr_error_set(error, EINVAL, RTE_MTR_ERROR_TYPE_METER_POLICY_ID, NULL, + "Policy id out of range"); + + const struct rte_flow_action *actions = policy->actions[RTE_COLOR_GREEN]; + int green_action_supported = (actions[0].type == RTE_FLOW_ACTION_TYPE_END) || + (actions[0].type == RTE_FLOW_ACTION_TYPE_VOID && + actions[1].type == RTE_FLOW_ACTION_TYPE_END) || + (actions[0].type == RTE_FLOW_ACTION_TYPE_PASSTHRU && + actions[1].type == RTE_FLOW_ACTION_TYPE_END); + + actions = policy->actions[RTE_COLOR_YELLOW]; + int yellow_action_supported = actions[0].type == RTE_FLOW_ACTION_TYPE_DROP && + actions[1].type == RTE_FLOW_ACTION_TYPE_END; + + actions = policy->actions[RTE_COLOR_RED]; + int red_action_supported = actions[0].type == RTE_FLOW_ACTION_TYPE_DROP && + actions[1].type == RTE_FLOW_ACTION_TYPE_END; + + if (green_action_supported == 0 || yellow_action_supported == 0 || + red_action_supported == 0) { + return -rte_mtr_error_set(error, EINVAL, RTE_MTR_ERROR_TYPE_METER_POLICY, NULL, + "Unsupported meter policy actions"); + } + + if (profile_inline_ops->flow_mtr_set_policy(internals->flw_dev, policy_id, 1)) { + return -rte_mtr_error_set(error, EINVAL, RTE_MTR_ERROR_TYPE_METER_POLICY, NULL, + "Policy could not be added"); + } + + return 0; +} + +static int eth_mtr_meter_policy_delete_inline(struct rte_eth_dev *eth_dev __rte_unused, + uint32_t policy_id, + struct rte_mtr_error *error __rte_unused) +{ + const struct profile_inline_ops *profile_inline_ops = get_profile_inline_ops(); + + if (profile_inline_ops == NULL) { + NT_LOG(ERR, NTHW, "profile_inline module uninitialized"); + return -1; + } + + if (policy_id >= profile_inline_ops->flow_mtr_meter_policy_n_max()) + return -rte_mtr_error_set(error, EINVAL, RTE_MTR_ERROR_TYPE_METER_POLICY_ID, NULL, + "Policy id out of range"); + + return 0; +} + +static int eth_mtr_create_inline(struct rte_eth_dev *eth_dev, + uint32_t mtr_id, + struct rte_mtr_params *params, + int shared, + struct rte_mtr_error *error) +{ + const struct profile_inline_ops *profile_inline_ops = get_profile_inline_ops(); + + if (profile_inline_ops == NULL) { + NT_LOG(ERR, NTHW, "profile_inline module uninitialized"); + return -1; + } + + struct pmd_internals *internals = eth_dev->data->dev_private; + + uint8_t caller_id = get_caller_id(eth_dev->data->port_id); + + if (params->use_prev_mtr_color != 0 || params->dscp_table != NULL) { + return -rte_mtr_error_set(error, EINVAL, RTE_MTR_ERROR_TYPE_MTR_PARAMS, NULL, + "Only color blind mode is supported"); + } + + uint64_t allowed_stats_mask = RTE_MTR_STATS_N_PKTS_GREEN | RTE_MTR_STATS_N_BYTES_GREEN; + + if 
((params->stats_mask & ~allowed_stats_mask) != 0) { + return -rte_mtr_error_set(error, EINVAL, RTE_MTR_ERROR_TYPE_MTR_PARAMS, NULL, + "Requested color stats not supported"); + } + + if (params->meter_enable == 0) { + return -rte_mtr_error_set(error, EINVAL, RTE_MTR_ERROR_TYPE_MTR_PARAMS, NULL, + "Disabled meters not supported"); + } + + if (shared == 0) { + return -rte_mtr_error_set(error, EINVAL, RTE_MTR_ERROR_TYPE_MTR_PARAMS, NULL, + "Only shared mtrs are supported"); + } + + if (params->meter_profile_id >= profile_inline_ops->flow_mtr_meter_policy_n_max()) + return -rte_mtr_error_set(error, EINVAL, RTE_MTR_ERROR_TYPE_METER_PROFILE_ID, NULL, + "Profile id out of range"); + + if (params->meter_policy_id >= profile_inline_ops->flow_mtr_meter_policy_n_max()) + return -rte_mtr_error_set(error, EINVAL, RTE_MTR_ERROR_TYPE_METER_POLICY_ID, NULL, + "Policy id out of range"); + + if (mtr_id >= + profile_inline_ops->flow_mtr_meters_supported(internals->flw_dev, caller_id)) { + return -rte_mtr_error_set(error, EINVAL, RTE_MTR_ERROR_TYPE_MTR_PARAMS, NULL, + "MTR id is out of range"); + } + + int res = profile_inline_ops->flow_mtr_create_meter(internals->flw_dev, + caller_id, + mtr_id, + params->meter_profile_id, + params->meter_policy_id, + params->stats_mask); + + if (res) { + return -rte_mtr_error_set(error, EINVAL, RTE_MTR_ERROR_TYPE_UNSPECIFIED, NULL, + "Failed to offload to hardware"); + } + + return 0; +} + +static int eth_mtr_destroy_inline(struct rte_eth_dev *eth_dev, + uint32_t mtr_id, + struct rte_mtr_error *error __rte_unused) +{ + const struct profile_inline_ops *profile_inline_ops = get_profile_inline_ops(); + + if (profile_inline_ops == NULL) { + NT_LOG(ERR, NTHW, "profile_inline module uninitialized"); + return -1; + } + + struct pmd_internals *internals = eth_dev->data->dev_private; + + uint8_t caller_id = get_caller_id(eth_dev->data->port_id); + + if (mtr_id >= + profile_inline_ops->flow_mtr_meters_supported(internals->flw_dev, caller_id)) { + return -rte_mtr_error_set(error, EINVAL, RTE_MTR_ERROR_TYPE_MTR_PARAMS, NULL, + "MTR id is out of range"); + } + + if (profile_inline_ops->flow_mtr_destroy_meter(internals->flw_dev, caller_id, mtr_id)) { + return -rte_mtr_error_set(error, EINVAL, RTE_MTR_ERROR_TYPE_UNSPECIFIED, NULL, + "Failed to offload to hardware"); + } + + return 0; +} + +static int eth_mtr_stats_adjust_inline(struct rte_eth_dev *eth_dev, + uint32_t mtr_id, + uint64_t adjust_value, + struct rte_mtr_error *error) +{ + const struct profile_inline_ops *profile_inline_ops = get_profile_inline_ops(); + + if (profile_inline_ops == NULL) { + NT_LOG(ERR, NTHW, "profile_inline module uninitialized"); + return -1; + } + + const uint64_t adjust_bit = 1ULL << 63; + const uint64_t probe_bit = 1ULL << 62; + struct pmd_internals *internals = eth_dev->data->dev_private; + uint8_t caller_id = get_caller_id(eth_dev->data->port_id); + + if (mtr_id >= + profile_inline_ops->flow_mtr_meters_supported(internals->flw_dev, caller_id)) { + return -rte_mtr_error_set(error, EINVAL, RTE_MTR_ERROR_TYPE_MTR_PARAMS, NULL, + "MTR id is out of range"); + } + + if (adjust_value & adjust_bit) { + adjust_value &= adjust_bit - 1; + + if (adjust_value > (uint64_t)UINT32_MAX) { + return -rte_mtr_error_set(error, EINVAL, RTE_MTR_ERROR_TYPE_MTR_PARAMS, + NULL, "Adjust value is out of range"); + } + + if (profile_inline_ops->flm_mtr_adjust_stats(internals->flw_dev, caller_id, mtr_id, + (uint32_t)adjust_value)) { + return -rte_mtr_error_set(error, EINVAL, RTE_MTR_ERROR_TYPE_UNSPECIFIED, + NULL, "Failed to adjust 
offloaded MTR"); + } + + return 0; + } + + if (adjust_value & probe_bit) { + if (mtr_id >= + profile_inline_ops->flow_mtr_meters_supported(internals->flw_dev, + caller_id)) { + return -rte_mtr_error_set(error, EINVAL, RTE_MTR_ERROR_TYPE_MTR_PARAMS, + NULL, "MTR id is out of range"); + } + + if (profile_inline_ops->flow_mtr_probe_meter(internals->flw_dev, caller_id, + mtr_id)) { + return -rte_mtr_error_set(error, EINVAL, RTE_MTR_ERROR_TYPE_UNSPECIFIED, + NULL, "Failed to offload to hardware"); + } + + return 0; + } + + return -rte_mtr_error_set(error, EINVAL, RTE_MTR_ERROR_TYPE_MTR_PARAMS, NULL, + "Use of meter stats update requires bit 63 or bit 62 of \"stats_mask\" must be 1."); +} + +static int eth_mtr_stats_read_inline(struct rte_eth_dev *eth_dev, + uint32_t mtr_id, + struct rte_mtr_stats *stats, + uint64_t *stats_mask, + int clear, + struct rte_mtr_error *error) +{ + const struct profile_inline_ops *profile_inline_ops = get_profile_inline_ops(); + + if (profile_inline_ops == NULL) { + NT_LOG(ERR, NTHW, "profile_inline module uninitialized"); + return -1; + } + + struct pmd_internals *internals = eth_dev->data->dev_private; + + uint8_t caller_id = get_caller_id(eth_dev->data->port_id); + + if (mtr_id >= + profile_inline_ops->flow_mtr_meters_supported(internals->flw_dev, caller_id)) { + return -rte_mtr_error_set(error, EINVAL, RTE_MTR_ERROR_TYPE_MTR_PARAMS, NULL, + "MTR id is out of range"); + } + + memset(stats, 0x0, sizeof(struct rte_mtr_stats)); + profile_inline_ops->flm_mtr_read_stats(internals->flw_dev, caller_id, mtr_id, stats_mask, + &stats->n_pkts[RTE_COLOR_GREEN], + &stats->n_bytes[RTE_COLOR_GREEN], clear); + + return 0; +} + +/* + * Ops setup + */ + +static const struct rte_mtr_ops mtr_ops_inline = { + .capabilities_get = eth_mtr_capabilities_get_inline, + .meter_profile_add = eth_mtr_meter_profile_add_inline, + .meter_profile_delete = eth_mtr_meter_profile_delete_inline, + .create = eth_mtr_create_inline, + .destroy = eth_mtr_destroy_inline, + .meter_policy_add = eth_mtr_meter_policy_add_inline, + .meter_policy_delete = eth_mtr_meter_policy_delete_inline, + .stats_update = eth_mtr_stats_adjust_inline, + .stats_read = eth_mtr_stats_read_inline, +}; + +static int eth_mtr_ops_get(struct rte_eth_dev *eth_dev, void *ops) +{ + struct pmd_internals *internals = eth_dev->data->dev_private; + ntdrv_4ga_t *p_nt_drv = &internals->p_drv->ntdrv; + enum fpga_info_profile profile = p_nt_drv->adapter_info.fpga_info.profile; + + switch (profile) { + case FPGA_INFO_PROFILE_INLINE: + *(const struct rte_mtr_ops **)ops = &mtr_ops_inline; + break; + + case FPGA_INFO_PROFILE_UNKNOWN: + + /* fallthrough */ + case FPGA_INFO_PROFILE_CAPTURE: + + /* fallthrough */ + default: + NT_LOG(ERR, NTHW, "" PCIIDENT_PRINT_STR ": fpga profile not supported", + PCIIDENT_TO_DOMAIN(p_nt_drv->pciident), + PCIIDENT_TO_BUSNR(p_nt_drv->pciident), + PCIIDENT_TO_DEVNR(p_nt_drv->pciident), + PCIIDENT_TO_FUNCNR(p_nt_drv->pciident)); + return -1; + } + + return 0; +} + +static struct meter_ops_s meter_ops = { + .eth_mtr_ops_get = eth_mtr_ops_get, +}; + +void meter_init(void) +{ + NT_LOG(DBG, NTNIC, "Meter ops initialized"); + register_meter_ops(&meter_ops); +} diff --git a/drivers/net/ntnic/nthw/rte_pmd_ntnic.h b/drivers/net/ntnic/nthw/rte_pmd_ntnic.h new file mode 100644 index 0000000000..4a1ba18a5e --- /dev/null +++ b/drivers/net/ntnic/nthw/rte_pmd_ntnic.h @@ -0,0 +1,43 @@ +/* + * SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2023 Napatech A/S + */ + +#ifndef NTNIC_EVENT_H_ +#define NTNIC_EVENT_H_ + +#include + 
+typedef struct ntnic_flm_load_s { + uint64_t lookup; + uint64_t lookup_maximum; + uint64_t access; + uint64_t access_maximum; +} ntnic_flm_load_t; + +typedef struct ntnic_port_load_s { + uint64_t rx_pps; + uint64_t rx_pps_maximum; + uint64_t tx_pps; + uint64_t tx_pps_maximum; + uint64_t rx_bps; + uint64_t rx_bps_maximum; + uint64_t tx_bps; + uint64_t tx_bps_maximum; +} ntnic_port_load_t; + +struct ntnic_flm_statistic_s { + uint64_t bytes; + uint64_t packets; + uint64_t timestamp; + uint64_t id; + uint8_t cause; +}; + +enum rte_ntnic_event_type { + RTE_NTNIC_FLM_LOAD_EVENT = RTE_ETH_EVENT_MAX, + RTE_NTNIC_PORT_LOAD_EVENT, + RTE_NTNIC_FLM_STATS_EVENT, +}; + +#endif /* NTNIC_EVENT_H_ */ diff --git a/drivers/net/ntnic/nthw/stat/nthw_stat.c b/drivers/net/ntnic/nthw/stat/nthw_stat.c new file mode 100644 index 0000000000..078eec5e1f --- /dev/null +++ b/drivers/net/ntnic/nthw/stat/nthw_stat.c @@ -0,0 +1,498 @@ +/* + * SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2023 Napatech A/S + */ + +#include "nt_util.h" +#include "ntlog.h" + +#include "nthw_drv.h" +#include "nthw_register.h" + +#include "ntnic_stat.h" + +#include + +nthw_stat_t *nthw_stat_new(void) +{ + nthw_stat_t *p = malloc(sizeof(nthw_stat_t)); + + if (p) + memset(p, 0, sizeof(nthw_stat_t)); + + return p; +} + +void nthw_stat_delete(nthw_stat_t *p) +{ + if (p) + free(p); +} + +int nthw_stat_init(nthw_stat_t *p, nthw_fpga_t *p_fpga, int n_instance) +{ + const char *const p_adapter_id_str = p_fpga->p_fpga_info->mp_adapter_id_str; + uint64_t n_module_version_packed64 = -1; + nthw_module_t *mod = nthw_fpga_query_module(p_fpga, MOD_STA, n_instance); + + if (p == NULL) + return mod == NULL ? -1 : 0; + + if (mod == NULL) { + NT_LOG(ERR, NTHW, "%s: STAT %d: no such instance", p_adapter_id_str, n_instance); + return -1; + } + + p->mp_fpga = p_fpga; + p->mn_instance = n_instance; + p->mp_mod_stat = mod; + + n_module_version_packed64 = nthw_module_get_version_packed64(p->mp_mod_stat); + NT_LOG(DBG, NTHW, "%s: STAT %d: version=0x%08lX", p_adapter_id_str, p->mn_instance, + n_module_version_packed64); + + { + nthw_register_t *p_reg; + /* STA_CFG register */ + p_reg = nthw_module_get_register(p->mp_mod_stat, STA_CFG); + p->mp_fld_dma_ena = nthw_register_get_field(p_reg, STA_CFG_DMA_ENA); + p->mp_fld_cnt_clear = nthw_register_get_field(p_reg, STA_CFG_CNT_CLEAR); + + /* CFG: fields NOT available from v. 
3 */ + p->mp_fld_tx_disable = nthw_register_query_field(p_reg, STA_CFG_TX_DISABLE); + p->mp_fld_cnt_freeze = nthw_register_query_field(p_reg, STA_CFG_CNT_FRZ); + + /* STA_STATUS register */ + p_reg = nthw_module_get_register(p->mp_mod_stat, STA_STATUS); + p->mp_fld_stat_toggle_missed = + nthw_register_get_field(p_reg, STA_STATUS_STAT_TOGGLE_MISSED); + + /* HOST_ADR registers */ + p_reg = nthw_module_get_register(p->mp_mod_stat, STA_HOST_ADR_LSB); + p->mp_fld_dma_lsb = nthw_register_get_field(p_reg, STA_HOST_ADR_LSB_LSB); + + p_reg = nthw_module_get_register(p->mp_mod_stat, STA_HOST_ADR_MSB); + p->mp_fld_dma_msb = nthw_register_get_field(p_reg, STA_HOST_ADR_MSB_MSB); + + /* Binning cycles */ + p_reg = nthw_module_query_register(p->mp_mod_stat, STA_LOAD_BIN); + + if (p_reg) { + p->mp_fld_load_bin = nthw_register_get_field(p_reg, STA_LOAD_BIN_BIN); + + /* Bandwidth load for RX port 0 */ + p_reg = nthw_module_query_register(p->mp_mod_stat, STA_LOAD_BPS_RX_0); + + if (p_reg) { + p->mp_fld_load_bps_rx0 = + nthw_register_get_field(p_reg, STA_LOAD_BPS_RX_0_BPS); + + } else { + p->mp_fld_load_bps_rx0 = NULL; + } + + /* Bandwidth load for RX port 1 */ + p_reg = nthw_module_query_register(p->mp_mod_stat, STA_LOAD_BPS_RX_1); + + if (p_reg) { + p->mp_fld_load_bps_rx1 = + nthw_register_get_field(p_reg, STA_LOAD_BPS_RX_1_BPS); + + } else { + p->mp_fld_load_bps_rx1 = NULL; + } + + /* Bandwidth load for TX port 0 */ + p_reg = nthw_module_query_register(p->mp_mod_stat, STA_LOAD_BPS_TX_0); + + if (p_reg) { + p->mp_fld_load_bps_tx0 = + nthw_register_get_field(p_reg, STA_LOAD_BPS_TX_0_BPS); + + } else { + p->mp_fld_load_bps_tx0 = NULL; + } + + /* Bandwidth load for TX port 1 */ + p_reg = nthw_module_query_register(p->mp_mod_stat, STA_LOAD_BPS_TX_1); + + if (p_reg) { + p->mp_fld_load_bps_tx1 = + nthw_register_get_field(p_reg, STA_LOAD_BPS_TX_1_BPS); + + } else { + p->mp_fld_load_bps_tx1 = NULL; + } + + /* Packet load for RX port 0 */ + p_reg = nthw_module_query_register(p->mp_mod_stat, STA_LOAD_PPS_RX_0); + + if (p_reg) { + p->mp_fld_load_pps_rx0 = + nthw_register_get_field(p_reg, STA_LOAD_PPS_RX_0_PPS); + + } else { + p->mp_fld_load_pps_rx0 = NULL; + } + + /* Packet load for RX port 1 */ + p_reg = nthw_module_query_register(p->mp_mod_stat, STA_LOAD_PPS_RX_1); + + if (p_reg) { + p->mp_fld_load_pps_rx1 = + nthw_register_get_field(p_reg, STA_LOAD_PPS_RX_1_PPS); + + } else { + p->mp_fld_load_pps_rx1 = NULL; + } + + /* Packet load for TX port 0 */ + p_reg = nthw_module_query_register(p->mp_mod_stat, STA_LOAD_PPS_TX_0); + + if (p_reg) { + p->mp_fld_load_pps_tx0 = + nthw_register_get_field(p_reg, STA_LOAD_PPS_TX_0_PPS); + + } else { + p->mp_fld_load_pps_tx0 = NULL; + } + + /* Packet load for TX port 1 */ + p_reg = nthw_module_query_register(p->mp_mod_stat, STA_LOAD_PPS_TX_1); + + if (p_reg) { + p->mp_fld_load_pps_tx1 = + nthw_register_get_field(p_reg, STA_LOAD_PPS_TX_1_PPS); + + } else { + p->mp_fld_load_pps_tx1 = NULL; + } + + } else { + p->mp_fld_load_bin = NULL; + p->mp_fld_load_bps_rx0 = NULL; + p->mp_fld_load_bps_rx1 = NULL; + p->mp_fld_load_bps_tx0 = NULL; + p->mp_fld_load_bps_tx1 = NULL; + p->mp_fld_load_pps_rx0 = NULL; + p->mp_fld_load_pps_rx1 = NULL; + p->mp_fld_load_pps_tx0 = NULL; + p->mp_fld_load_pps_tx1 = NULL; + } + } + + /* Params */ + p->m_nb_nim_ports = nthw_fpga_get_product_param(p_fpga, NT_NIMS, 0); + p->m_nb_phy_ports = nthw_fpga_get_product_param(p_fpga, NT_PHY_PORTS, 0); + + /* VSWITCH */ + p->m_nb_rx_ports = nthw_fpga_get_product_param(p_fpga, NT_STA_RX_PORTS, -1); + + if (p->m_nb_rx_ports == 
-1) { + /* non-VSWITCH */ + p->m_nb_rx_ports = nthw_fpga_get_product_param(p_fpga, NT_RX_PORTS, -1); + + if (p->m_nb_rx_ports == -1) { + /* non-VSWITCH */ + p->m_nb_rx_ports = nthw_fpga_get_product_param(p_fpga, NT_PORTS, 0); + } + } + + p->m_nb_rpp_per_ps = nthw_fpga_get_product_param(p_fpga, NT_RPP_PER_PS, 0); + + p->m_nb_tx_ports = nthw_fpga_get_product_param(p_fpga, NT_TX_PORTS, 0); + p->m_rx_port_replicate = nthw_fpga_get_product_param(p_fpga, NT_RX_PORT_REPLICATE, 0); + + /* VSWITCH */ + p->m_nb_color_counters = nthw_fpga_get_product_param(p_fpga, NT_STA_COLORS, 64) * 2; + + if (p->m_nb_color_counters == 0) { + /* non-VSWITCH */ + p->m_nb_color_counters = nthw_fpga_get_product_param(p_fpga, NT_CAT_FUNCS, 0) * 2; + } + + p->m_nb_rx_host_buffers = nthw_fpga_get_product_param(p_fpga, NT_QUEUES, 0); + p->m_nb_tx_host_buffers = p->m_nb_rx_host_buffers; + + p->m_dbs_present = nthw_fpga_get_product_param(p_fpga, NT_DBS_PRESENT, 0); + + p->m_nb_rx_hb_counters = (p->m_nb_rx_host_buffers * (6 + 2 * + (n_module_version_packed64 >= VERSION_PACKED64(0, 6) ? + p->m_dbs_present : 0))); + + p->m_nb_tx_hb_counters = 0; + + p->m_nb_rx_port_counters = 42 + + 2 * (n_module_version_packed64 >= VERSION_PACKED64(0, 6) ? p->m_dbs_present : 0); + p->m_nb_tx_port_counters = 0; + + p->m_nb_counters = + p->m_nb_color_counters + p->m_nb_rx_hb_counters + p->m_nb_tx_hb_counters; + + p->mn_stat_layout_version = 0; + + if (n_module_version_packed64 >= VERSION_PACKED64(0, 9)) { + p->mn_stat_layout_version = 7; + + } else if (n_module_version_packed64 >= VERSION_PACKED64(0, 8)) { + p->mn_stat_layout_version = 6; + + } else if (n_module_version_packed64 >= VERSION_PACKED64(0, 6)) { + p->mn_stat_layout_version = 5; + + } else if (n_module_version_packed64 >= VERSION_PACKED64(0, 4)) { + p->mn_stat_layout_version = 4; + + } else if (n_module_version_packed64 >= VERSION_PACKED64(0, 3)) { + p->mn_stat_layout_version = 3; + + } else if (n_module_version_packed64 >= VERSION_PACKED64(0, 2)) { + p->mn_stat_layout_version = 2; + + } else if (n_module_version_packed64 > VERSION_PACKED64(0, 0)) { + p->mn_stat_layout_version = 1; + + } else { + p->mn_stat_layout_version = 0; + NT_LOG(ERR, NTHW, "%s: unknown module_version 0x%08lX layout=%d", + p_adapter_id_str, n_module_version_packed64, p->mn_stat_layout_version); + } + + assert(p->mn_stat_layout_version); + + /* STA module 0.2+ adds IPF counters per port (Rx feature) */ + if (n_module_version_packed64 >= VERSION_PACKED64(0, 2)) + p->m_nb_rx_port_counters += 6; + + /* STA module 0.3+ adds TX stats */ + if (n_module_version_packed64 >= VERSION_PACKED64(0, 3) || p->m_nb_tx_ports >= 1) + p->mb_has_tx_stats = true; + + /* STA module 0.3+ adds TX stat counters */ + if (n_module_version_packed64 >= VERSION_PACKED64(0, 3)) + p->m_nb_tx_port_counters += 22; + + /* STA module 0.4+ adds TX drop event counter */ + if (n_module_version_packed64 >= VERSION_PACKED64(0, 4)) + p->m_nb_tx_port_counters += 1; /* TX drop event counter */ + + /* + * STA module 0.6+ adds pkt filter drop octets+pkts, retransmit and + * duplicate counters + */ + if (n_module_version_packed64 >= VERSION_PACKED64(0, 6)) { + p->m_nb_rx_port_counters += 4; + p->m_nb_tx_port_counters += 1; + } + + p->m_nb_counters += (p->m_nb_rx_ports * p->m_nb_rx_port_counters); + + if (p->mb_has_tx_stats) + p->m_nb_counters += (p->m_nb_tx_ports * p->m_nb_tx_port_counters); + + /* Output params (debug) */ + NT_LOG(DBG, NTHW, "%s: nims=%d rxports=%d txports=%d rxrepl=%d colors=%d queues=%d", + p_adapter_id_str, p->m_nb_nim_ports, 
p->m_nb_rx_ports, p->m_nb_tx_ports, + p->m_rx_port_replicate, p->m_nb_color_counters, p->m_nb_rx_host_buffers); + NT_LOG(DBG, NTHW, "%s: hbs=%d hbcounters=%d rxcounters=%d txcounters=%d", + p_adapter_id_str, p->m_nb_rx_host_buffers, p->m_nb_rx_hb_counters, + p->m_nb_rx_port_counters, p->m_nb_tx_port_counters); + NT_LOG(DBG, NTHW, "%s: layout=%d", p_adapter_id_str, p->mn_stat_layout_version); + NT_LOG(DBG, NTHW, "%s: counters=%d (0x%X)", p_adapter_id_str, p->m_nb_counters, + p->m_nb_counters); + + /* Init */ + if (p->mp_fld_tx_disable) + nthw_field_set_flush(p->mp_fld_tx_disable); + + nthw_field_update_register(p->mp_fld_cnt_clear); + nthw_field_set_flush(p->mp_fld_cnt_clear); + nthw_field_clr_flush(p->mp_fld_cnt_clear); + + nthw_field_update_register(p->mp_fld_stat_toggle_missed); + nthw_field_set_flush(p->mp_fld_stat_toggle_missed); + + nthw_field_update_register(p->mp_fld_dma_ena); + nthw_field_clr_flush(p->mp_fld_dma_ena); + nthw_field_update_register(p->mp_fld_dma_ena); + + /* Set the sliding windows size for port load */ + if (p->mp_fld_load_bin) { + uint32_t rpp = nthw_fpga_get_product_param(p_fpga, NT_RPP_PER_PS, 0); + uint32_t bin = + (uint32_t)(((PORT_LOAD_WINDOWS_SIZE * 1000000000000ULL) / (32ULL * rpp)) - + 1ULL); + nthw_field_set_val_flush32(p->mp_fld_load_bin, bin); + } + + return 0; +} + +int nthw_stat_set_dma_address(nthw_stat_t *p, uint64_t stat_dma_physical, + uint32_t *p_stat_dma_virtual) +{ + assert(p_stat_dma_virtual); + p->mp_timestamp = NULL; + + p->m_stat_dma_physical = stat_dma_physical; + p->mp_stat_dma_virtual = p_stat_dma_virtual; + + memset(p->mp_stat_dma_virtual, 0, (p->m_nb_counters * sizeof(uint32_t))); + + nthw_field_set_val_flush32(p->mp_fld_dma_msb, + (uint32_t)((p->m_stat_dma_physical >> 32) & 0xffffffff)); + nthw_field_set_val_flush32(p->mp_fld_dma_lsb, + (uint32_t)(p->m_stat_dma_physical & 0xffffffff)); + + p->mp_timestamp = (uint64_t *)(p->mp_stat_dma_virtual + p->m_nb_counters); + NT_LOG(DBG, NTHW, + "stat_dma_physical=%" PRIX64 " p_stat_dma_virtual=%" PRIX64 + " mp_timestamp=%" PRIX64 "", p->m_stat_dma_physical, + (uint64_t)p->mp_stat_dma_virtual, (uint64_t)p->mp_timestamp); + *p->mp_timestamp = (uint64_t)(int64_t)-1; + return 0; +} + +int nthw_stat_trigger(nthw_stat_t *p) +{ + int n_toggle_miss = nthw_field_get_updated(p->mp_fld_stat_toggle_missed); + + if (n_toggle_miss) + nthw_field_set_flush(p->mp_fld_stat_toggle_missed); + + if (p->mp_timestamp) + *p->mp_timestamp = -1; /* Clear old ts */ + + nthw_field_update_register(p->mp_fld_dma_ena); + nthw_field_set_flush(p->mp_fld_dma_ena); + + return 0; +} + +int nthw_stat_get_load_bps_rx(nthw_stat_t *p, uint8_t port, uint32_t *val) +{ + switch (port) { + case 0: + if (p->mp_fld_load_bps_rx0) { + *val = nthw_field_get_updated(p->mp_fld_load_bps_rx0); + return 0; + + } else { + *val = 0; + return -1; + } + + break; + + case 1: + if (p->mp_fld_load_bps_rx1) { + *val = nthw_field_get_updated(p->mp_fld_load_bps_rx1); + return 0; + + } else { + *val = 0; + return -1; + } + + break; + + default: + return -1; + } +} + +int nthw_stat_get_load_bps_tx(nthw_stat_t *p, uint8_t port, uint32_t *val) +{ + switch (port) { + case 0: + if (p->mp_fld_load_bps_tx0) { + *val = nthw_field_get_updated(p->mp_fld_load_bps_tx0); + return 0; + + } else { + *val = 0; + return -1; + } + + break; + + case 1: + if (p->mp_fld_load_bps_tx1) { + *val = nthw_field_get_updated(p->mp_fld_load_bps_tx1); + return 0; + + } else { + *val = 0; + return -1; + } + + break; + + default: + return -1; + } +} + +int 
nthw_stat_get_load_pps_rx(nthw_stat_t *p, uint8_t port, uint32_t *val) +{ + switch (port) { + case 0: + if (p->mp_fld_load_pps_rx0) { + *val = nthw_field_get_updated(p->mp_fld_load_pps_rx0); + return 0; + + } else { + *val = 0; + return -1; + } + + break; + + case 1: + if (p->mp_fld_load_pps_rx1) { + *val = nthw_field_get_updated(p->mp_fld_load_pps_rx1); + return 0; + + } else { + *val = 0; + return -1; + } + + break; + + default: + return -1; + } +} + +int nthw_stat_get_load_pps_tx(nthw_stat_t *p, uint8_t port, uint32_t *val) +{ + switch (port) { + case 0: + if (p->mp_fld_load_pps_tx0) { + *val = nthw_field_get_updated(p->mp_fld_load_pps_tx0); + return 0; + + } else { + *val = 0; + return -1; + } + + break; + + case 1: + if (p->mp_fld_load_pps_tx1) { + *val = nthw_field_get_updated(p->mp_fld_load_pps_tx1); + return 0; + + } else { + *val = 0; + return -1; + } + + break; + + default: + return -1; + } +} diff --git a/drivers/net/ntnic/nthw/supported/nthw_fpga_9563_055_049_0000.c b/drivers/net/ntnic/nthw/supported/nthw_fpga_9563_055_049_0000.c index 6df7208649..f1033ca949 100644 --- a/drivers/net/ntnic/nthw/supported/nthw_fpga_9563_055_049_0000.c +++ b/drivers/net/ntnic/nthw/supported/nthw_fpga_9563_055_049_0000.c @@ -270,1470 +270,2732 @@ static nthw_fpga_register_init_s cat_registers[] = { { CAT_RCK_DATA, 3, 32, NTHW_FPGA_REG_TYPE_WO, 0, 32, cat_rck_data_fields }, }; -static nthw_fpga_field_init_s gfg_burstsize0_fields[] = { - { GFG_BURSTSIZE0_VAL, 24, 0, 0 }, +static nthw_fpga_field_init_s cpy_packet_reader0_ctrl_fields[] = { + { CPY_PACKET_READER0_CTRL_ADR, 4, 0, 0x0000 }, + { CPY_PACKET_READER0_CTRL_CNT, 16, 16, 0x0000 }, }; -static nthw_fpga_field_init_s gfg_burstsize1_fields[] = { - { GFG_BURSTSIZE1_VAL, 24, 0, 0 }, +static nthw_fpga_field_init_s cpy_packet_reader0_data_fields[] = { + { CPY_PACKET_READER0_DATA_DYN, 5, 10, 0x0000 }, + { CPY_PACKET_READER0_DATA_OFS, 10, 0, 0x0000 }, }; -static nthw_fpga_field_init_s gfg_ctrl0_fields[] = { - { GFG_CTRL0_ENABLE, 1, 0, 0 }, - { GFG_CTRL0_MODE, 3, 1, 0 }, - { GFG_CTRL0_PRBS_EN, 1, 4, 0 }, - { GFG_CTRL0_SIZE, 14, 16, 64 }, +static nthw_fpga_field_init_s cpy_writer0_ctrl_fields[] = { + { CPY_WRITER0_CTRL_ADR, 4, 0, 0x0000 }, + { CPY_WRITER0_CTRL_CNT, 16, 16, 0x0000 }, }; -static nthw_fpga_field_init_s gfg_ctrl1_fields[] = { - { GFG_CTRL1_ENABLE, 1, 0, 0 }, - { GFG_CTRL1_MODE, 3, 1, 0 }, - { GFG_CTRL1_PRBS_EN, 1, 4, 0 }, - { GFG_CTRL1_SIZE, 14, 16, 64 }, +static nthw_fpga_field_init_s cpy_writer0_data_fields[] = { + { CPY_WRITER0_DATA_DYN, 5, 17, 0x0000 }, { CPY_WRITER0_DATA_LEN, 5, 22, 0x0000 }, + { CPY_WRITER0_DATA_MASK_POINTER, 4, 27, 0x0000 }, { CPY_WRITER0_DATA_OFS, 14, 3, 0x0000 }, + { CPY_WRITER0_DATA_READER_SELECT, 3, 0, 0x0000 }, }; -static nthw_fpga_field_init_s gfg_run0_fields[] = { - { GFG_RUN0_RUN, 1, 0, 0 }, +static nthw_fpga_field_init_s cpy_writer0_mask_ctrl_fields[] = { + { CPY_WRITER0_MASK_CTRL_ADR, 4, 0, 0x0000 }, + { CPY_WRITER0_MASK_CTRL_CNT, 16, 16, 0x0000 }, }; -static nthw_fpga_field_init_s gfg_run1_fields[] = { - { GFG_RUN1_RUN, 1, 0, 0 }, +static nthw_fpga_field_init_s cpy_writer0_mask_data_fields[] = { + { CPY_WRITER0_MASK_DATA_BYTE_MASK, 16, 0, 0x0000 }, }; -static nthw_fpga_field_init_s gfg_sizemask0_fields[] = { - { GFG_SIZEMASK0_VAL, 14, 0, 0 }, +static nthw_fpga_field_init_s cpy_writer1_ctrl_fields[] = { + { CPY_WRITER1_CTRL_ADR, 4, 0, 0x0000 }, + { CPY_WRITER1_CTRL_CNT, 16, 16, 0x0000 }, }; -static nthw_fpga_field_init_s gfg_sizemask1_fields[] = { - { GFG_SIZEMASK1_VAL, 14, 0, 0 }, +static 
nthw_fpga_field_init_s cpy_writer1_data_fields[] = { + { CPY_WRITER1_DATA_DYN, 5, 17, 0x0000 }, { CPY_WRITER1_DATA_LEN, 5, 22, 0x0000 }, + { CPY_WRITER1_DATA_MASK_POINTER, 4, 27, 0x0000 }, { CPY_WRITER1_DATA_OFS, 14, 3, 0x0000 }, + { CPY_WRITER1_DATA_READER_SELECT, 3, 0, 0x0000 }, }; -static nthw_fpga_field_init_s gfg_streamid0_fields[] = { - { GFG_STREAMID0_VAL, 8, 0, 0 }, +static nthw_fpga_field_init_s cpy_writer1_mask_ctrl_fields[] = { + { CPY_WRITER1_MASK_CTRL_ADR, 4, 0, 0x0000 }, + { CPY_WRITER1_MASK_CTRL_CNT, 16, 16, 0x0000 }, }; -static nthw_fpga_field_init_s gfg_streamid1_fields[] = { - { GFG_STREAMID1_VAL, 8, 0, 1 }, +static nthw_fpga_field_init_s cpy_writer1_mask_data_fields[] = { + { CPY_WRITER1_MASK_DATA_BYTE_MASK, 16, 0, 0x0000 }, }; -static nthw_fpga_register_init_s gfg_registers[] = { - { GFG_BURSTSIZE0, 3, 24, NTHW_FPGA_REG_TYPE_WO, 0, 1, gfg_burstsize0_fields }, - { GFG_BURSTSIZE1, 8, 24, NTHW_FPGA_REG_TYPE_WO, 0, 1, gfg_burstsize1_fields }, - { GFG_CTRL0, 0, 30, NTHW_FPGA_REG_TYPE_WO, 4194304, 4, gfg_ctrl0_fields }, - { GFG_CTRL1, 5, 30, NTHW_FPGA_REG_TYPE_WO, 4194304, 4, gfg_ctrl1_fields }, - { GFG_RUN0, 1, 1, NTHW_FPGA_REG_TYPE_WO, 0, 1, gfg_run0_fields }, - { GFG_RUN1, 6, 1, NTHW_FPGA_REG_TYPE_WO, 0, 1, gfg_run1_fields }, - { GFG_SIZEMASK0, 4, 14, NTHW_FPGA_REG_TYPE_WO, 0, 1, gfg_sizemask0_fields }, - { GFG_SIZEMASK1, 9, 14, NTHW_FPGA_REG_TYPE_WO, 0, 1, gfg_sizemask1_fields }, - { GFG_STREAMID0, 2, 8, NTHW_FPGA_REG_TYPE_WO, 0, 1, gfg_streamid0_fields }, - { GFG_STREAMID1, 7, 8, NTHW_FPGA_REG_TYPE_WO, 1, 1, gfg_streamid1_fields }, +static nthw_fpga_field_init_s cpy_writer2_ctrl_fields[] = { + { CPY_WRITER2_CTRL_ADR, 4, 0, 0x0000 }, + { CPY_WRITER2_CTRL_CNT, 16, 16, 0x0000 }, }; -static nthw_fpga_field_init_s gmf_ctrl_fields[] = { - { GMF_CTRL_ENABLE, 1, 0, 0 }, - { GMF_CTRL_FCS_ALWAYS, 1, 1, 0 }, - { GMF_CTRL_IFG_AUTO_ADJUST_ENABLE, 1, 7, 0 }, - { GMF_CTRL_IFG_ENABLE, 1, 2, 0 }, - { GMF_CTRL_IFG_TX_NOW_ALWAYS, 1, 3, 0 }, - { GMF_CTRL_IFG_TX_NOW_ON_TS_ENABLE, 1, 5, 0 }, - { GMF_CTRL_IFG_TX_ON_TS_ADJUST_ON_SET_CLOCK, 1, 6, 0 }, - { GMF_CTRL_IFG_TX_ON_TS_ALWAYS, 1, 4, 0 }, - { GMF_CTRL_TS_INJECT_ALWAYS, 1, 8, 0 }, - { GMF_CTRL_TS_INJECT_DUAL_STEP, 1, 9, 0 }, +static nthw_fpga_field_init_s cpy_writer2_data_fields[] = { + { CPY_WRITER2_DATA_DYN, 5, 17, 0x0000 }, { CPY_WRITER2_DATA_LEN, 5, 22, 0x0000 }, + { CPY_WRITER2_DATA_MASK_POINTER, 4, 27, 0x0000 }, { CPY_WRITER2_DATA_OFS, 14, 3, 0x0000 }, + { CPY_WRITER2_DATA_READER_SELECT, 3, 0, 0x0000 }, }; -static nthw_fpga_field_init_s gmf_debug_lane_marker_fields[] = { - { GMF_DEBUG_LANE_MARKER_COMPENSATION, 16, 0, 16384 }, +static nthw_fpga_field_init_s cpy_writer2_mask_ctrl_fields[] = { + { CPY_WRITER2_MASK_CTRL_ADR, 4, 0, 0x0000 }, + { CPY_WRITER2_MASK_CTRL_CNT, 16, 16, 0x0000 }, }; -static nthw_fpga_field_init_s gmf_ifg_max_adjust_slack_fields[] = { - { GMF_IFG_MAX_ADJUST_SLACK_SLACK, 64, 0, 0 }, +static nthw_fpga_field_init_s cpy_writer2_mask_data_fields[] = { + { CPY_WRITER2_MASK_DATA_BYTE_MASK, 16, 0, 0x0000 }, }; -static nthw_fpga_field_init_s gmf_ifg_set_clock_delta_fields[] = { - { GMF_IFG_SET_CLOCK_DELTA_DELTA, 64, 0, 0 }, +static nthw_fpga_field_init_s cpy_writer3_ctrl_fields[] = { + { CPY_WRITER3_CTRL_ADR, 4, 0, 0x0000 }, + { CPY_WRITER3_CTRL_CNT, 16, 16, 0x0000 }, }; -static nthw_fpga_field_init_s gmf_ifg_set_clock_delta_adjust_fields[] = { - { GMF_IFG_SET_CLOCK_DELTA_ADJUST_DELTA, 64, 0, 0 }, +static nthw_fpga_field_init_s cpy_writer3_data_fields[] = { + { CPY_WRITER3_DATA_DYN, 5, 17, 0x0000 }, { 
CPY_WRITER3_DATA_LEN, 5, 22, 0x0000 }, + { CPY_WRITER3_DATA_MASK_POINTER, 4, 27, 0x0000 }, { CPY_WRITER3_DATA_OFS, 14, 3, 0x0000 }, + { CPY_WRITER3_DATA_READER_SELECT, 3, 0, 0x0000 }, }; -static nthw_fpga_field_init_s gmf_ifg_tx_now_on_ts_fields[] = { - { GMF_IFG_TX_NOW_ON_TS_TS, 64, 0, 0 }, +static nthw_fpga_field_init_s cpy_writer3_mask_ctrl_fields[] = { + { CPY_WRITER3_MASK_CTRL_ADR, 4, 0, 0x0000 }, + { CPY_WRITER3_MASK_CTRL_CNT, 16, 16, 0x0000 }, }; -static nthw_fpga_field_init_s gmf_speed_fields[] = { - { GMF_SPEED_IFG_SPEED, 64, 0, 0 }, +static nthw_fpga_field_init_s cpy_writer3_mask_data_fields[] = { + { CPY_WRITER3_MASK_DATA_BYTE_MASK, 16, 0, 0x0000 }, }; -static nthw_fpga_field_init_s gmf_stat_data_buffer_fields[] = { - { GMF_STAT_DATA_BUFFER_USED, 15, 0, 0x0000 }, +static nthw_fpga_field_init_s cpy_writer4_ctrl_fields[] = { + { CPY_WRITER4_CTRL_ADR, 4, 0, 0x0000 }, + { CPY_WRITER4_CTRL_CNT, 16, 16, 0x0000 }, }; -static nthw_fpga_field_init_s gmf_stat_max_delayed_pkt_fields[] = { - { GMF_STAT_MAX_DELAYED_PKT_NS, 64, 0, 0 }, +static nthw_fpga_field_init_s cpy_writer4_data_fields[] = { + { CPY_WRITER4_DATA_DYN, 5, 17, 0x0000 }, { CPY_WRITER4_DATA_LEN, 5, 22, 0x0000 }, + { CPY_WRITER4_DATA_MASK_POINTER, 4, 27, 0x0000 }, { CPY_WRITER4_DATA_OFS, 14, 3, 0x0000 }, + { CPY_WRITER4_DATA_READER_SELECT, 3, 0, 0x0000 }, }; -static nthw_fpga_field_init_s gmf_stat_next_pkt_fields[] = { - { GMF_STAT_NEXT_PKT_NS, 64, 0, 0 }, +static nthw_fpga_field_init_s cpy_writer4_mask_ctrl_fields[] = { + { CPY_WRITER4_MASK_CTRL_ADR, 4, 0, 0x0000 }, + { CPY_WRITER4_MASK_CTRL_CNT, 16, 16, 0x0000 }, }; -static nthw_fpga_field_init_s gmf_stat_sticky_fields[] = { - { GMF_STAT_STICKY_DATA_UNDERFLOWED, 1, 0, 0 }, - { GMF_STAT_STICKY_IFG_ADJUSTED, 1, 1, 0 }, +static nthw_fpga_field_init_s cpy_writer4_mask_data_fields[] = { + { CPY_WRITER4_MASK_DATA_BYTE_MASK, 16, 0, 0x0000 }, }; -static nthw_fpga_field_init_s gmf_ts_inject_fields[] = { - { GMF_TS_INJECT_OFFSET, 14, 0, 0 }, - { GMF_TS_INJECT_POS, 2, 14, 0 }, +static nthw_fpga_field_init_s cpy_writer5_ctrl_fields[] = { + { CPY_WRITER5_CTRL_ADR, 4, 0, 0x0000 }, + { CPY_WRITER5_CTRL_CNT, 16, 16, 0x0000 }, }; -static nthw_fpga_register_init_s gmf_registers[] = { - { GMF_CTRL, 0, 10, NTHW_FPGA_REG_TYPE_WO, 0, 10, gmf_ctrl_fields }, +static nthw_fpga_field_init_s cpy_writer5_data_fields[] = { + { CPY_WRITER5_DATA_DYN, 5, 17, 0x0000 }, { CPY_WRITER5_DATA_LEN, 5, 22, 0x0000 }, + { CPY_WRITER5_DATA_MASK_POINTER, 4, 27, 0x0000 }, { CPY_WRITER5_DATA_OFS, 14, 3, 0x0000 }, + { CPY_WRITER5_DATA_READER_SELECT, 3, 0, 0x0000 }, +}; + +static nthw_fpga_field_init_s cpy_writer5_mask_ctrl_fields[] = { + { CPY_WRITER5_MASK_CTRL_ADR, 4, 0, 0x0000 }, + { CPY_WRITER5_MASK_CTRL_CNT, 16, 16, 0x0000 }, +}; + +static nthw_fpga_field_init_s cpy_writer5_mask_data_fields[] = { + { CPY_WRITER5_MASK_DATA_BYTE_MASK, 16, 0, 0x0000 }, +}; + +static nthw_fpga_register_init_s cpy_registers[] = { { - GMF_DEBUG_LANE_MARKER, 7, 16, NTHW_FPGA_REG_TYPE_WO, 16384, 1, - gmf_debug_lane_marker_fields + CPY_PACKET_READER0_CTRL, 24, 32, NTHW_FPGA_REG_TYPE_WO, 0, 2, + cpy_packet_reader0_ctrl_fields }, { - GMF_IFG_MAX_ADJUST_SLACK, 4, 64, NTHW_FPGA_REG_TYPE_WO, 0, 1, - gmf_ifg_max_adjust_slack_fields + CPY_PACKET_READER0_DATA, 25, 15, NTHW_FPGA_REG_TYPE_WO, 0, 2, + cpy_packet_reader0_data_fields }, + { CPY_WRITER0_CTRL, 0, 32, NTHW_FPGA_REG_TYPE_WO, 0, 2, cpy_writer0_ctrl_fields }, + { CPY_WRITER0_DATA, 1, 31, NTHW_FPGA_REG_TYPE_WO, 0, 5, cpy_writer0_data_fields }, { - GMF_IFG_SET_CLOCK_DELTA, 2, 64, 
NTHW_FPGA_REG_TYPE_WO, 0, 1, - gmf_ifg_set_clock_delta_fields + CPY_WRITER0_MASK_CTRL, 2, 32, NTHW_FPGA_REG_TYPE_WO, 0, 2, + cpy_writer0_mask_ctrl_fields }, { - GMF_IFG_SET_CLOCK_DELTA_ADJUST, 3, 64, NTHW_FPGA_REG_TYPE_WO, 0, 1, - gmf_ifg_set_clock_delta_adjust_fields + CPY_WRITER0_MASK_DATA, 3, 16, NTHW_FPGA_REG_TYPE_WO, 0, 1, + cpy_writer0_mask_data_fields }, - { GMF_IFG_TX_NOW_ON_TS, 5, 64, NTHW_FPGA_REG_TYPE_WO, 0, 1, gmf_ifg_tx_now_on_ts_fields }, - { GMF_SPEED, 1, 64, NTHW_FPGA_REG_TYPE_WO, 0, 1, gmf_speed_fields }, - { GMF_STAT_DATA_BUFFER, 9, 15, NTHW_FPGA_REG_TYPE_RO, 0, 1, gmf_stat_data_buffer_fields }, + { CPY_WRITER1_CTRL, 4, 32, NTHW_FPGA_REG_TYPE_WO, 0, 2, cpy_writer1_ctrl_fields }, + { CPY_WRITER1_DATA, 5, 31, NTHW_FPGA_REG_TYPE_WO, 0, 5, cpy_writer1_data_fields }, { - GMF_STAT_MAX_DELAYED_PKT, 11, 64, NTHW_FPGA_REG_TYPE_RC1, 0, 1, - gmf_stat_max_delayed_pkt_fields + CPY_WRITER1_MASK_CTRL, 6, 32, NTHW_FPGA_REG_TYPE_WO, 0, 2, + cpy_writer1_mask_ctrl_fields + }, + { + CPY_WRITER1_MASK_DATA, 7, 16, NTHW_FPGA_REG_TYPE_WO, 0, 1, + cpy_writer1_mask_data_fields + }, + { CPY_WRITER2_CTRL, 8, 32, NTHW_FPGA_REG_TYPE_WO, 0, 2, cpy_writer2_ctrl_fields }, + { CPY_WRITER2_DATA, 9, 31, NTHW_FPGA_REG_TYPE_WO, 0, 5, cpy_writer2_data_fields }, + { + CPY_WRITER2_MASK_CTRL, 10, 32, NTHW_FPGA_REG_TYPE_WO, 0, 2, + cpy_writer2_mask_ctrl_fields + }, + { + CPY_WRITER2_MASK_DATA, 11, 16, NTHW_FPGA_REG_TYPE_WO, 0, 1, + cpy_writer2_mask_data_fields + }, + { CPY_WRITER3_CTRL, 12, 32, NTHW_FPGA_REG_TYPE_WO, 0, 2, cpy_writer3_ctrl_fields }, + { CPY_WRITER3_DATA, 13, 31, NTHW_FPGA_REG_TYPE_WO, 0, 5, cpy_writer3_data_fields }, + { + CPY_WRITER3_MASK_CTRL, 14, 32, NTHW_FPGA_REG_TYPE_WO, 0, 2, + cpy_writer3_mask_ctrl_fields + }, + { + CPY_WRITER3_MASK_DATA, 15, 16, NTHW_FPGA_REG_TYPE_WO, 0, 1, + cpy_writer3_mask_data_fields + }, + { CPY_WRITER4_CTRL, 16, 32, NTHW_FPGA_REG_TYPE_WO, 0, 2, cpy_writer4_ctrl_fields }, + { CPY_WRITER4_DATA, 17, 31, NTHW_FPGA_REG_TYPE_WO, 0, 5, cpy_writer4_data_fields }, + { + CPY_WRITER4_MASK_CTRL, 18, 32, NTHW_FPGA_REG_TYPE_WO, 0, 2, + cpy_writer4_mask_ctrl_fields + }, + { + CPY_WRITER4_MASK_DATA, 19, 16, NTHW_FPGA_REG_TYPE_WO, 0, 1, + cpy_writer4_mask_data_fields + }, + { CPY_WRITER5_CTRL, 20, 32, NTHW_FPGA_REG_TYPE_WO, 0, 2, cpy_writer5_ctrl_fields }, + { CPY_WRITER5_DATA, 21, 31, NTHW_FPGA_REG_TYPE_WO, 0, 5, cpy_writer5_data_fields }, + { + CPY_WRITER5_MASK_CTRL, 22, 32, NTHW_FPGA_REG_TYPE_WO, 0, 2, + cpy_writer5_mask_ctrl_fields + }, + { + CPY_WRITER5_MASK_DATA, 23, 16, NTHW_FPGA_REG_TYPE_WO, 0, 1, + cpy_writer5_mask_data_fields }, - { GMF_STAT_NEXT_PKT, 10, 64, NTHW_FPGA_REG_TYPE_RO, 0, 1, gmf_stat_next_pkt_fields }, - { GMF_STAT_STICKY, 8, 2, NTHW_FPGA_REG_TYPE_RC1, 0, 2, gmf_stat_sticky_fields }, - { GMF_TS_INJECT, 6, 16, NTHW_FPGA_REG_TYPE_WO, 0, 2, gmf_ts_inject_fields }, }; -static nthw_fpga_field_init_s gpio_phy_cfg_fields[] = { - { GPIO_PHY_CFG_E_PORT0_RXLOS, 1, 8, 0 }, { GPIO_PHY_CFG_E_PORT1_RXLOS, 1, 9, 0 }, - { GPIO_PHY_CFG_PORT0_INT_B, 1, 1, 1 }, { GPIO_PHY_CFG_PORT0_LPMODE, 1, 0, 0 }, - { GPIO_PHY_CFG_PORT0_MODPRS_B, 1, 3, 1 }, { GPIO_PHY_CFG_PORT0_RESET_B, 1, 2, 0 }, - { GPIO_PHY_CFG_PORT1_INT_B, 1, 5, 1 }, { GPIO_PHY_CFG_PORT1_LPMODE, 1, 4, 0 }, - { GPIO_PHY_CFG_PORT1_MODPRS_B, 1, 7, 1 }, { GPIO_PHY_CFG_PORT1_RESET_B, 1, 6, 0 }, +static nthw_fpga_field_init_s csu_rcp_ctrl_fields[] = { + { CSU_RCP_CTRL_ADR, 4, 0, 0x0000 }, + { CSU_RCP_CTRL_CNT, 16, 16, 0x0000 }, }; -static nthw_fpga_field_init_s gpio_phy_gpio_fields[] = { - { GPIO_PHY_GPIO_E_PORT0_RXLOS, 1, 8, 0 
}, { GPIO_PHY_GPIO_E_PORT1_RXLOS, 1, 9, 0 }, - { GPIO_PHY_GPIO_PORT0_INT_B, 1, 1, 0x0000 }, { GPIO_PHY_GPIO_PORT0_LPMODE, 1, 0, 1 }, - { GPIO_PHY_GPIO_PORT0_MODPRS_B, 1, 3, 0x0000 }, { GPIO_PHY_GPIO_PORT0_RESET_B, 1, 2, 0 }, - { GPIO_PHY_GPIO_PORT1_INT_B, 1, 5, 0x0000 }, { GPIO_PHY_GPIO_PORT1_LPMODE, 1, 4, 1 }, - { GPIO_PHY_GPIO_PORT1_MODPRS_B, 1, 7, 0x0000 }, { GPIO_PHY_GPIO_PORT1_RESET_B, 1, 6, 0 }, +static nthw_fpga_field_init_s csu_rcp_data_fields[] = { + { CSU_RCP_DATA_IL3_CMD, 2, 5, 0x0000 }, + { CSU_RCP_DATA_IL4_CMD, 3, 7, 0x0000 }, + { CSU_RCP_DATA_OL3_CMD, 2, 0, 0x0000 }, + { CSU_RCP_DATA_OL4_CMD, 3, 2, 0x0000 }, }; -static nthw_fpga_register_init_s gpio_phy_registers[] = { - { GPIO_PHY_CFG, 0, 10, NTHW_FPGA_REG_TYPE_RW, 170, 10, gpio_phy_cfg_fields }, - { GPIO_PHY_GPIO, 1, 10, NTHW_FPGA_REG_TYPE_RW, 17, 10, gpio_phy_gpio_fields }, +static nthw_fpga_register_init_s csu_registers[] = { + { CSU_RCP_CTRL, 1, 32, NTHW_FPGA_REG_TYPE_WO, 0, 2, csu_rcp_ctrl_fields }, + { CSU_RCP_DATA, 2, 10, NTHW_FPGA_REG_TYPE_WO, 0, 4, csu_rcp_data_fields }, }; -static nthw_fpga_field_init_s hif_build_time_fields[] = { - { HIF_BUILD_TIME_TIME, 32, 0, 1726740521 }, +static nthw_fpga_field_init_s dbs_rx_am_ctrl_fields[] = { + { DBS_RX_AM_CTRL_ADR, 7, 0, 0x0000 }, + { DBS_RX_AM_CTRL_CNT, 16, 16, 0x0000 }, }; -static nthw_fpga_field_init_s hif_config_fields[] = { - { HIF_CONFIG_EXT_TAG, 1, 6, 0x0000 }, - { HIF_CONFIG_MAX_READ, 3, 3, 0x0000 }, - { HIF_CONFIG_MAX_TLP, 3, 0, 0x0000 }, +static nthw_fpga_field_init_s dbs_rx_am_data_fields[] = { + { DBS_RX_AM_DATA_ENABLE, 1, 72, 0x0000 }, { DBS_RX_AM_DATA_GPA, 64, 0, 0x0000 }, + { DBS_RX_AM_DATA_HID, 8, 64, 0x0000 }, { DBS_RX_AM_DATA_INT, 1, 74, 0x0000 }, + { DBS_RX_AM_DATA_PCKED, 1, 73, 0x0000 }, }; -static nthw_fpga_field_init_s hif_control_fields[] = { - { HIF_CONTROL_BLESSED, 8, 4, 0 }, - { HIF_CONTROL_FSR, 1, 12, 1 }, - { HIF_CONTROL_WRAW, 4, 0, 1 }, +static nthw_fpga_field_init_s dbs_rx_control_fields[] = { + { DBS_RX_CONTROL_AME, 1, 7, 0 }, { DBS_RX_CONTROL_AMS, 4, 8, 8 }, + { DBS_RX_CONTROL_LQ, 7, 0, 0 }, { DBS_RX_CONTROL_QE, 1, 17, 0 }, + { DBS_RX_CONTROL_UWE, 1, 12, 0 }, { DBS_RX_CONTROL_UWS, 4, 13, 5 }, }; -static nthw_fpga_field_init_s hif_prod_id_ex_fields[] = { - { HIF_PROD_ID_EX_LAYOUT, 1, 31, 0 }, - { HIF_PROD_ID_EX_LAYOUT_VERSION, 8, 0, 1 }, - { HIF_PROD_ID_EX_RESERVED, 23, 8, 0 }, +static nthw_fpga_field_init_s dbs_rx_dr_ctrl_fields[] = { + { DBS_RX_DR_CTRL_ADR, 7, 0, 0x0000 }, + { DBS_RX_DR_CTRL_CNT, 16, 16, 0x0000 }, }; -static nthw_fpga_field_init_s hif_prod_id_lsb_fields[] = { - { HIF_PROD_ID_LSB_GROUP_ID, 16, 16, 9563 }, - { HIF_PROD_ID_LSB_REV_ID, 8, 0, 49 }, - { HIF_PROD_ID_LSB_VER_ID, 8, 8, 55 }, +static nthw_fpga_field_init_s dbs_rx_dr_data_fields[] = { + { DBS_RX_DR_DATA_GPA, 64, 0, 0x0000 }, { DBS_RX_DR_DATA_HDR, 1, 88, 0x0000 }, + { DBS_RX_DR_DATA_HID, 8, 64, 0x0000 }, { DBS_RX_DR_DATA_PCKED, 1, 87, 0x0000 }, + { DBS_RX_DR_DATA_QS, 15, 72, 0x0000 }, }; -static nthw_fpga_field_init_s hif_prod_id_msb_fields[] = { - { HIF_PROD_ID_MSB_BUILD_NO, 10, 12, 0 }, - { HIF_PROD_ID_MSB_TYPE_ID, 12, 0, 200 }, +static nthw_fpga_field_init_s dbs_rx_idle_fields[] = { + { DBS_RX_IDLE_BUSY, 1, 8, 0 }, + { DBS_RX_IDLE_IDLE, 1, 0, 0x0000 }, + { DBS_RX_IDLE_QUEUE, 7, 1, 0x0000 }, }; -static nthw_fpga_field_init_s hif_sample_time_fields[] = { - { HIF_SAMPLE_TIME_SAMPLE_TIME, 1, 0, 0x0000 }, +static nthw_fpga_field_init_s dbs_rx_init_fields[] = { + { DBS_RX_INIT_BUSY, 1, 8, 0 }, + { DBS_RX_INIT_INIT, 1, 0, 0x0000 }, + { DBS_RX_INIT_QUEUE, 7, 1, 0x0000 }, }; 
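/*
 * A reading aid for the generated tables in this file, assuming the usual
 * positional layout of these init structs: an nthw_fpga_field_init_s entry
 * reads as { FIELD_NAME, bit_width, low_bit, reset_value }, and an
 * nthw_fpga_register_init_s entry as { REG_NAME, address_or_index,
 * total_width_bits, access_type, register_reset_value, field_count,
 * field_array }. The values in this file are consistent with that reading,
 * e.g. { GFG_CTRL0_SIZE, 14, 16, 64 } together with
 * { GFG_CTRL0, 0, 30, NTHW_FPGA_REG_TYPE_WO, 4194304, 4, gfg_ctrl0_fields },
 * where 64 << 16 == 4194304.
 *
 * The sketch below is a hypothetical helper (not part of the register map)
 * showing how such a field description maps onto a register word; uint32_t
 * is assumed to come from <stdint.h> via the driver headers.
 */
static inline uint32_t
example_get_field(uint32_t reg_val, unsigned int bit_width, unsigned int low_bit)
{
	/* Mask off bit_width bits starting at low_bit and shift them down. */
	uint32_t mask = bit_width >= 32 ? 0xffffffffU : (1U << bit_width) - 1U;

	return (reg_val >> low_bit) & mask;
}
/* Example: example_get_field(4194304, 14, 16) == 64, the GFG_CTRL0_SIZE reset. */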
-static nthw_fpga_field_init_s hif_status_fields[] = { - { HIF_STATUS_RD_ERR, 1, 9, 0 }, - { HIF_STATUS_TAGS_IN_USE, 8, 0, 0 }, - { HIF_STATUS_WR_ERR, 1, 8, 0 }, +static nthw_fpga_field_init_s dbs_rx_init_val_fields[] = { + { DBS_RX_INIT_VAL_IDX, 16, 0, 0x0000 }, + { DBS_RX_INIT_VAL_PTR, 15, 16, 0x0000 }, }; -static nthw_fpga_field_init_s hif_stat_ctrl_fields[] = { - { HIF_STAT_CTRL_STAT_ENA, 1, 1, 0 }, - { HIF_STAT_CTRL_STAT_REQ, 1, 0, 0 }, +static nthw_fpga_field_init_s dbs_rx_ptr_fields[] = { + { DBS_RX_PTR_PTR, 16, 0, 0x0000 }, + { DBS_RX_PTR_QUEUE, 7, 16, 0x0000 }, + { DBS_RX_PTR_VALID, 1, 23, 0x0000 }, }; -static nthw_fpga_field_init_s hif_stat_refclk_fields[] = { - { HIF_STAT_REFCLK_REFCLK250, 32, 0, 0 }, +static nthw_fpga_field_init_s dbs_rx_uw_ctrl_fields[] = { + { DBS_RX_UW_CTRL_ADR, 7, 0, 0x0000 }, + { DBS_RX_UW_CTRL_CNT, 16, 16, 0x0000 }, }; -static nthw_fpga_field_init_s hif_stat_rx_fields[] = { - { HIF_STAT_RX_COUNTER, 32, 0, 0 }, +static nthw_fpga_field_init_s dbs_rx_uw_data_fields[] = { + { DBS_RX_UW_DATA_GPA, 64, 0, 0x0000 }, { DBS_RX_UW_DATA_HID, 8, 64, 0x0000 }, + { DBS_RX_UW_DATA_INT, 1, 88, 0x0000 }, { DBS_RX_UW_DATA_ISTK, 1, 92, 0x0000 }, + { DBS_RX_UW_DATA_PCKED, 1, 87, 0x0000 }, { DBS_RX_UW_DATA_QS, 15, 72, 0x0000 }, + { DBS_RX_UW_DATA_VEC, 3, 89, 0x0000 }, }; -static nthw_fpga_field_init_s hif_stat_tx_fields[] = { - { HIF_STAT_TX_COUNTER, 32, 0, 0 }, +static nthw_fpga_field_init_s dbs_tx_am_ctrl_fields[] = { + { DBS_TX_AM_CTRL_ADR, 7, 0, 0x0000 }, + { DBS_TX_AM_CTRL_CNT, 16, 16, 0x0000 }, }; -static nthw_fpga_field_init_s hif_test0_fields[] = { - { HIF_TEST0_DATA, 32, 0, 287454020 }, +static nthw_fpga_field_init_s dbs_tx_am_data_fields[] = { + { DBS_TX_AM_DATA_ENABLE, 1, 72, 0x0000 }, { DBS_TX_AM_DATA_GPA, 64, 0, 0x0000 }, + { DBS_TX_AM_DATA_HID, 8, 64, 0x0000 }, { DBS_TX_AM_DATA_INT, 1, 74, 0x0000 }, + { DBS_TX_AM_DATA_PCKED, 1, 73, 0x0000 }, }; -static nthw_fpga_field_init_s hif_test1_fields[] = { - { HIF_TEST1_DATA, 32, 0, 2864434397 }, +static nthw_fpga_field_init_s dbs_tx_control_fields[] = { + { DBS_TX_CONTROL_AME, 1, 7, 0 }, { DBS_TX_CONTROL_AMS, 4, 8, 5 }, + { DBS_TX_CONTROL_LQ, 7, 0, 0 }, { DBS_TX_CONTROL_QE, 1, 17, 0 }, + { DBS_TX_CONTROL_UWE, 1, 12, 0 }, { DBS_TX_CONTROL_UWS, 4, 13, 8 }, }; -static nthw_fpga_field_init_s hif_uuid0_fields[] = { - { HIF_UUID0_UUID0, 32, 0, 1021928912 }, +static nthw_fpga_field_init_s dbs_tx_dr_ctrl_fields[] = { + { DBS_TX_DR_CTRL_ADR, 7, 0, 0x0000 }, + { DBS_TX_DR_CTRL_CNT, 16, 16, 0x0000 }, }; -static nthw_fpga_field_init_s hif_uuid1_fields[] = { - { HIF_UUID1_UUID1, 32, 0, 2998983545 }, +static nthw_fpga_field_init_s dbs_tx_dr_data_fields[] = { + { DBS_TX_DR_DATA_GPA, 64, 0, 0x0000 }, { DBS_TX_DR_DATA_HDR, 1, 88, 0x0000 }, + { DBS_TX_DR_DATA_HID, 8, 64, 0x0000 }, { DBS_TX_DR_DATA_PCKED, 1, 87, 0x0000 }, + { DBS_TX_DR_DATA_PORT, 1, 89, 0x0000 }, { DBS_TX_DR_DATA_QS, 15, 72, 0x0000 }, }; -static nthw_fpga_field_init_s hif_uuid2_fields[] = { - { HIF_UUID2_UUID2, 32, 0, 827210969 }, +static nthw_fpga_field_init_s dbs_tx_idle_fields[] = { + { DBS_TX_IDLE_BUSY, 1, 8, 0 }, + { DBS_TX_IDLE_IDLE, 1, 0, 0x0000 }, + { DBS_TX_IDLE_QUEUE, 7, 1, 0x0000 }, }; -static nthw_fpga_field_init_s hif_uuid3_fields[] = { - { HIF_UUID3_UUID3, 32, 0, 462142918 }, +static nthw_fpga_field_init_s dbs_tx_init_fields[] = { + { DBS_TX_INIT_BUSY, 1, 8, 0 }, + { DBS_TX_INIT_INIT, 1, 0, 0x0000 }, + { DBS_TX_INIT_QUEUE, 7, 1, 0x0000 }, }; -static nthw_fpga_register_init_s hif_registers[] = { - { HIF_BUILD_TIME, 16, 32, NTHW_FPGA_REG_TYPE_RO, 1726740521, 1, 
hif_build_time_fields }, - { HIF_CONFIG, 24, 7, NTHW_FPGA_REG_TYPE_RW, 0, 3, hif_config_fields }, - { HIF_CONTROL, 40, 13, NTHW_FPGA_REG_TYPE_MIXED, 4097, 3, hif_control_fields }, - { HIF_PROD_ID_EX, 112, 32, NTHW_FPGA_REG_TYPE_RO, 1, 3, hif_prod_id_ex_fields }, - { HIF_PROD_ID_LSB, 0, 32, NTHW_FPGA_REG_TYPE_RO, 626734897, 3, hif_prod_id_lsb_fields }, - { HIF_PROD_ID_MSB, 8, 22, NTHW_FPGA_REG_TYPE_RO, 200, 2, hif_prod_id_msb_fields }, - { HIF_SAMPLE_TIME, 96, 1, NTHW_FPGA_REG_TYPE_WO, 0, 1, hif_sample_time_fields }, - { HIF_STATUS, 32, 10, NTHW_FPGA_REG_TYPE_MIXED, 0, 3, hif_status_fields }, - { HIF_STAT_CTRL, 64, 2, NTHW_FPGA_REG_TYPE_WO, 0, 2, hif_stat_ctrl_fields }, - { HIF_STAT_REFCLK, 72, 32, NTHW_FPGA_REG_TYPE_RO, 0, 1, hif_stat_refclk_fields }, - { HIF_STAT_RX, 88, 32, NTHW_FPGA_REG_TYPE_RO, 0, 1, hif_stat_rx_fields }, - { HIF_STAT_TX, 80, 32, NTHW_FPGA_REG_TYPE_RO, 0, 1, hif_stat_tx_fields }, - { HIF_TEST0, 48, 32, NTHW_FPGA_REG_TYPE_RW, 287454020, 1, hif_test0_fields }, - { HIF_TEST1, 56, 32, NTHW_FPGA_REG_TYPE_RW, 2864434397, 1, hif_test1_fields }, - { HIF_UUID0, 128, 32, NTHW_FPGA_REG_TYPE_RO, 1021928912, 1, hif_uuid0_fields }, - { HIF_UUID1, 144, 32, NTHW_FPGA_REG_TYPE_RO, 2998983545, 1, hif_uuid1_fields }, - { HIF_UUID2, 160, 32, NTHW_FPGA_REG_TYPE_RO, 827210969, 1, hif_uuid2_fields }, - { HIF_UUID3, 176, 32, NTHW_FPGA_REG_TYPE_RO, 462142918, 1, hif_uuid3_fields }, +static nthw_fpga_field_init_s dbs_tx_init_val_fields[] = { + { DBS_TX_INIT_VAL_IDX, 16, 0, 0x0000 }, + { DBS_TX_INIT_VAL_PTR, 15, 16, 0x0000 }, }; -static nthw_fpga_field_init_s hsh_rcp_ctrl_fields[] = { - { HSH_RCP_CTRL_ADR, 4, 0, 0x0000 }, - { HSH_RCP_CTRL_CNT, 16, 16, 0x0000 }, +static nthw_fpga_field_init_s dbs_tx_ptr_fields[] = { + { DBS_TX_PTR_PTR, 16, 0, 0x0000 }, + { DBS_TX_PTR_QUEUE, 7, 16, 0x0000 }, + { DBS_TX_PTR_VALID, 1, 23, 0x0000 }, }; -static nthw_fpga_field_init_s hsh_rcp_data_fields[] = { - { HSH_RCP_DATA_AUTO_IPV4_MASK, 1, 742, 0x0000 }, - { HSH_RCP_DATA_HSH_TYPE, 5, 416, 0x0000 }, - { HSH_RCP_DATA_HSH_VALID, 1, 415, 0x0000 }, - { HSH_RCP_DATA_K, 320, 422, 0x0000 }, - { HSH_RCP_DATA_LOAD_DIST_TYPE, 2, 0, 0x0000 }, - { HSH_RCP_DATA_MAC_PORT_MASK, 2, 2, 0x0000 }, - { HSH_RCP_DATA_P_MASK, 1, 61, 0x0000 }, - { HSH_RCP_DATA_QW0_OFS, 8, 11, 0x0000 }, - { HSH_RCP_DATA_QW0_PE, 5, 6, 0x0000 }, - { HSH_RCP_DATA_QW4_OFS, 8, 24, 0x0000 }, - { HSH_RCP_DATA_QW4_PE, 5, 19, 0x0000 }, - { HSH_RCP_DATA_SEED, 32, 382, 0x0000 }, - { HSH_RCP_DATA_SORT, 2, 4, 0x0000 }, - { HSH_RCP_DATA_TNL_P, 1, 414, 0x0000 }, - { HSH_RCP_DATA_TOEPLITZ, 1, 421, 0x0000 }, - { HSH_RCP_DATA_W8_OFS, 8, 37, 0x0000 }, - { HSH_RCP_DATA_W8_PE, 5, 32, 0x0000 }, - { HSH_RCP_DATA_W8_SORT, 1, 45, 0x0000 }, - { HSH_RCP_DATA_W9_OFS, 8, 51, 0x0000 }, - { HSH_RCP_DATA_W9_P, 1, 60, 0x0000 }, - { HSH_RCP_DATA_W9_PE, 5, 46, 0x0000 }, - { HSH_RCP_DATA_W9_SORT, 1, 59, 0x0000 }, - { HSH_RCP_DATA_WORD_MASK, 320, 62, 0x0000 }, +static nthw_fpga_field_init_s dbs_tx_qos_ctrl_fields[] = { + { DBS_TX_QOS_CTRL_ADR, 1, 0, 0x0000 }, + { DBS_TX_QOS_CTRL_CNT, 16, 16, 0x0000 }, }; -static nthw_fpga_register_init_s hsh_registers[] = { - { HSH_RCP_CTRL, 0, 32, NTHW_FPGA_REG_TYPE_WO, 0, 2, hsh_rcp_ctrl_fields }, - { HSH_RCP_DATA, 1, 743, NTHW_FPGA_REG_TYPE_WO, 0, 23, hsh_rcp_data_fields }, +static nthw_fpga_field_init_s dbs_tx_qos_data_fields[] = { + { DBS_TX_QOS_DATA_BS, 27, 17, 0x0000 }, + { DBS_TX_QOS_DATA_EN, 1, 0, 0x0000 }, + { DBS_TX_QOS_DATA_IR, 16, 1, 0x0000 }, }; -static nthw_fpga_field_init_s iic_adr_fields[] = { - { IIC_ADR_SLV_ADR, 7, 1, 0 }, +static 
nthw_fpga_field_init_s dbs_tx_qos_rate_fields[] = { + { DBS_TX_QOS_RATE_DIV, 19, 16, 2 }, + { DBS_TX_QOS_RATE_MUL, 16, 0, 1 }, }; -static nthw_fpga_field_init_s iic_cr_fields[] = { - { IIC_CR_EN, 1, 0, 0 }, { IIC_CR_GC_EN, 1, 6, 0 }, { IIC_CR_MSMS, 1, 2, 0 }, - { IIC_CR_RST, 1, 7, 0 }, { IIC_CR_RSTA, 1, 5, 0 }, { IIC_CR_TX, 1, 3, 0 }, - { IIC_CR_TXAK, 1, 4, 0 }, { IIC_CR_TXFIFO_RESET, 1, 1, 0 }, +static nthw_fpga_field_init_s dbs_tx_qp_ctrl_fields[] = { + { DBS_TX_QP_CTRL_ADR, 7, 0, 0x0000 }, + { DBS_TX_QP_CTRL_CNT, 16, 16, 0x0000 }, }; -static nthw_fpga_field_init_s iic_dgie_fields[] = { - { IIC_DGIE_GIE, 1, 31, 0 }, +static nthw_fpga_field_init_s dbs_tx_qp_data_fields[] = { + { DBS_TX_QP_DATA_VPORT, 1, 0, 0x0000 }, }; -static nthw_fpga_field_init_s iic_gpo_fields[] = { - { IIC_GPO_GPO_VAL, 1, 0, 0 }, +static nthw_fpga_field_init_s dbs_tx_uw_ctrl_fields[] = { + { DBS_TX_UW_CTRL_ADR, 7, 0, 0x0000 }, + { DBS_TX_UW_CTRL_CNT, 16, 16, 0x0000 }, }; -static nthw_fpga_field_init_s iic_ier_fields[] = { - { IIC_IER_INT0, 1, 0, 0 }, { IIC_IER_INT1, 1, 1, 0 }, { IIC_IER_INT2, 1, 2, 0 }, - { IIC_IER_INT3, 1, 3, 0 }, { IIC_IER_INT4, 1, 4, 0 }, { IIC_IER_INT5, 1, 5, 0 }, - { IIC_IER_INT6, 1, 6, 0 }, { IIC_IER_INT7, 1, 7, 0 }, +static nthw_fpga_field_init_s dbs_tx_uw_data_fields[] = { + { DBS_TX_UW_DATA_GPA, 64, 0, 0x0000 }, { DBS_TX_UW_DATA_HID, 8, 64, 0x0000 }, + { DBS_TX_UW_DATA_INO, 1, 93, 0x0000 }, { DBS_TX_UW_DATA_INT, 1, 88, 0x0000 }, + { DBS_TX_UW_DATA_ISTK, 1, 92, 0x0000 }, { DBS_TX_UW_DATA_PCKED, 1, 87, 0x0000 }, + { DBS_TX_UW_DATA_QS, 15, 72, 0x0000 }, { DBS_TX_UW_DATA_VEC, 3, 89, 0x0000 }, }; -static nthw_fpga_field_init_s iic_isr_fields[] = { - { IIC_ISR_INT0, 1, 0, 0 }, { IIC_ISR_INT1, 1, 1, 0 }, { IIC_ISR_INT2, 1, 2, 0 }, - { IIC_ISR_INT3, 1, 3, 0 }, { IIC_ISR_INT4, 1, 4, 0 }, { IIC_ISR_INT5, 1, 5, 0 }, - { IIC_ISR_INT6, 1, 6, 0 }, { IIC_ISR_INT7, 1, 7, 0 }, +static nthw_fpga_register_init_s dbs_registers[] = { + { DBS_RX_AM_CTRL, 10, 32, NTHW_FPGA_REG_TYPE_WO, 0, 2, dbs_rx_am_ctrl_fields }, + { DBS_RX_AM_DATA, 11, 75, NTHW_FPGA_REG_TYPE_WO, 0, 5, dbs_rx_am_data_fields }, + { DBS_RX_CONTROL, 0, 18, NTHW_FPGA_REG_TYPE_RW, 43008, 6, dbs_rx_control_fields }, + { DBS_RX_DR_CTRL, 18, 32, NTHW_FPGA_REG_TYPE_WO, 0, 2, dbs_rx_dr_ctrl_fields }, + { DBS_RX_DR_DATA, 19, 89, NTHW_FPGA_REG_TYPE_WO, 0, 5, dbs_rx_dr_data_fields }, + { DBS_RX_IDLE, 8, 9, NTHW_FPGA_REG_TYPE_MIXED, 0, 3, dbs_rx_idle_fields }, + { DBS_RX_INIT, 2, 9, NTHW_FPGA_REG_TYPE_MIXED, 0, 3, dbs_rx_init_fields }, + { DBS_RX_INIT_VAL, 3, 31, NTHW_FPGA_REG_TYPE_WO, 0, 2, dbs_rx_init_val_fields }, + { DBS_RX_PTR, 4, 24, NTHW_FPGA_REG_TYPE_MIXED, 0, 3, dbs_rx_ptr_fields }, + { DBS_RX_UW_CTRL, 14, 32, NTHW_FPGA_REG_TYPE_WO, 0, 2, dbs_rx_uw_ctrl_fields }, + { DBS_RX_UW_DATA, 15, 93, NTHW_FPGA_REG_TYPE_WO, 0, 7, dbs_rx_uw_data_fields }, + { DBS_TX_AM_CTRL, 12, 32, NTHW_FPGA_REG_TYPE_WO, 0, 2, dbs_tx_am_ctrl_fields }, + { DBS_TX_AM_DATA, 13, 75, NTHW_FPGA_REG_TYPE_WO, 0, 5, dbs_tx_am_data_fields }, + { DBS_TX_CONTROL, 1, 18, NTHW_FPGA_REG_TYPE_RW, 66816, 6, dbs_tx_control_fields }, + { DBS_TX_DR_CTRL, 20, 32, NTHW_FPGA_REG_TYPE_WO, 0, 2, dbs_tx_dr_ctrl_fields }, + { DBS_TX_DR_DATA, 21, 90, NTHW_FPGA_REG_TYPE_WO, 0, 6, dbs_tx_dr_data_fields }, + { DBS_TX_IDLE, 9, 9, NTHW_FPGA_REG_TYPE_MIXED, 0, 3, dbs_tx_idle_fields }, + { DBS_TX_INIT, 5, 9, NTHW_FPGA_REG_TYPE_MIXED, 0, 3, dbs_tx_init_fields }, + { DBS_TX_INIT_VAL, 6, 31, NTHW_FPGA_REG_TYPE_WO, 0, 2, dbs_tx_init_val_fields }, + { DBS_TX_PTR, 7, 24, NTHW_FPGA_REG_TYPE_MIXED, 0, 3, 
dbs_tx_ptr_fields }, + { DBS_TX_QOS_CTRL, 24, 32, NTHW_FPGA_REG_TYPE_WO, 0, 2, dbs_tx_qos_ctrl_fields }, + { DBS_TX_QOS_DATA, 25, 44, NTHW_FPGA_REG_TYPE_WO, 0, 3, dbs_tx_qos_data_fields }, + { DBS_TX_QOS_RATE, 26, 35, NTHW_FPGA_REG_TYPE_RW, 131073, 2, dbs_tx_qos_rate_fields }, + { DBS_TX_QP_CTRL, 22, 32, NTHW_FPGA_REG_TYPE_WO, 0, 2, dbs_tx_qp_ctrl_fields }, + { DBS_TX_QP_DATA, 23, 1, NTHW_FPGA_REG_TYPE_WO, 0, 1, dbs_tx_qp_data_fields }, + { DBS_TX_UW_CTRL, 16, 32, NTHW_FPGA_REG_TYPE_WO, 0, 2, dbs_tx_uw_ctrl_fields }, + { DBS_TX_UW_DATA, 17, 94, NTHW_FPGA_REG_TYPE_WO, 0, 8, dbs_tx_uw_data_fields }, }; -static nthw_fpga_field_init_s iic_rx_fifo_fields[] = { - { IIC_RX_FIFO_RXDATA, 8, 0, 0 }, +static nthw_fpga_field_init_s flm_buf_ctrl_fields[] = { + { FLM_BUF_CTRL_INF_AVAIL, 16, 16, 0x0000 }, + { FLM_BUF_CTRL_LRN_FREE, 16, 0, 0x0000 }, + { FLM_BUF_CTRL_STA_AVAIL, 16, 32, 0x0000 }, +}; + +static nthw_fpga_field_init_s flm_control_fields[] = { + { FLM_CONTROL_CALIB_RECALIBRATE, 3, 28, 0 }, + { FLM_CONTROL_CRCRD, 1, 12, 0x0000 }, + { FLM_CONTROL_CRCWR, 1, 11, 0x0000 }, + { FLM_CONTROL_EAB, 5, 18, 0 }, + { FLM_CONTROL_ENABLE, 1, 0, 0 }, + { FLM_CONTROL_INIT, 1, 1, 0x0000 }, + { FLM_CONTROL_LDS, 1, 2, 0x0000 }, + { FLM_CONTROL_LFS, 1, 3, 0x0000 }, + { FLM_CONTROL_LIS, 1, 4, 0x0000 }, + { FLM_CONTROL_PDS, 1, 9, 0x0000 }, + { FLM_CONTROL_PIS, 1, 10, 0x0000 }, + { FLM_CONTROL_RBL, 4, 13, 0 }, + { FLM_CONTROL_RDS, 1, 7, 0x0000 }, + { FLM_CONTROL_RIS, 1, 8, 0x0000 }, + { FLM_CONTROL_SPLIT_SDRAM_USAGE, 5, 23, 16 }, + { FLM_CONTROL_UDS, 1, 5, 0x0000 }, + { FLM_CONTROL_UIS, 1, 6, 0x0000 }, + { FLM_CONTROL_WPD, 1, 17, 0 }, }; -static nthw_fpga_field_init_s iic_rx_fifo_ocy_fields[] = { - { IIC_RX_FIFO_OCY_OCY_VAL, 4, 0, 0 }, +static nthw_fpga_field_init_s flm_inf_data_fields[] = { + { FLM_INF_DATA_BYTES, 64, 0, 0x0000 }, { FLM_INF_DATA_CAUSE, 3, 224, 0x0000 }, + { FLM_INF_DATA_EOR, 1, 287, 0x0000 }, { FLM_INF_DATA_ID, 32, 192, 0x0000 }, + { FLM_INF_DATA_PACKETS, 64, 64, 0x0000 }, { FLM_INF_DATA_TS, 64, 128, 0x0000 }, +}; + +static nthw_fpga_field_init_s flm_load_aps_fields[] = { + { FLM_LOAD_APS_APS, 32, 0, 0x0000 }, +}; + +static nthw_fpga_field_init_s flm_load_bin_fields[] = { + { FLM_LOAD_BIN_BIN, 32, 0, 0x0000 }, }; -static nthw_fpga_field_init_s iic_rx_fifo_pirq_fields[] = { - { IIC_RX_FIFO_PIRQ_CMP_VAL, 4, 0, 0 }, +static nthw_fpga_field_init_s flm_load_lps_fields[] = { + { FLM_LOAD_LPS_LPS, 32, 0, 0x0000 }, }; -static nthw_fpga_field_init_s iic_softr_fields[] = { - { IIC_SOFTR_RKEY, 4, 0, 0x0000 }, +static nthw_fpga_field_init_s flm_lrn_data_fields[] = { + { FLM_LRN_DATA_ADJ, 32, 480, 0x0000 }, { FLM_LRN_DATA_COLOR, 32, 448, 0x0000 }, + { FLM_LRN_DATA_DSCP, 6, 698, 0x0000 }, { FLM_LRN_DATA_ENT, 1, 693, 0x0000 }, + { FLM_LRN_DATA_EOR, 1, 767, 0x0000 }, { FLM_LRN_DATA_FILL, 16, 544, 0x0000 }, + { FLM_LRN_DATA_FT, 4, 560, 0x0000 }, { FLM_LRN_DATA_FT_MBR, 4, 564, 0x0000 }, + { FLM_LRN_DATA_FT_MISS, 4, 568, 0x0000 }, { FLM_LRN_DATA_ID, 32, 512, 0x0000 }, + { FLM_LRN_DATA_KID, 8, 328, 0x0000 }, { FLM_LRN_DATA_MBR_ID1, 28, 572, 0x0000 }, + { FLM_LRN_DATA_MBR_ID2, 28, 600, 0x0000 }, { FLM_LRN_DATA_MBR_ID3, 28, 628, 0x0000 }, + { FLM_LRN_DATA_MBR_ID4, 28, 656, 0x0000 }, { FLM_LRN_DATA_NAT_EN, 1, 711, 0x0000 }, + { FLM_LRN_DATA_NAT_IP, 32, 336, 0x0000 }, { FLM_LRN_DATA_NAT_PORT, 16, 400, 0x0000 }, + { FLM_LRN_DATA_NOFI, 1, 716, 0x0000 }, { FLM_LRN_DATA_OP, 4, 694, 0x0000 }, + { FLM_LRN_DATA_PRIO, 2, 691, 0x0000 }, { FLM_LRN_DATA_PROT, 8, 320, 0x0000 }, + { FLM_LRN_DATA_QFI, 6, 704, 0x0000 }, { 
FLM_LRN_DATA_QW0, 128, 192, 0x0000 }, + { FLM_LRN_DATA_QW4, 128, 64, 0x0000 }, { FLM_LRN_DATA_RATE, 16, 416, 0x0000 }, + { FLM_LRN_DATA_RQI, 1, 710, 0x0000 }, { FLM_LRN_DATA_SCRUB_PROF, 4, 712, 0x0000 }, + { FLM_LRN_DATA_SIZE, 16, 432, 0x0000 }, { FLM_LRN_DATA_STAT_PROF, 4, 687, 0x0000 }, + { FLM_LRN_DATA_SW8, 32, 32, 0x0000 }, { FLM_LRN_DATA_SW9, 32, 0, 0x0000 }, + { FLM_LRN_DATA_TEID, 32, 368, 0x0000 }, { FLM_LRN_DATA_VOL_IDX, 3, 684, 0x0000 }, }; -static nthw_fpga_field_init_s iic_sr_fields[] = { - { IIC_SR_AAS, 1, 1, 0 }, { IIC_SR_ABGC, 1, 0, 0 }, { IIC_SR_BB, 1, 2, 0 }, - { IIC_SR_RXFIFO_EMPTY, 1, 6, 1 }, { IIC_SR_RXFIFO_FULL, 1, 5, 0 }, { IIC_SR_SRW, 1, 3, 0 }, - { IIC_SR_TXFIFO_EMPTY, 1, 7, 1 }, { IIC_SR_TXFIFO_FULL, 1, 4, 0 }, +static nthw_fpga_field_init_s flm_prio_fields[] = { + { FLM_PRIO_FT0, 4, 4, 1 }, { FLM_PRIO_FT1, 4, 12, 1 }, { FLM_PRIO_FT2, 4, 20, 1 }, + { FLM_PRIO_FT3, 4, 28, 1 }, { FLM_PRIO_LIMIT0, 4, 0, 0 }, { FLM_PRIO_LIMIT1, 4, 8, 0 }, + { FLM_PRIO_LIMIT2, 4, 16, 0 }, { FLM_PRIO_LIMIT3, 4, 24, 0 }, }; -static nthw_fpga_field_init_s iic_tbuf_fields[] = { - { IIC_TBUF_TBUF_VAL, 32, 0, 0 }, +static nthw_fpga_field_init_s flm_pst_ctrl_fields[] = { + { FLM_PST_CTRL_ADR, 4, 0, 0x0000 }, + { FLM_PST_CTRL_CNT, 16, 16, 0x0000 }, }; -static nthw_fpga_field_init_s iic_ten_adr_fields[] = { - { IIC_TEN_ADR_MSB_SLV_ADR, 3, 0, 0 }, +static nthw_fpga_field_init_s flm_pst_data_fields[] = { + { FLM_PST_DATA_BP, 5, 0, 0x0000 }, + { FLM_PST_DATA_PP, 5, 5, 0x0000 }, + { FLM_PST_DATA_TP, 5, 10, 0x0000 }, }; -static nthw_fpga_field_init_s iic_thddat_fields[] = { - { IIC_THDDAT_THDDAT_VAL, 32, 0, 0 }, +static nthw_fpga_field_init_s flm_rcp_ctrl_fields[] = { + { FLM_RCP_CTRL_ADR, 5, 0, 0x0000 }, + { FLM_RCP_CTRL_CNT, 16, 16, 0x0000 }, }; -static nthw_fpga_field_init_s iic_thdsta_fields[] = { - { IIC_THDSTA_THDSTA_VAL, 32, 0, 0 }, +static nthw_fpga_field_init_s flm_rcp_data_fields[] = { + { FLM_RCP_DATA_AUTO_IPV4_MASK, 1, 402, 0x0000 }, + { FLM_RCP_DATA_BYT_DYN, 5, 387, 0x0000 }, + { FLM_RCP_DATA_BYT_OFS, 8, 392, 0x0000 }, + { FLM_RCP_DATA_IPN, 1, 386, 0x0000 }, + { FLM_RCP_DATA_KID, 8, 377, 0x0000 }, + { FLM_RCP_DATA_LOOKUP, 1, 0, 0x0000 }, + { FLM_RCP_DATA_MASK, 320, 57, 0x0000 }, + { FLM_RCP_DATA_OPN, 1, 385, 0x0000 }, + { FLM_RCP_DATA_QW0_DYN, 5, 1, 0x0000 }, + { FLM_RCP_DATA_QW0_OFS, 8, 6, 0x0000 }, + { FLM_RCP_DATA_QW0_SEL, 2, 14, 0x0000 }, + { FLM_RCP_DATA_QW4_DYN, 5, 16, 0x0000 }, + { FLM_RCP_DATA_QW4_OFS, 8, 21, 0x0000 }, + { FLM_RCP_DATA_SW8_DYN, 5, 29, 0x0000 }, + { FLM_RCP_DATA_SW8_OFS, 8, 34, 0x0000 }, + { FLM_RCP_DATA_SW8_SEL, 2, 42, 0x0000 }, + { FLM_RCP_DATA_SW9_DYN, 5, 44, 0x0000 }, + { FLM_RCP_DATA_SW9_OFS, 8, 49, 0x0000 }, + { FLM_RCP_DATA_TXPLM, 2, 400, 0x0000 }, }; -static nthw_fpga_field_init_s iic_thigh_fields[] = { - { IIC_THIGH_THIGH_VAL, 32, 0, 0 }, +static nthw_fpga_field_init_s flm_scan_fields[] = { + { FLM_SCAN_I, 16, 0, 0 }, }; -static nthw_fpga_field_init_s iic_tlow_fields[] = { - { IIC_TLOW_TLOW_VAL, 32, 0, 0 }, +static nthw_fpga_field_init_s flm_scrub_ctrl_fields[] = { + { FLM_SCRUB_CTRL_ADR, 4, 0, 0x0000 }, + { FLM_SCRUB_CTRL_CNT, 16, 16, 0x0000 }, }; -static nthw_fpga_field_init_s iic_tsudat_fields[] = { - { IIC_TSUDAT_TSUDAT_VAL, 32, 0, 0 }, +static nthw_fpga_field_init_s flm_scrub_data_fields[] = { + { FLM_SCRUB_DATA_DEL, 1, 12, 0 }, + { FLM_SCRUB_DATA_INF, 1, 13, 0 }, + { FLM_SCRUB_DATA_R, 4, 8, 0 }, + { FLM_SCRUB_DATA_T, 8, 0, 0 }, }; -static nthw_fpga_field_init_s iic_tsusta_fields[] = { - { IIC_TSUSTA_TSUSTA_VAL, 32, 0, 0 }, +static 
nthw_fpga_field_init_s flm_status_fields[] = { + { FLM_STATUS_CACHE_BUFFER_CRITICAL, 1, 12, 0x0000 }, + { FLM_STATUS_CALIB_FAIL, 3, 3, 0 }, + { FLM_STATUS_CALIB_SUCCESS, 3, 0, 0 }, + { FLM_STATUS_CRCERR, 1, 10, 0x0000 }, + { FLM_STATUS_CRITICAL, 1, 8, 0x0000 }, + { FLM_STATUS_EFT_BP, 1, 11, 0x0000 }, + { FLM_STATUS_IDLE, 1, 7, 0x0000 }, + { FLM_STATUS_INITDONE, 1, 6, 0x0000 }, + { FLM_STATUS_PANIC, 1, 9, 0x0000 }, }; -static nthw_fpga_field_init_s iic_tsusto_fields[] = { - { IIC_TSUSTO_TSUSTO_VAL, 32, 0, 0 }, +static nthw_fpga_field_init_s flm_stat_aul_done_fields[] = { + { FLM_STAT_AUL_DONE_CNT, 32, 0, 0x0000 }, }; -static nthw_fpga_field_init_s iic_tx_fifo_fields[] = { - { IIC_TX_FIFO_START, 1, 8, 0 }, - { IIC_TX_FIFO_STOP, 1, 9, 0 }, - { IIC_TX_FIFO_TXDATA, 8, 0, 0 }, +static nthw_fpga_field_init_s flm_stat_aul_fail_fields[] = { + { FLM_STAT_AUL_FAIL_CNT, 32, 0, 0x0000 }, }; -static nthw_fpga_field_init_s iic_tx_fifo_ocy_fields[] = { - { IIC_TX_FIFO_OCY_OCY_VAL, 4, 0, 0 }, +static nthw_fpga_field_init_s flm_stat_aul_ignore_fields[] = { + { FLM_STAT_AUL_IGNORE_CNT, 32, 0, 0x0000 }, }; -static nthw_fpga_register_init_s iic_registers[] = { - { IIC_ADR, 68, 8, NTHW_FPGA_REG_TYPE_RW, 0, 1, iic_adr_fields }, - { IIC_CR, 64, 8, NTHW_FPGA_REG_TYPE_RW, 0, 8, iic_cr_fields }, - { IIC_DGIE, 7, 32, NTHW_FPGA_REG_TYPE_RW, 0, 1, iic_dgie_fields }, - { IIC_GPO, 73, 1, NTHW_FPGA_REG_TYPE_RW, 0, 1, iic_gpo_fields }, - { IIC_IER, 10, 8, NTHW_FPGA_REG_TYPE_RW, 0, 8, iic_ier_fields }, - { IIC_ISR, 8, 8, NTHW_FPGA_REG_TYPE_RW, 0, 8, iic_isr_fields }, - { IIC_RX_FIFO, 67, 8, NTHW_FPGA_REG_TYPE_RO, 0, 1, iic_rx_fifo_fields }, - { IIC_RX_FIFO_OCY, 70, 4, NTHW_FPGA_REG_TYPE_RO, 0, 1, iic_rx_fifo_ocy_fields }, - { IIC_RX_FIFO_PIRQ, 72, 4, NTHW_FPGA_REG_TYPE_RW, 0, 1, iic_rx_fifo_pirq_fields }, - { IIC_SOFTR, 16, 4, NTHW_FPGA_REG_TYPE_WO, 0, 1, iic_softr_fields }, - { IIC_SR, 65, 8, NTHW_FPGA_REG_TYPE_RO, 192, 8, iic_sr_fields }, - { IIC_TBUF, 78, 32, NTHW_FPGA_REG_TYPE_RW, 0, 1, iic_tbuf_fields }, - { IIC_TEN_ADR, 71, 3, NTHW_FPGA_REG_TYPE_RO, 0, 1, iic_ten_adr_fields }, - { IIC_THDDAT, 81, 32, NTHW_FPGA_REG_TYPE_RW, 0, 1, iic_thddat_fields }, - { IIC_THDSTA, 76, 32, NTHW_FPGA_REG_TYPE_RW, 0, 1, iic_thdsta_fields }, - { IIC_THIGH, 79, 32, NTHW_FPGA_REG_TYPE_RW, 0, 1, iic_thigh_fields }, - { IIC_TLOW, 80, 32, NTHW_FPGA_REG_TYPE_RW, 0, 1, iic_tlow_fields }, - { IIC_TSUDAT, 77, 32, NTHW_FPGA_REG_TYPE_RW, 0, 1, iic_tsudat_fields }, - { IIC_TSUSTA, 74, 32, NTHW_FPGA_REG_TYPE_RW, 0, 1, iic_tsusta_fields }, - { IIC_TSUSTO, 75, 32, NTHW_FPGA_REG_TYPE_RW, 0, 1, iic_tsusto_fields }, - { IIC_TX_FIFO, 66, 10, NTHW_FPGA_REG_TYPE_WO, 0, 3, iic_tx_fifo_fields }, - { IIC_TX_FIFO_OCY, 69, 4, NTHW_FPGA_REG_TYPE_RO, 0, 1, iic_tx_fifo_ocy_fields }, +static nthw_fpga_field_init_s flm_stat_csh_hit_fields[] = { + { FLM_STAT_CSH_HIT_CNT, 32, 0, 0x0000 }, }; -static nthw_fpga_field_init_s km_cam_ctrl_fields[] = { - { KM_CAM_CTRL_ADR, 13, 0, 0x0000 }, - { KM_CAM_CTRL_CNT, 16, 16, 0x0000 }, +static nthw_fpga_field_init_s flm_stat_csh_miss_fields[] = { + { FLM_STAT_CSH_MISS_CNT, 32, 0, 0x0000 }, }; -static nthw_fpga_field_init_s km_cam_data_fields[] = { - { KM_CAM_DATA_FT0, 4, 192, 0x0000 }, { KM_CAM_DATA_FT1, 4, 196, 0x0000 }, - { KM_CAM_DATA_FT2, 4, 200, 0x0000 }, { KM_CAM_DATA_FT3, 4, 204, 0x0000 }, - { KM_CAM_DATA_FT4, 4, 208, 0x0000 }, { KM_CAM_DATA_FT5, 4, 212, 0x0000 }, - { KM_CAM_DATA_W0, 32, 0, 0x0000 }, { KM_CAM_DATA_W1, 32, 32, 0x0000 }, - { KM_CAM_DATA_W2, 32, 64, 0x0000 }, { KM_CAM_DATA_W3, 32, 96, 0x0000 }, - { 
KM_CAM_DATA_W4, 32, 128, 0x0000 }, { KM_CAM_DATA_W5, 32, 160, 0x0000 }, +static nthw_fpga_field_init_s flm_stat_csh_unh_fields[] = { + { FLM_STAT_CSH_UNH_CNT, 32, 0, 0x0000 }, }; -static nthw_fpga_field_init_s km_rcp_ctrl_fields[] = { - { KM_RCP_CTRL_ADR, 5, 0, 0x0000 }, - { KM_RCP_CTRL_CNT, 16, 16, 0x0000 }, +static nthw_fpga_field_init_s flm_stat_cuc_move_fields[] = { + { FLM_STAT_CUC_MOVE_CNT, 32, 0, 0x0000 }, }; -static nthw_fpga_field_init_s km_rcp_data_fields[] = { - { KM_RCP_DATA_BANK_A, 12, 694, 0x0000 }, { KM_RCP_DATA_BANK_B, 12, 706, 0x0000 }, - { KM_RCP_DATA_DUAL, 1, 651, 0x0000 }, { KM_RCP_DATA_DW0_B_DYN, 5, 729, 0x0000 }, - { KM_RCP_DATA_DW0_B_OFS, 8, 734, 0x0000 }, { KM_RCP_DATA_DW10_DYN, 5, 55, 0x0000 }, - { KM_RCP_DATA_DW10_OFS, 8, 60, 0x0000 }, { KM_RCP_DATA_DW10_SEL_A, 2, 68, 0x0000 }, - { KM_RCP_DATA_DW10_SEL_B, 2, 70, 0x0000 }, { KM_RCP_DATA_DW2_B_DYN, 5, 742, 0x0000 }, - { KM_RCP_DATA_DW2_B_OFS, 8, 747, 0x0000 }, { KM_RCP_DATA_DW8_DYN, 5, 36, 0x0000 }, - { KM_RCP_DATA_DW8_OFS, 8, 41, 0x0000 }, { KM_RCP_DATA_DW8_SEL_A, 3, 49, 0x0000 }, - { KM_RCP_DATA_DW8_SEL_B, 3, 52, 0x0000 }, { KM_RCP_DATA_EL_A, 4, 653, 0x0000 }, - { KM_RCP_DATA_EL_B, 3, 657, 0x0000 }, { KM_RCP_DATA_FTM_A, 16, 662, 0x0000 }, - { KM_RCP_DATA_FTM_B, 16, 678, 0x0000 }, { KM_RCP_DATA_INFO_A, 1, 660, 0x0000 }, - { KM_RCP_DATA_INFO_B, 1, 661, 0x0000 }, { KM_RCP_DATA_KEYWAY_A, 1, 725, 0x0000 }, - { KM_RCP_DATA_KEYWAY_B, 1, 726, 0x0000 }, { KM_RCP_DATA_KL_A, 4, 718, 0x0000 }, - { KM_RCP_DATA_KL_B, 3, 722, 0x0000 }, { KM_RCP_DATA_MASK_A, 384, 75, 0x0000 }, - { KM_RCP_DATA_MASK_B, 192, 459, 0x0000 }, { KM_RCP_DATA_PAIRED, 1, 652, 0x0000 }, - { KM_RCP_DATA_QW0_DYN, 5, 0, 0x0000 }, { KM_RCP_DATA_QW0_OFS, 8, 5, 0x0000 }, - { KM_RCP_DATA_QW0_SEL_A, 3, 13, 0x0000 }, { KM_RCP_DATA_QW0_SEL_B, 3, 16, 0x0000 }, - { KM_RCP_DATA_QW4_DYN, 5, 19, 0x0000 }, { KM_RCP_DATA_QW4_OFS, 8, 24, 0x0000 }, - { KM_RCP_DATA_QW4_SEL_A, 2, 32, 0x0000 }, { KM_RCP_DATA_QW4_SEL_B, 2, 34, 0x0000 }, - { KM_RCP_DATA_SW4_B_DYN, 5, 755, 0x0000 }, { KM_RCP_DATA_SW4_B_OFS, 8, 760, 0x0000 }, - { KM_RCP_DATA_SW5_B_DYN, 5, 768, 0x0000 }, { KM_RCP_DATA_SW5_B_OFS, 8, 773, 0x0000 }, - { KM_RCP_DATA_SWX_CCH, 1, 72, 0x0000 }, { KM_RCP_DATA_SWX_SEL_A, 1, 73, 0x0000 }, - { KM_RCP_DATA_SWX_SEL_B, 1, 74, 0x0000 }, { KM_RCP_DATA_SYNERGY_MODE, 2, 727, 0x0000 }, +static nthw_fpga_field_init_s flm_stat_cuc_start_fields[] = { + { FLM_STAT_CUC_START_CNT, 32, 0, 0x0000 }, }; -static nthw_fpga_field_init_s km_status_fields[] = { - { KM_STATUS_TCQ_RDY, 1, 0, 0x0000 }, +static nthw_fpga_field_init_s flm_stat_flows_fields[] = { + { FLM_STAT_FLOWS_CNT, 32, 0, 0x0000 }, }; -static nthw_fpga_field_init_s km_tcam_ctrl_fields[] = { - { KM_TCAM_CTRL_ADR, 14, 0, 0x0000 }, - { KM_TCAM_CTRL_CNT, 16, 16, 0x0000 }, +static nthw_fpga_field_init_s flm_stat_inf_done_fields[] = { + { FLM_STAT_INF_DONE_CNT, 32, 0, 0x0000 }, }; -static nthw_fpga_field_init_s km_tcam_data_fields[] = { - { KM_TCAM_DATA_T, 72, 0, 0x0000 }, +static nthw_fpga_field_init_s flm_stat_inf_skip_fields[] = { + { FLM_STAT_INF_SKIP_CNT, 32, 0, 0x0000 }, }; -static nthw_fpga_field_init_s km_tci_ctrl_fields[] = { - { KM_TCI_CTRL_ADR, 10, 0, 0x0000 }, - { KM_TCI_CTRL_CNT, 16, 16, 0x0000 }, +static nthw_fpga_field_init_s flm_stat_lrn_done_fields[] = { + { FLM_STAT_LRN_DONE_CNT, 32, 0, 0x0000 }, }; -static nthw_fpga_field_init_s km_tci_data_fields[] = { - { KM_TCI_DATA_COLOR, 32, 0, 0x0000 }, - { KM_TCI_DATA_FT, 4, 32, 0x0000 }, +static nthw_fpga_field_init_s flm_stat_lrn_fail_fields[] = { + { FLM_STAT_LRN_FAIL_CNT, 32, 
0, 0x0000 }, }; -static nthw_fpga_field_init_s km_tcq_ctrl_fields[] = { - { KM_TCQ_CTRL_ADR, 7, 0, 0x0000 }, - { KM_TCQ_CTRL_CNT, 5, 16, 0x0000 }, +static nthw_fpga_field_init_s flm_stat_lrn_ignore_fields[] = { + { FLM_STAT_LRN_IGNORE_CNT, 32, 0, 0x0000 }, }; -static nthw_fpga_field_init_s km_tcq_data_fields[] = { - { KM_TCQ_DATA_BANK_MASK, 12, 0, 0x0000 }, - { KM_TCQ_DATA_QUAL, 3, 12, 0x0000 }, +static nthw_fpga_field_init_s flm_stat_pck_dis_fields[] = { + { FLM_STAT_PCK_DIS_CNT, 32, 0, 0x0000 }, }; -static nthw_fpga_register_init_s km_registers[] = { - { KM_CAM_CTRL, 2, 32, NTHW_FPGA_REG_TYPE_WO, 0, 2, km_cam_ctrl_fields }, - { KM_CAM_DATA, 3, 216, NTHW_FPGA_REG_TYPE_WO, 0, 12, km_cam_data_fields }, - { KM_RCP_CTRL, 0, 32, NTHW_FPGA_REG_TYPE_WO, 0, 2, km_rcp_ctrl_fields }, - { KM_RCP_DATA, 1, 781, NTHW_FPGA_REG_TYPE_WO, 0, 44, km_rcp_data_fields }, - { KM_STATUS, 10, 1, NTHW_FPGA_REG_TYPE_RO, 0, 1, km_status_fields }, - { KM_TCAM_CTRL, 4, 32, NTHW_FPGA_REG_TYPE_WO, 0, 2, km_tcam_ctrl_fields }, - { KM_TCAM_DATA, 5, 72, NTHW_FPGA_REG_TYPE_WO, 0, 1, km_tcam_data_fields }, - { KM_TCI_CTRL, 6, 32, NTHW_FPGA_REG_TYPE_WO, 0, 2, km_tci_ctrl_fields }, - { KM_TCI_DATA, 7, 36, NTHW_FPGA_REG_TYPE_WO, 0, 2, km_tci_data_fields }, - { KM_TCQ_CTRL, 8, 21, NTHW_FPGA_REG_TYPE_WO, 0, 2, km_tcq_ctrl_fields }, - { KM_TCQ_DATA, 9, 15, NTHW_FPGA_REG_TYPE_WO, 0, 2, km_tcq_data_fields }, +static nthw_fpga_field_init_s flm_stat_pck_hit_fields[] = { + { FLM_STAT_PCK_HIT_CNT, 32, 0, 0x0000 }, }; -static nthw_fpga_field_init_s mac_pcs_bad_code_fields[] = { - { MAC_PCS_BAD_CODE_CODE_ERR, 16, 0, 0x0000 }, +static nthw_fpga_field_init_s flm_stat_pck_miss_fields[] = { + { FLM_STAT_PCK_MISS_CNT, 32, 0, 0x0000 }, }; -static nthw_fpga_field_init_s mac_pcs_bip_err_fields[] = { - { MAC_PCS_BIP_ERR_BIP_ERR, 640, 0, 0x0000 }, +static nthw_fpga_field_init_s flm_stat_pck_unh_fields[] = { + { FLM_STAT_PCK_UNH_CNT, 32, 0, 0x0000 }, }; -static nthw_fpga_field_init_s mac_pcs_block_lock_fields[] = { - { MAC_PCS_BLOCK_LOCK_LOCK, 20, 0, 0x0000 }, +static nthw_fpga_field_init_s flm_stat_prb_done_fields[] = { + { FLM_STAT_PRB_DONE_CNT, 32, 0, 0x0000 }, }; -static nthw_fpga_field_init_s mac_pcs_block_lock_chg_fields[] = { - { MAC_PCS_BLOCK_LOCK_CHG_LOCK_CHG, 20, 0, 0x0000 }, +static nthw_fpga_field_init_s flm_stat_prb_ignore_fields[] = { + { FLM_STAT_PRB_IGNORE_CNT, 32, 0, 0x0000 }, }; -static nthw_fpga_field_init_s mac_pcs_debounce_ctrl_fields[] = { - { MAC_PCS_DEBOUNCE_CTRL_NT_DEBOUNCE_LATENCY, 8, 8, 10 }, - { MAC_PCS_DEBOUNCE_CTRL_NT_FORCE_LINK_DOWN, 1, 16, 0 }, - { MAC_PCS_DEBOUNCE_CTRL_NT_LINKUP_LATENCY, 8, 0, 10 }, - { MAC_PCS_DEBOUNCE_CTRL_NT_PORT_CTRL, 2, 17, 2 }, +static nthw_fpga_field_init_s flm_stat_rel_done_fields[] = { + { FLM_STAT_REL_DONE_CNT, 32, 0, 0x0000 }, }; -static nthw_fpga_field_init_s mac_pcs_drp_ctrl_fields[] = { - { MAC_PCS_DRP_CTRL_ADR, 10, 16, 0 }, { MAC_PCS_DRP_CTRL_DATA, 16, 0, 0 }, - { MAC_PCS_DRP_CTRL_DBG_BUSY, 1, 30, 0x0000 }, { MAC_PCS_DRP_CTRL_DONE, 1, 31, 0x0000 }, +static nthw_fpga_field_init_s flm_stat_rel_ignore_fields[] = { + { FLM_STAT_REL_IGNORE_CNT, 32, 0, 0x0000 }, +}; + +static nthw_fpga_field_init_s flm_stat_sta_done_fields[] = { + { FLM_STAT_STA_DONE_CNT, 32, 0, 0x0000 }, +}; + +static nthw_fpga_field_init_s flm_stat_tul_done_fields[] = { + { FLM_STAT_TUL_DONE_CNT, 32, 0, 0x0000 }, +}; + +static nthw_fpga_field_init_s flm_stat_unl_done_fields[] = { + { FLM_STAT_UNL_DONE_CNT, 32, 0, 0x0000 }, +}; + +static nthw_fpga_field_init_s flm_stat_unl_ignore_fields[] = { + { FLM_STAT_UNL_IGNORE_CNT, 
32, 0, 0x0000 }, +}; + +static nthw_fpga_field_init_s flm_sta_data_fields[] = { + { FLM_STA_DATA_EOR, 1, 95, 0x0000 }, { FLM_STA_DATA_ID, 32, 0, 0x0000 }, + { FLM_STA_DATA_LDS, 1, 32, 0x0000 }, { FLM_STA_DATA_LFS, 1, 33, 0x0000 }, + { FLM_STA_DATA_LIS, 1, 34, 0x0000 }, { FLM_STA_DATA_PDS, 1, 39, 0x0000 }, + { FLM_STA_DATA_PIS, 1, 40, 0x0000 }, { FLM_STA_DATA_RDS, 1, 37, 0x0000 }, + { FLM_STA_DATA_RIS, 1, 38, 0x0000 }, { FLM_STA_DATA_UDS, 1, 35, 0x0000 }, + { FLM_STA_DATA_UIS, 1, 36, 0x0000 }, +}; + +static nthw_fpga_register_init_s flm_registers[] = { + { FLM_BUF_CTRL, 14, 48, NTHW_FPGA_REG_TYPE_RW, 0, 3, flm_buf_ctrl_fields }, + { FLM_CONTROL, 0, 31, NTHW_FPGA_REG_TYPE_MIXED, 134217728, 18, flm_control_fields }, + { FLM_INF_DATA, 16, 288, NTHW_FPGA_REG_TYPE_RO, 0, 6, flm_inf_data_fields }, + { FLM_LOAD_APS, 5, 32, NTHW_FPGA_REG_TYPE_RO, 0, 1, flm_load_aps_fields }, + { FLM_LOAD_BIN, 3, 32, NTHW_FPGA_REG_TYPE_WO, 0, 1, flm_load_bin_fields }, + { FLM_LOAD_LPS, 4, 32, NTHW_FPGA_REG_TYPE_RO, 0, 1, flm_load_lps_fields }, + { FLM_LRN_DATA, 15, 768, NTHW_FPGA_REG_TYPE_WO, 0, 34, flm_lrn_data_fields }, + { FLM_PRIO, 6, 32, NTHW_FPGA_REG_TYPE_WO, 269488144, 8, flm_prio_fields }, + { FLM_PST_CTRL, 12, 32, NTHW_FPGA_REG_TYPE_WO, 0, 2, flm_pst_ctrl_fields }, + { FLM_PST_DATA, 13, 15, NTHW_FPGA_REG_TYPE_WO, 0, 3, flm_pst_data_fields }, + { FLM_RCP_CTRL, 8, 32, NTHW_FPGA_REG_TYPE_WO, 0, 2, flm_rcp_ctrl_fields }, + { FLM_RCP_DATA, 9, 403, NTHW_FPGA_REG_TYPE_WO, 0, 19, flm_rcp_data_fields }, + { FLM_SCAN, 2, 16, NTHW_FPGA_REG_TYPE_WO, 0, 1, flm_scan_fields }, + { FLM_SCRUB_CTRL, 10, 32, NTHW_FPGA_REG_TYPE_WO, 0, 2, flm_scrub_ctrl_fields }, + { FLM_SCRUB_DATA, 11, 14, NTHW_FPGA_REG_TYPE_WO, 0, 4, flm_scrub_data_fields }, + { FLM_STATUS, 1, 17, NTHW_FPGA_REG_TYPE_MIXED, 0, 9, flm_status_fields }, + { FLM_STAT_AUL_DONE, 41, 32, NTHW_FPGA_REG_TYPE_RO, 0, 1, flm_stat_aul_done_fields }, + { FLM_STAT_AUL_FAIL, 43, 32, NTHW_FPGA_REG_TYPE_RO, 0, 1, flm_stat_aul_fail_fields }, + { FLM_STAT_AUL_IGNORE, 42, 32, NTHW_FPGA_REG_TYPE_RO, 0, 1, flm_stat_aul_ignore_fields }, + { FLM_STAT_CSH_HIT, 52, 32, NTHW_FPGA_REG_TYPE_RO, 0, 1, flm_stat_csh_hit_fields }, + { FLM_STAT_CSH_MISS, 53, 32, NTHW_FPGA_REG_TYPE_RO, 0, 1, flm_stat_csh_miss_fields }, + { FLM_STAT_CSH_UNH, 54, 32, NTHW_FPGA_REG_TYPE_RO, 0, 1, flm_stat_csh_unh_fields }, + { FLM_STAT_CUC_MOVE, 56, 32, NTHW_FPGA_REG_TYPE_RO, 0, 1, flm_stat_cuc_move_fields }, + { FLM_STAT_CUC_START, 55, 32, NTHW_FPGA_REG_TYPE_RO, 0, 1, flm_stat_cuc_start_fields }, + { FLM_STAT_FLOWS, 18, 32, NTHW_FPGA_REG_TYPE_RO, 0, 1, flm_stat_flows_fields }, + { FLM_STAT_INF_DONE, 46, 32, NTHW_FPGA_REG_TYPE_RO, 0, 1, flm_stat_inf_done_fields }, + { FLM_STAT_INF_SKIP, 47, 32, NTHW_FPGA_REG_TYPE_RO, 0, 1, flm_stat_inf_skip_fields }, + { FLM_STAT_LRN_DONE, 32, 32, NTHW_FPGA_REG_TYPE_RO, 0, 1, flm_stat_lrn_done_fields }, + { FLM_STAT_LRN_FAIL, 34, 32, NTHW_FPGA_REG_TYPE_RO, 0, 1, flm_stat_lrn_fail_fields }, + { FLM_STAT_LRN_IGNORE, 33, 32, NTHW_FPGA_REG_TYPE_RO, 0, 1, flm_stat_lrn_ignore_fields }, + { FLM_STAT_PCK_DIS, 51, 32, NTHW_FPGA_REG_TYPE_RO, 0, 1, flm_stat_pck_dis_fields }, + { FLM_STAT_PCK_HIT, 48, 32, NTHW_FPGA_REG_TYPE_RO, 0, 1, flm_stat_pck_hit_fields }, + { FLM_STAT_PCK_MISS, 49, 32, NTHW_FPGA_REG_TYPE_RO, 0, 1, flm_stat_pck_miss_fields }, + { FLM_STAT_PCK_UNH, 50, 32, NTHW_FPGA_REG_TYPE_RO, 0, 1, flm_stat_pck_unh_fields }, + { FLM_STAT_PRB_DONE, 39, 32, NTHW_FPGA_REG_TYPE_RO, 0, 1, flm_stat_prb_done_fields }, + { FLM_STAT_PRB_IGNORE, 40, 32, NTHW_FPGA_REG_TYPE_RO, 0, 1, 
flm_stat_prb_ignore_fields }, + { FLM_STAT_REL_DONE, 37, 32, NTHW_FPGA_REG_TYPE_RO, 0, 1, flm_stat_rel_done_fields }, + { FLM_STAT_REL_IGNORE, 38, 32, NTHW_FPGA_REG_TYPE_RO, 0, 1, flm_stat_rel_ignore_fields }, + { FLM_STAT_STA_DONE, 45, 32, NTHW_FPGA_REG_TYPE_RO, 0, 1, flm_stat_sta_done_fields }, + { FLM_STAT_TUL_DONE, 44, 32, NTHW_FPGA_REG_TYPE_RO, 0, 1, flm_stat_tul_done_fields }, + { FLM_STAT_UNL_DONE, 35, 32, NTHW_FPGA_REG_TYPE_RO, 0, 1, flm_stat_unl_done_fields }, + { FLM_STAT_UNL_IGNORE, 36, 32, NTHW_FPGA_REG_TYPE_RO, 0, 1, flm_stat_unl_ignore_fields }, + { FLM_STA_DATA, 17, 96, NTHW_FPGA_REG_TYPE_RO, 0, 11, flm_sta_data_fields }, +}; + +static nthw_fpga_field_init_s gfg_burstsize0_fields[] = { + { GFG_BURSTSIZE0_VAL, 24, 0, 0 }, +}; + +static nthw_fpga_field_init_s gfg_burstsize1_fields[] = { + { GFG_BURSTSIZE1_VAL, 24, 0, 0 }, +}; + +static nthw_fpga_field_init_s gfg_ctrl0_fields[] = { + { GFG_CTRL0_ENABLE, 1, 0, 0 }, + { GFG_CTRL0_MODE, 3, 1, 0 }, + { GFG_CTRL0_PRBS_EN, 1, 4, 0 }, + { GFG_CTRL0_SIZE, 14, 16, 64 }, +}; + +static nthw_fpga_field_init_s gfg_ctrl1_fields[] = { + { GFG_CTRL1_ENABLE, 1, 0, 0 }, + { GFG_CTRL1_MODE, 3, 1, 0 }, + { GFG_CTRL1_PRBS_EN, 1, 4, 0 }, + { GFG_CTRL1_SIZE, 14, 16, 64 }, +}; + +static nthw_fpga_field_init_s gfg_run0_fields[] = { + { GFG_RUN0_RUN, 1, 0, 0 }, +}; + +static nthw_fpga_field_init_s gfg_run1_fields[] = { + { GFG_RUN1_RUN, 1, 0, 0 }, +}; + +static nthw_fpga_field_init_s gfg_sizemask0_fields[] = { + { GFG_SIZEMASK0_VAL, 14, 0, 0 }, +}; + +static nthw_fpga_field_init_s gfg_sizemask1_fields[] = { + { GFG_SIZEMASK1_VAL, 14, 0, 0 }, +}; + +static nthw_fpga_field_init_s gfg_streamid0_fields[] = { + { GFG_STREAMID0_VAL, 8, 0, 0 }, +}; + +static nthw_fpga_field_init_s gfg_streamid1_fields[] = { + { GFG_STREAMID1_VAL, 8, 0, 1 }, +}; + +static nthw_fpga_register_init_s gfg_registers[] = { + { GFG_BURSTSIZE0, 3, 24, NTHW_FPGA_REG_TYPE_WO, 0, 1, gfg_burstsize0_fields }, + { GFG_BURSTSIZE1, 8, 24, NTHW_FPGA_REG_TYPE_WO, 0, 1, gfg_burstsize1_fields }, + { GFG_CTRL0, 0, 30, NTHW_FPGA_REG_TYPE_WO, 4194304, 4, gfg_ctrl0_fields }, + { GFG_CTRL1, 5, 30, NTHW_FPGA_REG_TYPE_WO, 4194304, 4, gfg_ctrl1_fields }, + { GFG_RUN0, 1, 1, NTHW_FPGA_REG_TYPE_WO, 0, 1, gfg_run0_fields }, + { GFG_RUN1, 6, 1, NTHW_FPGA_REG_TYPE_WO, 0, 1, gfg_run1_fields }, + { GFG_SIZEMASK0, 4, 14, NTHW_FPGA_REG_TYPE_WO, 0, 1, gfg_sizemask0_fields }, + { GFG_SIZEMASK1, 9, 14, NTHW_FPGA_REG_TYPE_WO, 0, 1, gfg_sizemask1_fields }, + { GFG_STREAMID0, 2, 8, NTHW_FPGA_REG_TYPE_WO, 0, 1, gfg_streamid0_fields }, + { GFG_STREAMID1, 7, 8, NTHW_FPGA_REG_TYPE_WO, 1, 1, gfg_streamid1_fields }, +}; + +static nthw_fpga_field_init_s gmf_ctrl_fields[] = { + { GMF_CTRL_ENABLE, 1, 0, 0 }, + { GMF_CTRL_FCS_ALWAYS, 1, 1, 0 }, + { GMF_CTRL_IFG_AUTO_ADJUST_ENABLE, 1, 7, 0 }, + { GMF_CTRL_IFG_ENABLE, 1, 2, 0 }, + { GMF_CTRL_IFG_TX_NOW_ALWAYS, 1, 3, 0 }, + { GMF_CTRL_IFG_TX_NOW_ON_TS_ENABLE, 1, 5, 0 }, + { GMF_CTRL_IFG_TX_ON_TS_ADJUST_ON_SET_CLOCK, 1, 6, 0 }, + { GMF_CTRL_IFG_TX_ON_TS_ALWAYS, 1, 4, 0 }, + { GMF_CTRL_TS_INJECT_ALWAYS, 1, 8, 0 }, + { GMF_CTRL_TS_INJECT_DUAL_STEP, 1, 9, 0 }, +}; + +static nthw_fpga_field_init_s gmf_debug_lane_marker_fields[] = { + { GMF_DEBUG_LANE_MARKER_COMPENSATION, 16, 0, 16384 }, +}; + +static nthw_fpga_field_init_s gmf_ifg_max_adjust_slack_fields[] = { + { GMF_IFG_MAX_ADJUST_SLACK_SLACK, 64, 0, 0 }, +}; + +static nthw_fpga_field_init_s gmf_ifg_set_clock_delta_fields[] = { + { GMF_IFG_SET_CLOCK_DELTA_DELTA, 64, 0, 0 }, +}; + +static nthw_fpga_field_init_s 
gmf_ifg_set_clock_delta_adjust_fields[] = { + { GMF_IFG_SET_CLOCK_DELTA_ADJUST_DELTA, 64, 0, 0 }, +}; + +static nthw_fpga_field_init_s gmf_ifg_tx_now_on_ts_fields[] = { + { GMF_IFG_TX_NOW_ON_TS_TS, 64, 0, 0 }, +}; + +static nthw_fpga_field_init_s gmf_speed_fields[] = { + { GMF_SPEED_IFG_SPEED, 64, 0, 0 }, +}; + +static nthw_fpga_field_init_s gmf_stat_data_buffer_fields[] = { + { GMF_STAT_DATA_BUFFER_USED, 15, 0, 0x0000 }, +}; + +static nthw_fpga_field_init_s gmf_stat_max_delayed_pkt_fields[] = { + { GMF_STAT_MAX_DELAYED_PKT_NS, 64, 0, 0 }, +}; + +static nthw_fpga_field_init_s gmf_stat_next_pkt_fields[] = { + { GMF_STAT_NEXT_PKT_NS, 64, 0, 0 }, +}; + +static nthw_fpga_field_init_s gmf_stat_sticky_fields[] = { + { GMF_STAT_STICKY_DATA_UNDERFLOWED, 1, 0, 0 }, + { GMF_STAT_STICKY_IFG_ADJUSTED, 1, 1, 0 }, +}; + +static nthw_fpga_field_init_s gmf_ts_inject_fields[] = { + { GMF_TS_INJECT_OFFSET, 14, 0, 0 }, + { GMF_TS_INJECT_POS, 2, 14, 0 }, +}; + +static nthw_fpga_register_init_s gmf_registers[] = { + { GMF_CTRL, 0, 10, NTHW_FPGA_REG_TYPE_WO, 0, 10, gmf_ctrl_fields }, + { + GMF_DEBUG_LANE_MARKER, 7, 16, NTHW_FPGA_REG_TYPE_WO, 16384, 1, + gmf_debug_lane_marker_fields + }, + { + GMF_IFG_MAX_ADJUST_SLACK, 4, 64, NTHW_FPGA_REG_TYPE_WO, 0, 1, + gmf_ifg_max_adjust_slack_fields + }, + { + GMF_IFG_SET_CLOCK_DELTA, 2, 64, NTHW_FPGA_REG_TYPE_WO, 0, 1, + gmf_ifg_set_clock_delta_fields + }, + { + GMF_IFG_SET_CLOCK_DELTA_ADJUST, 3, 64, NTHW_FPGA_REG_TYPE_WO, 0, 1, + gmf_ifg_set_clock_delta_adjust_fields + }, + { GMF_IFG_TX_NOW_ON_TS, 5, 64, NTHW_FPGA_REG_TYPE_WO, 0, 1, gmf_ifg_tx_now_on_ts_fields }, + { GMF_SPEED, 1, 64, NTHW_FPGA_REG_TYPE_WO, 0, 1, gmf_speed_fields }, + { GMF_STAT_DATA_BUFFER, 9, 15, NTHW_FPGA_REG_TYPE_RO, 0, 1, gmf_stat_data_buffer_fields }, + { + GMF_STAT_MAX_DELAYED_PKT, 11, 64, NTHW_FPGA_REG_TYPE_RC1, 0, 1, + gmf_stat_max_delayed_pkt_fields + }, + { GMF_STAT_NEXT_PKT, 10, 64, NTHW_FPGA_REG_TYPE_RO, 0, 1, gmf_stat_next_pkt_fields }, + { GMF_STAT_STICKY, 8, 2, NTHW_FPGA_REG_TYPE_RC1, 0, 2, gmf_stat_sticky_fields }, + { GMF_TS_INJECT, 6, 16, NTHW_FPGA_REG_TYPE_WO, 0, 2, gmf_ts_inject_fields }, +}; + +static nthw_fpga_field_init_s gpio_phy_cfg_fields[] = { + { GPIO_PHY_CFG_E_PORT0_RXLOS, 1, 8, 0 }, { GPIO_PHY_CFG_E_PORT1_RXLOS, 1, 9, 0 }, + { GPIO_PHY_CFG_PORT0_INT_B, 1, 1, 1 }, { GPIO_PHY_CFG_PORT0_LPMODE, 1, 0, 0 }, + { GPIO_PHY_CFG_PORT0_MODPRS_B, 1, 3, 1 }, { GPIO_PHY_CFG_PORT0_RESET_B, 1, 2, 0 }, + { GPIO_PHY_CFG_PORT1_INT_B, 1, 5, 1 }, { GPIO_PHY_CFG_PORT1_LPMODE, 1, 4, 0 }, + { GPIO_PHY_CFG_PORT1_MODPRS_B, 1, 7, 1 }, { GPIO_PHY_CFG_PORT1_RESET_B, 1, 6, 0 }, +}; + +static nthw_fpga_field_init_s gpio_phy_gpio_fields[] = { + { GPIO_PHY_GPIO_E_PORT0_RXLOS, 1, 8, 0 }, { GPIO_PHY_GPIO_E_PORT1_RXLOS, 1, 9, 0 }, + { GPIO_PHY_GPIO_PORT0_INT_B, 1, 1, 0x0000 }, { GPIO_PHY_GPIO_PORT0_LPMODE, 1, 0, 1 }, + { GPIO_PHY_GPIO_PORT0_MODPRS_B, 1, 3, 0x0000 }, { GPIO_PHY_GPIO_PORT0_RESET_B, 1, 2, 0 }, + { GPIO_PHY_GPIO_PORT1_INT_B, 1, 5, 0x0000 }, { GPIO_PHY_GPIO_PORT1_LPMODE, 1, 4, 1 }, + { GPIO_PHY_GPIO_PORT1_MODPRS_B, 1, 7, 0x0000 }, { GPIO_PHY_GPIO_PORT1_RESET_B, 1, 6, 0 }, +}; + +static nthw_fpga_register_init_s gpio_phy_registers[] = { + { GPIO_PHY_CFG, 0, 10, NTHW_FPGA_REG_TYPE_RW, 170, 10, gpio_phy_cfg_fields }, + { GPIO_PHY_GPIO, 1, 10, NTHW_FPGA_REG_TYPE_RW, 17, 10, gpio_phy_gpio_fields }, +}; + +static nthw_fpga_field_init_s hfu_rcp_ctrl_fields[] = { + { HFU_RCP_CTRL_ADR, 6, 0, 0x0000 }, + { HFU_RCP_CTRL_CNT, 16, 16, 0x0000 }, +}; + +static nthw_fpga_field_init_s hfu_rcp_data_fields[] = { + { 
HFU_RCP_DATA_LEN_A_ADD_DYN, 5, 15, 0x0000 }, + { HFU_RCP_DATA_LEN_A_ADD_OFS, 8, 20, 0x0000 }, + { HFU_RCP_DATA_LEN_A_OL4LEN, 1, 1, 0x0000 }, + { HFU_RCP_DATA_LEN_A_POS_DYN, 5, 2, 0x0000 }, + { HFU_RCP_DATA_LEN_A_POS_OFS, 8, 7, 0x0000 }, + { HFU_RCP_DATA_LEN_A_SUB_DYN, 5, 28, 0x0000 }, + { HFU_RCP_DATA_LEN_A_WR, 1, 0, 0x0000 }, + { HFU_RCP_DATA_LEN_B_ADD_DYN, 5, 47, 0x0000 }, + { HFU_RCP_DATA_LEN_B_ADD_OFS, 8, 52, 0x0000 }, + { HFU_RCP_DATA_LEN_B_POS_DYN, 5, 34, 0x0000 }, + { HFU_RCP_DATA_LEN_B_POS_OFS, 8, 39, 0x0000 }, + { HFU_RCP_DATA_LEN_B_SUB_DYN, 5, 60, 0x0000 }, + { HFU_RCP_DATA_LEN_B_WR, 1, 33, 0x0000 }, + { HFU_RCP_DATA_LEN_C_ADD_DYN, 5, 79, 0x0000 }, + { HFU_RCP_DATA_LEN_C_ADD_OFS, 8, 84, 0x0000 }, + { HFU_RCP_DATA_LEN_C_POS_DYN, 5, 66, 0x0000 }, + { HFU_RCP_DATA_LEN_C_POS_OFS, 8, 71, 0x0000 }, + { HFU_RCP_DATA_LEN_C_SUB_DYN, 5, 92, 0x0000 }, + { HFU_RCP_DATA_LEN_C_WR, 1, 65, 0x0000 }, + { HFU_RCP_DATA_TTL_POS_DYN, 5, 98, 0x0000 }, + { HFU_RCP_DATA_TTL_POS_OFS, 8, 103, 0x0000 }, + { HFU_RCP_DATA_TTL_WR, 1, 97, 0x0000 }, +}; + +static nthw_fpga_register_init_s hfu_registers[] = { + { HFU_RCP_CTRL, 0, 32, NTHW_FPGA_REG_TYPE_WO, 0, 2, hfu_rcp_ctrl_fields }, + { HFU_RCP_DATA, 1, 111, NTHW_FPGA_REG_TYPE_WO, 0, 22, hfu_rcp_data_fields }, +}; + +static nthw_fpga_field_init_s hif_build_time_fields[] = { + { HIF_BUILD_TIME_TIME, 32, 0, 1726740521 }, +}; + +static nthw_fpga_field_init_s hif_config_fields[] = { + { HIF_CONFIG_EXT_TAG, 1, 6, 0x0000 }, + { HIF_CONFIG_MAX_READ, 3, 3, 0x0000 }, + { HIF_CONFIG_MAX_TLP, 3, 0, 0x0000 }, +}; + +static nthw_fpga_field_init_s hif_control_fields[] = { + { HIF_CONTROL_BLESSED, 8, 4, 0 }, + { HIF_CONTROL_FSR, 1, 12, 1 }, + { HIF_CONTROL_WRAW, 4, 0, 1 }, +}; + +static nthw_fpga_field_init_s hif_prod_id_ex_fields[] = { + { HIF_PROD_ID_EX_LAYOUT, 1, 31, 0 }, + { HIF_PROD_ID_EX_LAYOUT_VERSION, 8, 0, 1 }, + { HIF_PROD_ID_EX_RESERVED, 23, 8, 0 }, +}; + +static nthw_fpga_field_init_s hif_prod_id_lsb_fields[] = { + { HIF_PROD_ID_LSB_GROUP_ID, 16, 16, 9563 }, + { HIF_PROD_ID_LSB_REV_ID, 8, 0, 49 }, + { HIF_PROD_ID_LSB_VER_ID, 8, 8, 55 }, +}; + +static nthw_fpga_field_init_s hif_prod_id_msb_fields[] = { + { HIF_PROD_ID_MSB_BUILD_NO, 10, 12, 0 }, + { HIF_PROD_ID_MSB_TYPE_ID, 12, 0, 200 }, +}; + +static nthw_fpga_field_init_s hif_sample_time_fields[] = { + { HIF_SAMPLE_TIME_SAMPLE_TIME, 1, 0, 0x0000 }, +}; + +static nthw_fpga_field_init_s hif_status_fields[] = { + { HIF_STATUS_RD_ERR, 1, 9, 0 }, + { HIF_STATUS_TAGS_IN_USE, 8, 0, 0 }, + { HIF_STATUS_WR_ERR, 1, 8, 0 }, +}; + +static nthw_fpga_field_init_s hif_stat_ctrl_fields[] = { + { HIF_STAT_CTRL_STAT_ENA, 1, 1, 0 }, + { HIF_STAT_CTRL_STAT_REQ, 1, 0, 0 }, +}; + +static nthw_fpga_field_init_s hif_stat_refclk_fields[] = { + { HIF_STAT_REFCLK_REFCLK250, 32, 0, 0 }, +}; + +static nthw_fpga_field_init_s hif_stat_rx_fields[] = { + { HIF_STAT_RX_COUNTER, 32, 0, 0 }, +}; + +static nthw_fpga_field_init_s hif_stat_tx_fields[] = { + { HIF_STAT_TX_COUNTER, 32, 0, 0 }, +}; + +static nthw_fpga_field_init_s hif_test0_fields[] = { + { HIF_TEST0_DATA, 32, 0, 287454020 }, +}; + +static nthw_fpga_field_init_s hif_test1_fields[] = { + { HIF_TEST1_DATA, 32, 0, 2864434397 }, +}; + +static nthw_fpga_field_init_s hif_uuid0_fields[] = { + { HIF_UUID0_UUID0, 32, 0, 1021928912 }, +}; + +static nthw_fpga_field_init_s hif_uuid1_fields[] = { + { HIF_UUID1_UUID1, 32, 0, 2998983545 }, +}; + +static nthw_fpga_field_init_s hif_uuid2_fields[] = { + { HIF_UUID2_UUID2, 32, 0, 827210969 }, +}; + +static nthw_fpga_field_init_s hif_uuid3_fields[] = { + { 
HIF_UUID3_UUID3, 32, 0, 462142918 }, +}; + +static nthw_fpga_register_init_s hif_registers[] = { + { HIF_BUILD_TIME, 16, 32, NTHW_FPGA_REG_TYPE_RO, 1726740521, 1, hif_build_time_fields }, + { HIF_CONFIG, 24, 7, NTHW_FPGA_REG_TYPE_RW, 0, 3, hif_config_fields }, + { HIF_CONTROL, 40, 13, NTHW_FPGA_REG_TYPE_MIXED, 4097, 3, hif_control_fields }, + { HIF_PROD_ID_EX, 112, 32, NTHW_FPGA_REG_TYPE_RO, 1, 3, hif_prod_id_ex_fields }, + { HIF_PROD_ID_LSB, 0, 32, NTHW_FPGA_REG_TYPE_RO, 626734897, 3, hif_prod_id_lsb_fields }, + { HIF_PROD_ID_MSB, 8, 22, NTHW_FPGA_REG_TYPE_RO, 200, 2, hif_prod_id_msb_fields }, + { HIF_SAMPLE_TIME, 96, 1, NTHW_FPGA_REG_TYPE_WO, 0, 1, hif_sample_time_fields }, + { HIF_STATUS, 32, 10, NTHW_FPGA_REG_TYPE_MIXED, 0, 3, hif_status_fields }, + { HIF_STAT_CTRL, 64, 2, NTHW_FPGA_REG_TYPE_WO, 0, 2, hif_stat_ctrl_fields }, + { HIF_STAT_REFCLK, 72, 32, NTHW_FPGA_REG_TYPE_RO, 0, 1, hif_stat_refclk_fields }, + { HIF_STAT_RX, 88, 32, NTHW_FPGA_REG_TYPE_RO, 0, 1, hif_stat_rx_fields }, + { HIF_STAT_TX, 80, 32, NTHW_FPGA_REG_TYPE_RO, 0, 1, hif_stat_tx_fields }, + { HIF_TEST0, 48, 32, NTHW_FPGA_REG_TYPE_RW, 287454020, 1, hif_test0_fields }, + { HIF_TEST1, 56, 32, NTHW_FPGA_REG_TYPE_RW, 2864434397, 1, hif_test1_fields }, + { HIF_UUID0, 128, 32, NTHW_FPGA_REG_TYPE_RO, 1021928912, 1, hif_uuid0_fields }, + { HIF_UUID1, 144, 32, NTHW_FPGA_REG_TYPE_RO, 2998983545, 1, hif_uuid1_fields }, + { HIF_UUID2, 160, 32, NTHW_FPGA_REG_TYPE_RO, 827210969, 1, hif_uuid2_fields }, + { HIF_UUID3, 176, 32, NTHW_FPGA_REG_TYPE_RO, 462142918, 1, hif_uuid3_fields }, +}; + +static nthw_fpga_field_init_s hsh_rcp_ctrl_fields[] = { + { HSH_RCP_CTRL_ADR, 4, 0, 0x0000 }, + { HSH_RCP_CTRL_CNT, 16, 16, 0x0000 }, +}; + +static nthw_fpga_field_init_s hsh_rcp_data_fields[] = { + { HSH_RCP_DATA_AUTO_IPV4_MASK, 1, 742, 0x0000 }, + { HSH_RCP_DATA_HSH_TYPE, 5, 416, 0x0000 }, + { HSH_RCP_DATA_HSH_VALID, 1, 415, 0x0000 }, + { HSH_RCP_DATA_K, 320, 422, 0x0000 }, + { HSH_RCP_DATA_LOAD_DIST_TYPE, 2, 0, 0x0000 }, + { HSH_RCP_DATA_MAC_PORT_MASK, 2, 2, 0x0000 }, + { HSH_RCP_DATA_P_MASK, 1, 61, 0x0000 }, + { HSH_RCP_DATA_QW0_OFS, 8, 11, 0x0000 }, + { HSH_RCP_DATA_QW0_PE, 5, 6, 0x0000 }, + { HSH_RCP_DATA_QW4_OFS, 8, 24, 0x0000 }, + { HSH_RCP_DATA_QW4_PE, 5, 19, 0x0000 }, + { HSH_RCP_DATA_SEED, 32, 382, 0x0000 }, + { HSH_RCP_DATA_SORT, 2, 4, 0x0000 }, + { HSH_RCP_DATA_TNL_P, 1, 414, 0x0000 }, + { HSH_RCP_DATA_TOEPLITZ, 1, 421, 0x0000 }, + { HSH_RCP_DATA_W8_OFS, 8, 37, 0x0000 }, + { HSH_RCP_DATA_W8_PE, 5, 32, 0x0000 }, + { HSH_RCP_DATA_W8_SORT, 1, 45, 0x0000 }, + { HSH_RCP_DATA_W9_OFS, 8, 51, 0x0000 }, + { HSH_RCP_DATA_W9_P, 1, 60, 0x0000 }, + { HSH_RCP_DATA_W9_PE, 5, 46, 0x0000 }, + { HSH_RCP_DATA_W9_SORT, 1, 59, 0x0000 }, + { HSH_RCP_DATA_WORD_MASK, 320, 62, 0x0000 }, +}; + +static nthw_fpga_register_init_s hsh_registers[] = { + { HSH_RCP_CTRL, 0, 32, NTHW_FPGA_REG_TYPE_WO, 0, 2, hsh_rcp_ctrl_fields }, + { HSH_RCP_DATA, 1, 743, NTHW_FPGA_REG_TYPE_WO, 0, 23, hsh_rcp_data_fields }, +}; + +static nthw_fpga_field_init_s ifr_counters_ctrl_fields[] = { + { IFR_COUNTERS_CTRL_ADR, 4, 0, 0x0000 }, + { IFR_COUNTERS_CTRL_CNT, 16, 16, 0x0000 }, +}; + +static nthw_fpga_field_init_s ifr_counters_data_fields[] = { + { IFR_COUNTERS_DATA_DROP, 32, 0, 0x0000 }, +}; + +static nthw_fpga_field_init_s ifr_df_buf_ctrl_fields[] = { + { IFR_DF_BUF_CTRL_AVAILABLE, 11, 0, 0x0000 }, + { IFR_DF_BUF_CTRL_MTU_PROFILE, 16, 11, 0x0000 }, +}; + +static nthw_fpga_field_init_s ifr_df_buf_data_fields[] = { + { IFR_DF_BUF_DATA_FIFO_DAT, 128, 0, 0x0000 }, +}; + +static 
nthw_fpga_field_init_s ifr_rcp_ctrl_fields[] = { + { IFR_RCP_CTRL_ADR, 4, 0, 0x0000 }, + { IFR_RCP_CTRL_CNT, 16, 16, 0x0000 }, +}; + +static nthw_fpga_field_init_s ifr_rcp_data_fields[] = { + { IFR_RCP_DATA_IPV4_DF_DROP, 1, 17, 0x0000 }, { IFR_RCP_DATA_IPV4_EN, 1, 0, 0x0000 }, + { IFR_RCP_DATA_IPV6_DROP, 1, 16, 0x0000 }, { IFR_RCP_DATA_IPV6_EN, 1, 1, 0x0000 }, + { IFR_RCP_DATA_MTU, 14, 2, 0x0000 }, +}; + +static nthw_fpga_register_init_s ifr_registers[] = { + { IFR_COUNTERS_CTRL, 4, 32, NTHW_FPGA_REG_TYPE_WO, 0, 2, ifr_counters_ctrl_fields }, + { IFR_COUNTERS_DATA, 5, 32, NTHW_FPGA_REG_TYPE_RO, 0, 1, ifr_counters_data_fields }, + { IFR_DF_BUF_CTRL, 2, 27, NTHW_FPGA_REG_TYPE_RO, 0, 2, ifr_df_buf_ctrl_fields }, + { IFR_DF_BUF_DATA, 3, 128, NTHW_FPGA_REG_TYPE_RO, 0, 1, ifr_df_buf_data_fields }, + { IFR_RCP_CTRL, 0, 32, NTHW_FPGA_REG_TYPE_WO, 0, 2, ifr_rcp_ctrl_fields }, + { IFR_RCP_DATA, 1, 18, NTHW_FPGA_REG_TYPE_WO, 0, 5, ifr_rcp_data_fields }, +}; + +static nthw_fpga_field_init_s iic_adr_fields[] = { + { IIC_ADR_SLV_ADR, 7, 1, 0 }, +}; + +static nthw_fpga_field_init_s iic_cr_fields[] = { + { IIC_CR_EN, 1, 0, 0 }, { IIC_CR_GC_EN, 1, 6, 0 }, { IIC_CR_MSMS, 1, 2, 0 }, + { IIC_CR_RST, 1, 7, 0 }, { IIC_CR_RSTA, 1, 5, 0 }, { IIC_CR_TX, 1, 3, 0 }, + { IIC_CR_TXAK, 1, 4, 0 }, { IIC_CR_TXFIFO_RESET, 1, 1, 0 }, +}; + +static nthw_fpga_field_init_s iic_dgie_fields[] = { + { IIC_DGIE_GIE, 1, 31, 0 }, +}; + +static nthw_fpga_field_init_s iic_gpo_fields[] = { + { IIC_GPO_GPO_VAL, 1, 0, 0 }, +}; + +static nthw_fpga_field_init_s iic_ier_fields[] = { + { IIC_IER_INT0, 1, 0, 0 }, { IIC_IER_INT1, 1, 1, 0 }, { IIC_IER_INT2, 1, 2, 0 }, + { IIC_IER_INT3, 1, 3, 0 }, { IIC_IER_INT4, 1, 4, 0 }, { IIC_IER_INT5, 1, 5, 0 }, + { IIC_IER_INT6, 1, 6, 0 }, { IIC_IER_INT7, 1, 7, 0 }, +}; + +static nthw_fpga_field_init_s iic_isr_fields[] = { + { IIC_ISR_INT0, 1, 0, 0 }, { IIC_ISR_INT1, 1, 1, 0 }, { IIC_ISR_INT2, 1, 2, 0 }, + { IIC_ISR_INT3, 1, 3, 0 }, { IIC_ISR_INT4, 1, 4, 0 }, { IIC_ISR_INT5, 1, 5, 0 }, + { IIC_ISR_INT6, 1, 6, 0 }, { IIC_ISR_INT7, 1, 7, 0 }, +}; + +static nthw_fpga_field_init_s iic_rx_fifo_fields[] = { + { IIC_RX_FIFO_RXDATA, 8, 0, 0 }, +}; + +static nthw_fpga_field_init_s iic_rx_fifo_ocy_fields[] = { + { IIC_RX_FIFO_OCY_OCY_VAL, 4, 0, 0 }, +}; + +static nthw_fpga_field_init_s iic_rx_fifo_pirq_fields[] = { + { IIC_RX_FIFO_PIRQ_CMP_VAL, 4, 0, 0 }, +}; + +static nthw_fpga_field_init_s iic_softr_fields[] = { + { IIC_SOFTR_RKEY, 4, 0, 0x0000 }, +}; + +static nthw_fpga_field_init_s iic_sr_fields[] = { + { IIC_SR_AAS, 1, 1, 0 }, { IIC_SR_ABGC, 1, 0, 0 }, { IIC_SR_BB, 1, 2, 0 }, + { IIC_SR_RXFIFO_EMPTY, 1, 6, 1 }, { IIC_SR_RXFIFO_FULL, 1, 5, 0 }, { IIC_SR_SRW, 1, 3, 0 }, + { IIC_SR_TXFIFO_EMPTY, 1, 7, 1 }, { IIC_SR_TXFIFO_FULL, 1, 4, 0 }, +}; + +static nthw_fpga_field_init_s iic_tbuf_fields[] = { + { IIC_TBUF_TBUF_VAL, 32, 0, 0 }, +}; + +static nthw_fpga_field_init_s iic_ten_adr_fields[] = { + { IIC_TEN_ADR_MSB_SLV_ADR, 3, 0, 0 }, +}; + +static nthw_fpga_field_init_s iic_thddat_fields[] = { + { IIC_THDDAT_THDDAT_VAL, 32, 0, 0 }, +}; + +static nthw_fpga_field_init_s iic_thdsta_fields[] = { + { IIC_THDSTA_THDSTA_VAL, 32, 0, 0 }, +}; + +static nthw_fpga_field_init_s iic_thigh_fields[] = { + { IIC_THIGH_THIGH_VAL, 32, 0, 0 }, +}; + +static nthw_fpga_field_init_s iic_tlow_fields[] = { + { IIC_TLOW_TLOW_VAL, 32, 0, 0 }, +}; + +static nthw_fpga_field_init_s iic_tsudat_fields[] = { + { IIC_TSUDAT_TSUDAT_VAL, 32, 0, 0 }, +}; + +static nthw_fpga_field_init_s iic_tsusta_fields[] = { + { IIC_TSUSTA_TSUSTA_VAL, 32, 0, 0 }, 
+}; + +static nthw_fpga_field_init_s iic_tsusto_fields[] = { + { IIC_TSUSTO_TSUSTO_VAL, 32, 0, 0 }, +}; + +static nthw_fpga_field_init_s iic_tx_fifo_fields[] = { + { IIC_TX_FIFO_START, 1, 8, 0 }, + { IIC_TX_FIFO_STOP, 1, 9, 0 }, + { IIC_TX_FIFO_TXDATA, 8, 0, 0 }, +}; + +static nthw_fpga_field_init_s iic_tx_fifo_ocy_fields[] = { + { IIC_TX_FIFO_OCY_OCY_VAL, 4, 0, 0 }, +}; + +static nthw_fpga_register_init_s iic_registers[] = { + { IIC_ADR, 68, 8, NTHW_FPGA_REG_TYPE_RW, 0, 1, iic_adr_fields }, + { IIC_CR, 64, 8, NTHW_FPGA_REG_TYPE_RW, 0, 8, iic_cr_fields }, + { IIC_DGIE, 7, 32, NTHW_FPGA_REG_TYPE_RW, 0, 1, iic_dgie_fields }, + { IIC_GPO, 73, 1, NTHW_FPGA_REG_TYPE_RW, 0, 1, iic_gpo_fields }, + { IIC_IER, 10, 8, NTHW_FPGA_REG_TYPE_RW, 0, 8, iic_ier_fields }, + { IIC_ISR, 8, 8, NTHW_FPGA_REG_TYPE_RW, 0, 8, iic_isr_fields }, + { IIC_RX_FIFO, 67, 8, NTHW_FPGA_REG_TYPE_RO, 0, 1, iic_rx_fifo_fields }, + { IIC_RX_FIFO_OCY, 70, 4, NTHW_FPGA_REG_TYPE_RO, 0, 1, iic_rx_fifo_ocy_fields }, + { IIC_RX_FIFO_PIRQ, 72, 4, NTHW_FPGA_REG_TYPE_RW, 0, 1, iic_rx_fifo_pirq_fields }, + { IIC_SOFTR, 16, 4, NTHW_FPGA_REG_TYPE_WO, 0, 1, iic_softr_fields }, + { IIC_SR, 65, 8, NTHW_FPGA_REG_TYPE_RO, 192, 8, iic_sr_fields }, + { IIC_TBUF, 78, 32, NTHW_FPGA_REG_TYPE_RW, 0, 1, iic_tbuf_fields }, + { IIC_TEN_ADR, 71, 3, NTHW_FPGA_REG_TYPE_RO, 0, 1, iic_ten_adr_fields }, + { IIC_THDDAT, 81, 32, NTHW_FPGA_REG_TYPE_RW, 0, 1, iic_thddat_fields }, + { IIC_THDSTA, 76, 32, NTHW_FPGA_REG_TYPE_RW, 0, 1, iic_thdsta_fields }, + { IIC_THIGH, 79, 32, NTHW_FPGA_REG_TYPE_RW, 0, 1, iic_thigh_fields }, + { IIC_TLOW, 80, 32, NTHW_FPGA_REG_TYPE_RW, 0, 1, iic_tlow_fields }, + { IIC_TSUDAT, 77, 32, NTHW_FPGA_REG_TYPE_RW, 0, 1, iic_tsudat_fields }, + { IIC_TSUSTA, 74, 32, NTHW_FPGA_REG_TYPE_RW, 0, 1, iic_tsusta_fields }, + { IIC_TSUSTO, 75, 32, NTHW_FPGA_REG_TYPE_RW, 0, 1, iic_tsusto_fields }, + { IIC_TX_FIFO, 66, 10, NTHW_FPGA_REG_TYPE_WO, 0, 3, iic_tx_fifo_fields }, + { IIC_TX_FIFO_OCY, 69, 4, NTHW_FPGA_REG_TYPE_RO, 0, 1, iic_tx_fifo_ocy_fields }, +}; + +static nthw_fpga_field_init_s ins_rcp_ctrl_fields[] = { + { INS_RCP_CTRL_ADR, 4, 0, 0x0000 }, + { INS_RCP_CTRL_CNT, 16, 16, 0x0000 }, +}; + +static nthw_fpga_field_init_s ins_rcp_data_fields[] = { + { INS_RCP_DATA_DYN, 5, 0, 0x0000 }, + { INS_RCP_DATA_LEN, 8, 15, 0x0000 }, + { INS_RCP_DATA_OFS, 10, 5, 0x0000 }, +}; + +static nthw_fpga_register_init_s ins_registers[] = { + { INS_RCP_CTRL, 0, 32, NTHW_FPGA_REG_TYPE_WO, 0, 2, ins_rcp_ctrl_fields }, + { INS_RCP_DATA, 1, 23, NTHW_FPGA_REG_TYPE_WO, 0, 3, ins_rcp_data_fields }, +}; + +static nthw_fpga_field_init_s km_cam_ctrl_fields[] = { + { KM_CAM_CTRL_ADR, 13, 0, 0x0000 }, + { KM_CAM_CTRL_CNT, 16, 16, 0x0000 }, +}; + +static nthw_fpga_field_init_s km_cam_data_fields[] = { + { KM_CAM_DATA_FT0, 4, 192, 0x0000 }, { KM_CAM_DATA_FT1, 4, 196, 0x0000 }, + { KM_CAM_DATA_FT2, 4, 200, 0x0000 }, { KM_CAM_DATA_FT3, 4, 204, 0x0000 }, + { KM_CAM_DATA_FT4, 4, 208, 0x0000 }, { KM_CAM_DATA_FT5, 4, 212, 0x0000 }, + { KM_CAM_DATA_W0, 32, 0, 0x0000 }, { KM_CAM_DATA_W1, 32, 32, 0x0000 }, + { KM_CAM_DATA_W2, 32, 64, 0x0000 }, { KM_CAM_DATA_W3, 32, 96, 0x0000 }, + { KM_CAM_DATA_W4, 32, 128, 0x0000 }, { KM_CAM_DATA_W5, 32, 160, 0x0000 }, +}; + +static nthw_fpga_field_init_s km_rcp_ctrl_fields[] = { + { KM_RCP_CTRL_ADR, 5, 0, 0x0000 }, + { KM_RCP_CTRL_CNT, 16, 16, 0x0000 }, +}; + +static nthw_fpga_field_init_s km_rcp_data_fields[] = { + { KM_RCP_DATA_BANK_A, 12, 694, 0x0000 }, { KM_RCP_DATA_BANK_B, 12, 706, 0x0000 }, + { KM_RCP_DATA_DUAL, 1, 651, 0x0000 }, { 
KM_RCP_DATA_DW0_B_DYN, 5, 729, 0x0000 }, + { KM_RCP_DATA_DW0_B_OFS, 8, 734, 0x0000 }, { KM_RCP_DATA_DW10_DYN, 5, 55, 0x0000 }, + { KM_RCP_DATA_DW10_OFS, 8, 60, 0x0000 }, { KM_RCP_DATA_DW10_SEL_A, 2, 68, 0x0000 }, + { KM_RCP_DATA_DW10_SEL_B, 2, 70, 0x0000 }, { KM_RCP_DATA_DW2_B_DYN, 5, 742, 0x0000 }, + { KM_RCP_DATA_DW2_B_OFS, 8, 747, 0x0000 }, { KM_RCP_DATA_DW8_DYN, 5, 36, 0x0000 }, + { KM_RCP_DATA_DW8_OFS, 8, 41, 0x0000 }, { KM_RCP_DATA_DW8_SEL_A, 3, 49, 0x0000 }, + { KM_RCP_DATA_DW8_SEL_B, 3, 52, 0x0000 }, { KM_RCP_DATA_EL_A, 4, 653, 0x0000 }, + { KM_RCP_DATA_EL_B, 3, 657, 0x0000 }, { KM_RCP_DATA_FTM_A, 16, 662, 0x0000 }, + { KM_RCP_DATA_FTM_B, 16, 678, 0x0000 }, { KM_RCP_DATA_INFO_A, 1, 660, 0x0000 }, + { KM_RCP_DATA_INFO_B, 1, 661, 0x0000 }, { KM_RCP_DATA_KEYWAY_A, 1, 725, 0x0000 }, + { KM_RCP_DATA_KEYWAY_B, 1, 726, 0x0000 }, { KM_RCP_DATA_KL_A, 4, 718, 0x0000 }, + { KM_RCP_DATA_KL_B, 3, 722, 0x0000 }, { KM_RCP_DATA_MASK_A, 384, 75, 0x0000 }, + { KM_RCP_DATA_MASK_B, 192, 459, 0x0000 }, { KM_RCP_DATA_PAIRED, 1, 652, 0x0000 }, + { KM_RCP_DATA_QW0_DYN, 5, 0, 0x0000 }, { KM_RCP_DATA_QW0_OFS, 8, 5, 0x0000 }, + { KM_RCP_DATA_QW0_SEL_A, 3, 13, 0x0000 }, { KM_RCP_DATA_QW0_SEL_B, 3, 16, 0x0000 }, + { KM_RCP_DATA_QW4_DYN, 5, 19, 0x0000 }, { KM_RCP_DATA_QW4_OFS, 8, 24, 0x0000 }, + { KM_RCP_DATA_QW4_SEL_A, 2, 32, 0x0000 }, { KM_RCP_DATA_QW4_SEL_B, 2, 34, 0x0000 }, + { KM_RCP_DATA_SW4_B_DYN, 5, 755, 0x0000 }, { KM_RCP_DATA_SW4_B_OFS, 8, 760, 0x0000 }, + { KM_RCP_DATA_SW5_B_DYN, 5, 768, 0x0000 }, { KM_RCP_DATA_SW5_B_OFS, 8, 773, 0x0000 }, + { KM_RCP_DATA_SWX_CCH, 1, 72, 0x0000 }, { KM_RCP_DATA_SWX_SEL_A, 1, 73, 0x0000 }, + { KM_RCP_DATA_SWX_SEL_B, 1, 74, 0x0000 }, { KM_RCP_DATA_SYNERGY_MODE, 2, 727, 0x0000 }, +}; + +static nthw_fpga_field_init_s km_status_fields[] = { + { KM_STATUS_TCQ_RDY, 1, 0, 0x0000 }, +}; + +static nthw_fpga_field_init_s km_tcam_ctrl_fields[] = { + { KM_TCAM_CTRL_ADR, 14, 0, 0x0000 }, + { KM_TCAM_CTRL_CNT, 16, 16, 0x0000 }, +}; + +static nthw_fpga_field_init_s km_tcam_data_fields[] = { + { KM_TCAM_DATA_T, 72, 0, 0x0000 }, +}; + +static nthw_fpga_field_init_s km_tci_ctrl_fields[] = { + { KM_TCI_CTRL_ADR, 10, 0, 0x0000 }, + { KM_TCI_CTRL_CNT, 16, 16, 0x0000 }, +}; + +static nthw_fpga_field_init_s km_tci_data_fields[] = { + { KM_TCI_DATA_COLOR, 32, 0, 0x0000 }, + { KM_TCI_DATA_FT, 4, 32, 0x0000 }, +}; + +static nthw_fpga_field_init_s km_tcq_ctrl_fields[] = { + { KM_TCQ_CTRL_ADR, 7, 0, 0x0000 }, + { KM_TCQ_CTRL_CNT, 5, 16, 0x0000 }, +}; + +static nthw_fpga_field_init_s km_tcq_data_fields[] = { + { KM_TCQ_DATA_BANK_MASK, 12, 0, 0x0000 }, + { KM_TCQ_DATA_QUAL, 3, 12, 0x0000 }, +}; + +static nthw_fpga_register_init_s km_registers[] = { + { KM_CAM_CTRL, 2, 32, NTHW_FPGA_REG_TYPE_WO, 0, 2, km_cam_ctrl_fields }, + { KM_CAM_DATA, 3, 216, NTHW_FPGA_REG_TYPE_WO, 0, 12, km_cam_data_fields }, + { KM_RCP_CTRL, 0, 32, NTHW_FPGA_REG_TYPE_WO, 0, 2, km_rcp_ctrl_fields }, + { KM_RCP_DATA, 1, 781, NTHW_FPGA_REG_TYPE_WO, 0, 44, km_rcp_data_fields }, + { KM_STATUS, 10, 1, NTHW_FPGA_REG_TYPE_RO, 0, 1, km_status_fields }, + { KM_TCAM_CTRL, 4, 32, NTHW_FPGA_REG_TYPE_WO, 0, 2, km_tcam_ctrl_fields }, + { KM_TCAM_DATA, 5, 72, NTHW_FPGA_REG_TYPE_WO, 0, 1, km_tcam_data_fields }, + { KM_TCI_CTRL, 6, 32, NTHW_FPGA_REG_TYPE_WO, 0, 2, km_tci_ctrl_fields }, + { KM_TCI_DATA, 7, 36, NTHW_FPGA_REG_TYPE_WO, 0, 2, km_tci_data_fields }, + { KM_TCQ_CTRL, 8, 21, NTHW_FPGA_REG_TYPE_WO, 0, 2, km_tcq_ctrl_fields }, + { KM_TCQ_DATA, 9, 15, NTHW_FPGA_REG_TYPE_WO, 0, 2, km_tcq_data_fields }, +}; + +static nthw_fpga_field_init_s 
mac_pcs_bad_code_fields[] = { + { MAC_PCS_BAD_CODE_CODE_ERR, 16, 0, 0x0000 }, +}; + +static nthw_fpga_field_init_s mac_pcs_bip_err_fields[] = { + { MAC_PCS_BIP_ERR_BIP_ERR, 640, 0, 0x0000 }, +}; + +static nthw_fpga_field_init_s mac_pcs_block_lock_fields[] = { + { MAC_PCS_BLOCK_LOCK_LOCK, 20, 0, 0x0000 }, +}; + +static nthw_fpga_field_init_s mac_pcs_block_lock_chg_fields[] = { + { MAC_PCS_BLOCK_LOCK_CHG_LOCK_CHG, 20, 0, 0x0000 }, +}; + +static nthw_fpga_field_init_s mac_pcs_debounce_ctrl_fields[] = { + { MAC_PCS_DEBOUNCE_CTRL_NT_DEBOUNCE_LATENCY, 8, 8, 10 }, + { MAC_PCS_DEBOUNCE_CTRL_NT_FORCE_LINK_DOWN, 1, 16, 0 }, + { MAC_PCS_DEBOUNCE_CTRL_NT_LINKUP_LATENCY, 8, 0, 10 }, + { MAC_PCS_DEBOUNCE_CTRL_NT_PORT_CTRL, 2, 17, 2 }, +}; + +static nthw_fpga_field_init_s mac_pcs_drp_ctrl_fields[] = { + { MAC_PCS_DRP_CTRL_ADR, 10, 16, 0 }, { MAC_PCS_DRP_CTRL_DATA, 16, 0, 0 }, + { MAC_PCS_DRP_CTRL_DBG_BUSY, 1, 30, 0x0000 }, { MAC_PCS_DRP_CTRL_DONE, 1, 31, 0x0000 }, { MAC_PCS_DRP_CTRL_MOD_ADR, 3, 26, 0 }, { MAC_PCS_DRP_CTRL_WREN, 1, 29, 0 }, }; -static nthw_fpga_field_init_s mac_pcs_fec_ctrl_fields[] = { - { MAC_PCS_FEC_CTRL_RS_FEC_CTRL_IN, 5, 0, 0 }, +static nthw_fpga_field_init_s mac_pcs_fec_ctrl_fields[] = { + { MAC_PCS_FEC_CTRL_RS_FEC_CTRL_IN, 5, 0, 0 }, +}; + +static nthw_fpga_field_init_s mac_pcs_fec_cw_cnt_fields[] = { + { MAC_PCS_FEC_CW_CNT_CW_CNT, 32, 0, 0x0000 }, +}; + +static nthw_fpga_field_init_s mac_pcs_fec_err_cnt_0_fields[] = { + { MAC_PCS_FEC_ERR_CNT_0_ERR_CNT, 32, 0, 0x0000 }, +}; + +static nthw_fpga_field_init_s mac_pcs_fec_err_cnt_1_fields[] = { + { MAC_PCS_FEC_ERR_CNT_1_ERR_CNT, 32, 0, 0x0000 }, +}; + +static nthw_fpga_field_init_s mac_pcs_fec_err_cnt_2_fields[] = { + { MAC_PCS_FEC_ERR_CNT_2_ERR_CNT, 32, 0, 0x0000 }, +}; + +static nthw_fpga_field_init_s mac_pcs_fec_err_cnt_3_fields[] = { + { MAC_PCS_FEC_ERR_CNT_3_ERR_CNT, 32, 0, 0x0000 }, +}; + +static nthw_fpga_field_init_s mac_pcs_fec_lane_dly_0_fields[] = { + { MAC_PCS_FEC_LANE_DLY_0_DLY, 14, 0, 0x0000 }, +}; + +static nthw_fpga_field_init_s mac_pcs_fec_lane_dly_1_fields[] = { + { MAC_PCS_FEC_LANE_DLY_1_DLY, 14, 0, 0x0000 }, +}; + +static nthw_fpga_field_init_s mac_pcs_fec_lane_dly_2_fields[] = { + { MAC_PCS_FEC_LANE_DLY_2_DLY, 14, 0, 0x0000 }, +}; + +static nthw_fpga_field_init_s mac_pcs_fec_lane_dly_3_fields[] = { + { MAC_PCS_FEC_LANE_DLY_3_DLY, 14, 0, 0x0000 }, +}; + +static nthw_fpga_field_init_s mac_pcs_fec_lane_map_fields[] = { + { MAC_PCS_FEC_LANE_MAP_MAPPING, 8, 0, 0x0000 }, +}; + +static nthw_fpga_field_init_s mac_pcs_fec_stat_fields[] = { + { MAC_PCS_FEC_STAT_AM_LOCK, 1, 10, 0x0000 }, + { MAC_PCS_FEC_STAT_AM_LOCK_0, 1, 3, 0x0000 }, + { MAC_PCS_FEC_STAT_AM_LOCK_1, 1, 4, 0x0000 }, + { MAC_PCS_FEC_STAT_AM_LOCK_2, 1, 5, 0x0000 }, + { MAC_PCS_FEC_STAT_AM_LOCK_3, 1, 6, 0x0000 }, + { MAC_PCS_FEC_STAT_BLOCK_LOCK, 1, 9, 0x0000 }, + { MAC_PCS_FEC_STAT_BYPASS, 1, 0, 0x0000 }, + { MAC_PCS_FEC_STAT_FEC_LANE_ALGN, 1, 7, 0x0000 }, + { MAC_PCS_FEC_STAT_HI_SER, 1, 2, 0x0000 }, + { MAC_PCS_FEC_STAT_PCS_LANE_ALGN, 1, 8, 0x0000 }, + { MAC_PCS_FEC_STAT_VALID, 1, 1, 0x0000 }, +}; + +static nthw_fpga_field_init_s mac_pcs_fec_ucw_cnt_fields[] = { + { MAC_PCS_FEC_UCW_CNT_UCW_CNT, 32, 0, 0x0000 }, +}; + +static nthw_fpga_field_init_s mac_pcs_gty_ctl_rx_fields[] = { + { MAC_PCS_GTY_CTL_RX_CDR_HOLD_0, 1, 24, 0 }, { MAC_PCS_GTY_CTL_RX_CDR_HOLD_1, 1, 25, 0 }, + { MAC_PCS_GTY_CTL_RX_CDR_HOLD_2, 1, 26, 0 }, { MAC_PCS_GTY_CTL_RX_CDR_HOLD_3, 1, 27, 0 }, + { MAC_PCS_GTY_CTL_RX_EQUA_RST_0, 1, 20, 0 }, { MAC_PCS_GTY_CTL_RX_EQUA_RST_1, 1, 21, 0 }, + { 
MAC_PCS_GTY_CTL_RX_EQUA_RST_2, 1, 22, 0 }, { MAC_PCS_GTY_CTL_RX_EQUA_RST_3, 1, 23, 0 }, + { MAC_PCS_GTY_CTL_RX_LPM_EN_0, 1, 16, 0 }, { MAC_PCS_GTY_CTL_RX_LPM_EN_1, 1, 17, 0 }, + { MAC_PCS_GTY_CTL_RX_LPM_EN_2, 1, 18, 0 }, { MAC_PCS_GTY_CTL_RX_LPM_EN_3, 1, 19, 0 }, + { MAC_PCS_GTY_CTL_RX_POLARITY_0, 1, 0, 0 }, { MAC_PCS_GTY_CTL_RX_POLARITY_1, 1, 1, 0 }, + { MAC_PCS_GTY_CTL_RX_POLARITY_2, 1, 2, 0 }, { MAC_PCS_GTY_CTL_RX_POLARITY_3, 1, 3, 0 }, + { MAC_PCS_GTY_CTL_RX_RATE_0, 3, 4, 0 }, { MAC_PCS_GTY_CTL_RX_RATE_1, 3, 7, 0 }, + { MAC_PCS_GTY_CTL_RX_RATE_2, 3, 10, 0 }, { MAC_PCS_GTY_CTL_RX_RATE_3, 3, 13, 0 }, +}; + +static nthw_fpga_field_init_s mac_pcs_gty_ctl_tx_fields[] = { + { MAC_PCS_GTY_CTL_TX_INHIBIT_0, 1, 4, 0 }, { MAC_PCS_GTY_CTL_TX_INHIBIT_1, 1, 5, 0 }, + { MAC_PCS_GTY_CTL_TX_INHIBIT_2, 1, 6, 0 }, { MAC_PCS_GTY_CTL_TX_INHIBIT_3, 1, 7, 0 }, + { MAC_PCS_GTY_CTL_TX_POLARITY_0, 1, 0, 0 }, { MAC_PCS_GTY_CTL_TX_POLARITY_1, 1, 1, 0 }, + { MAC_PCS_GTY_CTL_TX_POLARITY_2, 1, 2, 0 }, { MAC_PCS_GTY_CTL_TX_POLARITY_3, 1, 3, 0 }, +}; + +static nthw_fpga_field_init_s mac_pcs_gty_diff_ctl_fields[] = { + { MAC_PCS_GTY_DIFF_CTL_TX_DIFF_CTL_0, 5, 0, 24 }, + { MAC_PCS_GTY_DIFF_CTL_TX_DIFF_CTL_1, 5, 5, 24 }, + { MAC_PCS_GTY_DIFF_CTL_TX_DIFF_CTL_2, 5, 10, 24 }, + { MAC_PCS_GTY_DIFF_CTL_TX_DIFF_CTL_3, 5, 15, 24 }, +}; + +static nthw_fpga_field_init_s mac_pcs_gty_loop_fields[] = { + { MAC_PCS_GTY_LOOP_GT_LOOP_0, 3, 0, 0 }, + { MAC_PCS_GTY_LOOP_GT_LOOP_1, 3, 3, 0 }, + { MAC_PCS_GTY_LOOP_GT_LOOP_2, 3, 6, 0 }, + { MAC_PCS_GTY_LOOP_GT_LOOP_3, 3, 9, 0 }, +}; + +static nthw_fpga_field_init_s mac_pcs_gty_post_cursor_fields[] = { + { MAC_PCS_GTY_POST_CURSOR_TX_POST_CSR_0, 5, 0, 20 }, + { MAC_PCS_GTY_POST_CURSOR_TX_POST_CSR_1, 5, 5, 20 }, + { MAC_PCS_GTY_POST_CURSOR_TX_POST_CSR_2, 5, 10, 20 }, + { MAC_PCS_GTY_POST_CURSOR_TX_POST_CSR_3, 5, 15, 20 }, +}; + +static nthw_fpga_field_init_s mac_pcs_gty_prbs_sel_fields[] = { + { MAC_PCS_GTY_PRBS_SEL_RX_PRBS_SEL_0, 4, 16, 0 }, + { MAC_PCS_GTY_PRBS_SEL_RX_PRBS_SEL_1, 4, 20, 0 }, + { MAC_PCS_GTY_PRBS_SEL_RX_PRBS_SEL_2, 4, 24, 0 }, + { MAC_PCS_GTY_PRBS_SEL_RX_PRBS_SEL_3, 4, 28, 0 }, + { MAC_PCS_GTY_PRBS_SEL_TX_PRBS_SEL_0, 4, 0, 0 }, + { MAC_PCS_GTY_PRBS_SEL_TX_PRBS_SEL_1, 4, 4, 0 }, + { MAC_PCS_GTY_PRBS_SEL_TX_PRBS_SEL_2, 4, 8, 0 }, + { MAC_PCS_GTY_PRBS_SEL_TX_PRBS_SEL_3, 4, 12, 0 }, +}; + +static nthw_fpga_field_init_s mac_pcs_gty_pre_cursor_fields[] = { + { MAC_PCS_GTY_PRE_CURSOR_TX_PRE_CSR_0, 5, 0, 0 }, + { MAC_PCS_GTY_PRE_CURSOR_TX_PRE_CSR_1, 5, 5, 0 }, + { MAC_PCS_GTY_PRE_CURSOR_TX_PRE_CSR_2, 5, 10, 0 }, + { MAC_PCS_GTY_PRE_CURSOR_TX_PRE_CSR_3, 5, 15, 0 }, +}; + +static nthw_fpga_field_init_s mac_pcs_gty_rx_buf_stat_fields[] = { + { MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_0, 3, 0, 0x0000 }, + { MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_1, 3, 3, 0x0000 }, + { MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_2, 3, 6, 0x0000 }, + { MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_3, 3, 9, 0x0000 }, + { MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_CHANGED_0, 3, 12, 0x0000 }, + { MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_CHANGED_1, 3, 15, 0x0000 }, + { MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_CHANGED_2, 3, 18, 0x0000 }, + { MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_CHANGED_3, 3, 21, 0x0000 }, +}; + +static nthw_fpga_field_init_s mac_pcs_gty_scan_ctl_fields[] = { + { MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_RST_0, 1, 0, 0 }, + { MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_RST_1, 1, 1, 0 }, + { MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_RST_2, 1, 2, 0 }, + { MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_RST_3, 1, 3, 0 }, + { MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_TRG_0, 1, 4, 0 }, + { 
MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_TRG_1, 1, 5, 0 }, + { MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_TRG_2, 1, 6, 0 }, + { MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_TRG_3, 1, 7, 0 }, + { MAC_PCS_GTY_SCAN_CTL_PRBS_ERR_INS_0, 1, 12, 0 }, + { MAC_PCS_GTY_SCAN_CTL_PRBS_ERR_INS_1, 1, 13, 0 }, + { MAC_PCS_GTY_SCAN_CTL_PRBS_ERR_INS_2, 1, 14, 0 }, + { MAC_PCS_GTY_SCAN_CTL_PRBS_ERR_INS_3, 1, 15, 0 }, + { MAC_PCS_GTY_SCAN_CTL_PRBS_RST_0, 1, 8, 0 }, + { MAC_PCS_GTY_SCAN_CTL_PRBS_RST_1, 1, 9, 0 }, + { MAC_PCS_GTY_SCAN_CTL_PRBS_RST_2, 1, 10, 0 }, + { MAC_PCS_GTY_SCAN_CTL_PRBS_RST_3, 1, 11, 0 }, +}; + +static nthw_fpga_field_init_s mac_pcs_gty_scan_stat_fields[] = { + { MAC_PCS_GTY_SCAN_STAT_EYE_SCAN_ERR_0, 1, 0, 0x0000 }, + { MAC_PCS_GTY_SCAN_STAT_EYE_SCAN_ERR_1, 1, 1, 0x0000 }, + { MAC_PCS_GTY_SCAN_STAT_EYE_SCAN_ERR_2, 1, 2, 0x0000 }, + { MAC_PCS_GTY_SCAN_STAT_EYE_SCAN_ERR_3, 1, 3, 0x0000 }, + { MAC_PCS_GTY_SCAN_STAT_PRBS_ERR_0, 1, 4, 0x0000 }, + { MAC_PCS_GTY_SCAN_STAT_PRBS_ERR_1, 1, 5, 0x0000 }, + { MAC_PCS_GTY_SCAN_STAT_PRBS_ERR_2, 1, 6, 0x0000 }, + { MAC_PCS_GTY_SCAN_STAT_PRBS_ERR_3, 1, 7, 0x0000 }, +}; + +static nthw_fpga_field_init_s mac_pcs_gty_stat_fields[] = { + { MAC_PCS_GTY_STAT_RX_RST_DONE_0, 1, 4, 0x0000 }, + { MAC_PCS_GTY_STAT_RX_RST_DONE_1, 1, 5, 0x0000 }, + { MAC_PCS_GTY_STAT_RX_RST_DONE_2, 1, 6, 0x0000 }, + { MAC_PCS_GTY_STAT_RX_RST_DONE_3, 1, 7, 0x0000 }, + { MAC_PCS_GTY_STAT_TX_BUF_STAT_0, 2, 8, 0x0000 }, + { MAC_PCS_GTY_STAT_TX_BUF_STAT_1, 2, 10, 0x0000 }, + { MAC_PCS_GTY_STAT_TX_BUF_STAT_2, 2, 12, 0x0000 }, + { MAC_PCS_GTY_STAT_TX_BUF_STAT_3, 2, 14, 0x0000 }, + { MAC_PCS_GTY_STAT_TX_RST_DONE_0, 1, 0, 0x0000 }, + { MAC_PCS_GTY_STAT_TX_RST_DONE_1, 1, 1, 0x0000 }, + { MAC_PCS_GTY_STAT_TX_RST_DONE_2, 1, 2, 0x0000 }, + { MAC_PCS_GTY_STAT_TX_RST_DONE_3, 1, 3, 0x0000 }, +}; + +static nthw_fpga_field_init_s mac_pcs_link_summary_fields[] = { + { MAC_PCS_LINK_SUMMARY_ABS, 1, 0, 0x0000 }, + { MAC_PCS_LINK_SUMMARY_LH_ABS, 1, 2, 0x0000 }, + { MAC_PCS_LINK_SUMMARY_LH_LOCAL_FAULT, 1, 13, 0 }, + { MAC_PCS_LINK_SUMMARY_LH_REMOTE_FAULT, 1, 14, 0 }, + { MAC_PCS_LINK_SUMMARY_LINK_DOWN_CNT, 8, 4, 0 }, + { MAC_PCS_LINK_SUMMARY_LL_PHY_LINK_STATE, 1, 3, 0x0000 }, + { MAC_PCS_LINK_SUMMARY_LOCAL_FAULT, 1, 17, 0x0000 }, + { MAC_PCS_LINK_SUMMARY_NIM_INTERR, 1, 12, 0x0000 }, + { MAC_PCS_LINK_SUMMARY_NT_PHY_LINK_STATE, 1, 1, 0x0000 }, + { MAC_PCS_LINK_SUMMARY_REMOTE_FAULT, 1, 18, 0x0000 }, + { MAC_PCS_LINK_SUMMARY_RESERVED, 2, 15, 0 }, +}; + +static nthw_fpga_field_init_s mac_pcs_mac_pcs_config_fields[] = { + { MAC_PCS_MAC_PCS_CONFIG_RX_CORE_RST, 1, 3, 0 }, + { MAC_PCS_MAC_PCS_CONFIG_RX_ENABLE, 1, 5, 0 }, + { MAC_PCS_MAC_PCS_CONFIG_RX_FORCE_RESYNC, 1, 6, 0 }, + { MAC_PCS_MAC_PCS_CONFIG_RX_PATH_RST, 1, 1, 0 }, + { MAC_PCS_MAC_PCS_CONFIG_RX_TEST_PATTERN, 1, 7, 0 }, + { MAC_PCS_MAC_PCS_CONFIG_TX_CORE_RST, 1, 2, 0 }, + { MAC_PCS_MAC_PCS_CONFIG_TX_ENABLE, 1, 8, 1 }, + { MAC_PCS_MAC_PCS_CONFIG_TX_FCS_REMOVE, 1, 4, 1 }, + { MAC_PCS_MAC_PCS_CONFIG_TX_PATH_RST, 1, 0, 0 }, + { MAC_PCS_MAC_PCS_CONFIG_TX_SEND_IDLE, 1, 9, 0 }, + { MAC_PCS_MAC_PCS_CONFIG_TX_SEND_RFI, 1, 10, 0 }, + { MAC_PCS_MAC_PCS_CONFIG_TX_TEST_PATTERN, 1, 11, 0 }, +}; + +static nthw_fpga_field_init_s mac_pcs_max_pkt_len_fields[] = { + { MAC_PCS_MAX_PKT_LEN_MAX_LEN, 14, 0, 10000 }, +}; + +static nthw_fpga_field_init_s mac_pcs_phymac_misc_fields[] = { + { MAC_PCS_PHYMAC_MISC_TS_EOP, 1, 3, 1 }, + { MAC_PCS_PHYMAC_MISC_TX_MUX_STATE, 4, 4, 0x0000 }, + { MAC_PCS_PHYMAC_MISC_TX_SEL_HOST, 1, 0, 1 }, + { MAC_PCS_PHYMAC_MISC_TX_SEL_RX_LOOP, 1, 2, 0 }, + { MAC_PCS_PHYMAC_MISC_TX_SEL_TFG, 1, 1, 0 
}, +}; + +static nthw_fpga_field_init_s mac_pcs_phy_stat_fields[] = { + { MAC_PCS_PHY_STAT_ALARM, 1, 2, 0x0000 }, + { MAC_PCS_PHY_STAT_MOD_PRS, 1, 1, 0x0000 }, + { MAC_PCS_PHY_STAT_RX_LOS, 1, 0, 0x0000 }, +}; + +static nthw_fpga_field_init_s mac_pcs_stat_pcs_rx_fields[] = { + { MAC_PCS_STAT_PCS_RX_ALIGNED, 1, 1, 0x0000 }, + { MAC_PCS_STAT_PCS_RX_ALIGNED_ERR, 1, 2, 0x0000 }, + { MAC_PCS_STAT_PCS_RX_GOT_SIGNAL_OS, 1, 9, 0x0000 }, + { MAC_PCS_STAT_PCS_RX_HI_BER, 1, 8, 0x0000 }, + { MAC_PCS_STAT_PCS_RX_INTERNAL_LOCAL_FAULT, 1, 4, 0x0000 }, + { MAC_PCS_STAT_PCS_RX_LOCAL_FAULT, 1, 6, 0x0000 }, + { MAC_PCS_STAT_PCS_RX_MISALIGNED, 1, 3, 0x0000 }, + { MAC_PCS_STAT_PCS_RX_RECEIVED_LOCAL_FAULT, 1, 5, 0x0000 }, + { MAC_PCS_STAT_PCS_RX_REMOTE_FAULT, 1, 7, 0x0000 }, + { MAC_PCS_STAT_PCS_RX_STATUS, 1, 0, 0x0000 }, +}; + +static nthw_fpga_field_init_s mac_pcs_stat_pcs_rx_latch_fields[] = { + { MAC_PCS_STAT_PCS_RX_LATCH_ALIGNED, 1, 1, 0x0000 }, + { MAC_PCS_STAT_PCS_RX_LATCH_ALIGNED_ERR, 1, 2, 0x0000 }, + { MAC_PCS_STAT_PCS_RX_LATCH_GOT_SIGNAL_OS, 1, 9, 0x0000 }, + { MAC_PCS_STAT_PCS_RX_LATCH_HI_BER, 1, 8, 0x0000 }, + { MAC_PCS_STAT_PCS_RX_LATCH_INTERNAL_LOCAL_FAULT, 1, 4, 0x0000 }, + { MAC_PCS_STAT_PCS_RX_LATCH_LOCAL_FAULT, 1, 6, 0x0000 }, + { MAC_PCS_STAT_PCS_RX_LATCH_MISALIGNED, 1, 3, 0x0000 }, + { MAC_PCS_STAT_PCS_RX_LATCH_RECEIVED_LOCAL_FAULT, 1, 5, 0x0000 }, + { MAC_PCS_STAT_PCS_RX_LATCH_REMOTE_FAULT, 1, 7, 0x0000 }, + { MAC_PCS_STAT_PCS_RX_LATCH_STATUS, 1, 0, 0x0000 }, +}; + +static nthw_fpga_field_init_s mac_pcs_stat_pcs_tx_fields[] = { + { MAC_PCS_STAT_PCS_TX_LOCAL_FAULT, 1, 0, 0x0000 }, + { MAC_PCS_STAT_PCS_TX_LOCAL_FAULT_CHANGED, 1, 5, 0x0000 }, + { MAC_PCS_STAT_PCS_TX_PTP_FIFO_READ_ERROR, 1, 4, 0x0000 }, + { MAC_PCS_STAT_PCS_TX_PTP_FIFO_READ_ERROR_CHANGED, 1, 9, 0x0000 }, + { MAC_PCS_STAT_PCS_TX_PTP_FIFO_WRITE_ERROR, 1, 3, 0x0000 }, + { MAC_PCS_STAT_PCS_TX_PTP_FIFO_WRITE_ERROR_CHANGED, 1, 8, 0x0000 }, + { MAC_PCS_STAT_PCS_TX_TX_OVFOUT, 1, 2, 0x0000 }, + { MAC_PCS_STAT_PCS_TX_TX_OVFOUT_CHANGED, 1, 7, 0x0000 }, + { MAC_PCS_STAT_PCS_TX_TX_UNFOUT, 1, 1, 0x0000 }, + { MAC_PCS_STAT_PCS_TX_TX_UNFOUT_CHANGED, 1, 6, 0x0000 }, +}; + +static nthw_fpga_field_init_s mac_pcs_synced_fields[] = { + { MAC_PCS_SYNCED_SYNC, 20, 0, 0x0000 }, +}; + +static nthw_fpga_field_init_s mac_pcs_synced_err_fields[] = { + { MAC_PCS_SYNCED_ERR_SYNC_ERROR, 20, 0, 0x0000 }, +}; + +static nthw_fpga_field_init_s mac_pcs_test_err_fields[] = { + { MAC_PCS_TEST_ERR_CODE_ERR, 16, 0, 0x0000 }, +}; + +static nthw_fpga_field_init_s mac_pcs_timestamp_comp_fields[] = { + { MAC_PCS_TIMESTAMP_COMP_RX_DLY, 16, 0, 1451 }, + { MAC_PCS_TIMESTAMP_COMP_TX_DLY, 16, 16, 1440 }, +}; + +static nthw_fpga_field_init_s mac_pcs_vl_demuxed_fields[] = { + { MAC_PCS_VL_DEMUXED_LOCK, 20, 0, 0x0000 }, +}; + +static nthw_fpga_field_init_s mac_pcs_vl_demuxed_chg_fields[] = { + { MAC_PCS_VL_DEMUXED_CHG_LOCK_CHG, 20, 0, 0x0000 }, +}; + +static nthw_fpga_register_init_s mac_pcs_registers[] = { + { MAC_PCS_BAD_CODE, 26, 16, NTHW_FPGA_REG_TYPE_RO, 0, 1, mac_pcs_bad_code_fields }, + { MAC_PCS_BIP_ERR, 31, 640, NTHW_FPGA_REG_TYPE_RO, 0, 1, mac_pcs_bip_err_fields }, + { MAC_PCS_BLOCK_LOCK, 27, 20, NTHW_FPGA_REG_TYPE_RO, 0, 1, mac_pcs_block_lock_fields }, + { + MAC_PCS_BLOCK_LOCK_CHG, 28, 20, NTHW_FPGA_REG_TYPE_RO, 0, 1, + mac_pcs_block_lock_chg_fields + }, + { + MAC_PCS_DEBOUNCE_CTRL, 1, 19, NTHW_FPGA_REG_TYPE_RW, 264714, 4, + mac_pcs_debounce_ctrl_fields + }, + { MAC_PCS_DRP_CTRL, 43, 32, NTHW_FPGA_REG_TYPE_MIXED, 0, 6, mac_pcs_drp_ctrl_fields }, + { MAC_PCS_FEC_CTRL, 
2, 5, NTHW_FPGA_REG_TYPE_RW, 0, 1, mac_pcs_fec_ctrl_fields }, + { MAC_PCS_FEC_CW_CNT, 9, 32, NTHW_FPGA_REG_TYPE_RO, 0, 1, mac_pcs_fec_cw_cnt_fields }, + { + MAC_PCS_FEC_ERR_CNT_0, 11, 32, NTHW_FPGA_REG_TYPE_RO, 0, 1, + mac_pcs_fec_err_cnt_0_fields + }, + { + MAC_PCS_FEC_ERR_CNT_1, 12, 32, NTHW_FPGA_REG_TYPE_RO, 0, 1, + mac_pcs_fec_err_cnt_1_fields + }, + { + MAC_PCS_FEC_ERR_CNT_2, 13, 32, NTHW_FPGA_REG_TYPE_RO, 0, 1, + mac_pcs_fec_err_cnt_2_fields + }, + { + MAC_PCS_FEC_ERR_CNT_3, 14, 32, NTHW_FPGA_REG_TYPE_RO, 0, 1, + mac_pcs_fec_err_cnt_3_fields + }, + { + MAC_PCS_FEC_LANE_DLY_0, 5, 14, NTHW_FPGA_REG_TYPE_RO, 0, 1, + mac_pcs_fec_lane_dly_0_fields + }, + { + MAC_PCS_FEC_LANE_DLY_1, 6, 14, NTHW_FPGA_REG_TYPE_RO, 0, 1, + mac_pcs_fec_lane_dly_1_fields + }, + { + MAC_PCS_FEC_LANE_DLY_2, 7, 14, NTHW_FPGA_REG_TYPE_RO, 0, 1, + mac_pcs_fec_lane_dly_2_fields + }, + { + MAC_PCS_FEC_LANE_DLY_3, 8, 14, NTHW_FPGA_REG_TYPE_RO, 0, 1, + mac_pcs_fec_lane_dly_3_fields + }, + { MAC_PCS_FEC_LANE_MAP, 4, 8, NTHW_FPGA_REG_TYPE_RO, 0, 1, mac_pcs_fec_lane_map_fields }, + { MAC_PCS_FEC_STAT, 3, 11, NTHW_FPGA_REG_TYPE_RO, 0, 11, mac_pcs_fec_stat_fields }, + { MAC_PCS_FEC_UCW_CNT, 10, 32, NTHW_FPGA_REG_TYPE_RO, 0, 1, mac_pcs_fec_ucw_cnt_fields }, + { MAC_PCS_GTY_CTL_RX, 38, 28, NTHW_FPGA_REG_TYPE_RW, 0, 20, mac_pcs_gty_ctl_rx_fields }, + { MAC_PCS_GTY_CTL_TX, 39, 8, NTHW_FPGA_REG_TYPE_RW, 0, 8, mac_pcs_gty_ctl_tx_fields }, + { + MAC_PCS_GTY_DIFF_CTL, 35, 20, NTHW_FPGA_REG_TYPE_RW, 811800, 4, + mac_pcs_gty_diff_ctl_fields + }, + { MAC_PCS_GTY_LOOP, 20, 12, NTHW_FPGA_REG_TYPE_RW, 0, 4, mac_pcs_gty_loop_fields }, + { + MAC_PCS_GTY_POST_CURSOR, 36, 20, NTHW_FPGA_REG_TYPE_RW, 676500, 4, + mac_pcs_gty_post_cursor_fields + }, + { MAC_PCS_GTY_PRBS_SEL, 40, 32, NTHW_FPGA_REG_TYPE_RW, 0, 8, mac_pcs_gty_prbs_sel_fields }, + { + MAC_PCS_GTY_PRE_CURSOR, 37, 20, NTHW_FPGA_REG_TYPE_RW, 0, 4, + mac_pcs_gty_pre_cursor_fields + }, + { + MAC_PCS_GTY_RX_BUF_STAT, 34, 24, NTHW_FPGA_REG_TYPE_RO, 0, 8, + mac_pcs_gty_rx_buf_stat_fields + }, + { + MAC_PCS_GTY_SCAN_CTL, 41, 16, NTHW_FPGA_REG_TYPE_RW, 0, 16, + mac_pcs_gty_scan_ctl_fields + }, + { + MAC_PCS_GTY_SCAN_STAT, 42, 8, NTHW_FPGA_REG_TYPE_RO, 0, 8, + mac_pcs_gty_scan_stat_fields + }, + { MAC_PCS_GTY_STAT, 33, 16, NTHW_FPGA_REG_TYPE_RO, 0, 12, mac_pcs_gty_stat_fields }, + { MAC_PCS_LINK_SUMMARY, 0, 19, NTHW_FPGA_REG_TYPE_RO, 0, 11, mac_pcs_link_summary_fields }, + { + MAC_PCS_MAC_PCS_CONFIG, 19, 12, NTHW_FPGA_REG_TYPE_RW, 272, 12, + mac_pcs_mac_pcs_config_fields + }, + { + MAC_PCS_MAX_PKT_LEN, 17, 14, NTHW_FPGA_REG_TYPE_RW, 10000, 1, + mac_pcs_max_pkt_len_fields + }, + { MAC_PCS_PHYMAC_MISC, 16, 8, NTHW_FPGA_REG_TYPE_MIXED, 9, 5, mac_pcs_phymac_misc_fields }, + { MAC_PCS_PHY_STAT, 15, 3, NTHW_FPGA_REG_TYPE_RO, 0, 3, mac_pcs_phy_stat_fields }, + { MAC_PCS_STAT_PCS_RX, 21, 10, NTHW_FPGA_REG_TYPE_RO, 0, 10, mac_pcs_stat_pcs_rx_fields }, + { + MAC_PCS_STAT_PCS_RX_LATCH, 22, 10, NTHW_FPGA_REG_TYPE_RO, 0, 10, + mac_pcs_stat_pcs_rx_latch_fields + }, + { MAC_PCS_STAT_PCS_TX, 23, 10, NTHW_FPGA_REG_TYPE_RO, 0, 10, mac_pcs_stat_pcs_tx_fields }, + { MAC_PCS_SYNCED, 24, 20, NTHW_FPGA_REG_TYPE_RO, 0, 1, mac_pcs_synced_fields }, + { MAC_PCS_SYNCED_ERR, 25, 20, NTHW_FPGA_REG_TYPE_RO, 0, 1, mac_pcs_synced_err_fields }, + { MAC_PCS_TEST_ERR, 32, 16, NTHW_FPGA_REG_TYPE_RO, 0, 1, mac_pcs_test_err_fields }, + { + MAC_PCS_TIMESTAMP_COMP, 18, 32, NTHW_FPGA_REG_TYPE_RW, 94373291, 2, + mac_pcs_timestamp_comp_fields + }, + { MAC_PCS_VL_DEMUXED, 29, 20, NTHW_FPGA_REG_TYPE_RO, 0, 1, mac_pcs_vl_demuxed_fields }, 
+ { + MAC_PCS_VL_DEMUXED_CHG, 30, 20, NTHW_FPGA_REG_TYPE_RO, 0, 1, + mac_pcs_vl_demuxed_chg_fields + }, +}; + +static nthw_fpga_field_init_s mac_rx_bad_fcs_fields[] = { + { MAC_RX_BAD_FCS_COUNT, 32, 0, 0x0000 }, +}; + +static nthw_fpga_field_init_s mac_rx_fragment_fields[] = { + { MAC_RX_FRAGMENT_COUNT, 32, 0, 0x0000 }, +}; + +static nthw_fpga_field_init_s mac_rx_packet_bad_fcs_fields[] = { + { MAC_RX_PACKET_BAD_FCS_COUNT, 32, 0, 0x0000 }, +}; + +static nthw_fpga_field_init_s mac_rx_packet_small_fields[] = { + { MAC_RX_PACKET_SMALL_COUNT, 32, 0, 0x0000 }, +}; + +static nthw_fpga_field_init_s mac_rx_total_bytes_fields[] = { + { MAC_RX_TOTAL_BYTES_COUNT, 32, 0, 0x0000 }, +}; + +static nthw_fpga_field_init_s mac_rx_total_good_bytes_fields[] = { + { MAC_RX_TOTAL_GOOD_BYTES_COUNT, 32, 0, 0x0000 }, +}; + +static nthw_fpga_field_init_s mac_rx_total_good_packets_fields[] = { + { MAC_RX_TOTAL_GOOD_PACKETS_COUNT, 32, 0, 0x0000 }, +}; + +static nthw_fpga_field_init_s mac_rx_total_packets_fields[] = { + { MAC_RX_TOTAL_PACKETS_COUNT, 32, 0, 0x0000 }, +}; + +static nthw_fpga_field_init_s mac_rx_undersize_fields[] = { + { MAC_RX_UNDERSIZE_COUNT, 32, 0, 0x0000 }, +}; + +static nthw_fpga_register_init_s mac_rx_registers[] = { + { MAC_RX_BAD_FCS, 0, 32, NTHW_FPGA_REG_TYPE_RO, 0, 1, mac_rx_bad_fcs_fields }, + { MAC_RX_FRAGMENT, 6, 32, NTHW_FPGA_REG_TYPE_RO, 0, 1, mac_rx_fragment_fields }, + { + MAC_RX_PACKET_BAD_FCS, 7, 32, NTHW_FPGA_REG_TYPE_RO, 0, 1, + mac_rx_packet_bad_fcs_fields + }, + { MAC_RX_PACKET_SMALL, 3, 32, NTHW_FPGA_REG_TYPE_RO, 0, 1, mac_rx_packet_small_fields }, + { MAC_RX_TOTAL_BYTES, 4, 32, NTHW_FPGA_REG_TYPE_RO, 0, 1, mac_rx_total_bytes_fields }, + { + MAC_RX_TOTAL_GOOD_BYTES, 5, 32, NTHW_FPGA_REG_TYPE_RO, 0, 1, + mac_rx_total_good_bytes_fields + }, + { + MAC_RX_TOTAL_GOOD_PACKETS, 2, 32, NTHW_FPGA_REG_TYPE_RO, 0, 1, + mac_rx_total_good_packets_fields + }, + { MAC_RX_TOTAL_PACKETS, 1, 32, NTHW_FPGA_REG_TYPE_RO, 0, 1, mac_rx_total_packets_fields }, + { MAC_RX_UNDERSIZE, 8, 32, NTHW_FPGA_REG_TYPE_RO, 0, 1, mac_rx_undersize_fields }, +}; + +static nthw_fpga_field_init_s mac_tx_packet_small_fields[] = { + { MAC_TX_PACKET_SMALL_COUNT, 32, 0, 0x0000 }, +}; + +static nthw_fpga_field_init_s mac_tx_total_bytes_fields[] = { + { MAC_TX_TOTAL_BYTES_COUNT, 32, 0, 0x0000 }, +}; + +static nthw_fpga_field_init_s mac_tx_total_good_bytes_fields[] = { + { MAC_TX_TOTAL_GOOD_BYTES_COUNT, 32, 0, 0x0000 }, +}; + +static nthw_fpga_field_init_s mac_tx_total_good_packets_fields[] = { + { MAC_TX_TOTAL_GOOD_PACKETS_COUNT, 32, 0, 0x0000 }, +}; + +static nthw_fpga_field_init_s mac_tx_total_packets_fields[] = { + { MAC_TX_TOTAL_PACKETS_COUNT, 32, 0, 0x0000 }, +}; + +static nthw_fpga_register_init_s mac_tx_registers[] = { + { MAC_TX_PACKET_SMALL, 2, 32, NTHW_FPGA_REG_TYPE_RO, 0, 1, mac_tx_packet_small_fields }, + { MAC_TX_TOTAL_BYTES, 3, 32, NTHW_FPGA_REG_TYPE_RO, 0, 1, mac_tx_total_bytes_fields }, + { + MAC_TX_TOTAL_GOOD_BYTES, 4, 32, NTHW_FPGA_REG_TYPE_RO, 0, 1, + mac_tx_total_good_bytes_fields + }, + { + MAC_TX_TOTAL_GOOD_PACKETS, 1, 32, NTHW_FPGA_REG_TYPE_RO, 0, 1, + mac_tx_total_good_packets_fields + }, + { MAC_TX_TOTAL_PACKETS, 0, 32, NTHW_FPGA_REG_TYPE_RO, 0, 1, mac_tx_total_packets_fields }, +}; + +static nthw_fpga_field_init_s pci_rd_tg_tg_ctrl_fields[] = { + { PCI_RD_TG_TG_CTRL_TG_RD_RDY, 1, 0, 0 }, +}; + +static nthw_fpga_field_init_s pci_rd_tg_tg_rdaddr_fields[] = { + { PCI_RD_TG_TG_RDADDR_RAM_ADDR, 9, 0, 0 }, +}; + +static nthw_fpga_field_init_s pci_rd_tg_tg_rddata0_fields[] = { + { 
PCI_RD_TG_TG_RDDATA0_PHYS_ADDR_LOW, 32, 0, 0 }, +}; + +static nthw_fpga_field_init_s pci_rd_tg_tg_rddata1_fields[] = { + { PCI_RD_TG_TG_RDDATA1_PHYS_ADDR_HIGH, 32, 0, 0 }, +}; + +static nthw_fpga_field_init_s pci_rd_tg_tg_rddata2_fields[] = { + { PCI_RD_TG_TG_RDDATA2_REQ_HID, 6, 22, 0 }, + { PCI_RD_TG_TG_RDDATA2_REQ_SIZE, 22, 0, 0 }, + { PCI_RD_TG_TG_RDDATA2_WAIT, 1, 30, 0 }, + { PCI_RD_TG_TG_RDDATA2_WRAP, 1, 31, 0 }, +}; + +static nthw_fpga_field_init_s pci_rd_tg_tg_rd_run_fields[] = { + { PCI_RD_TG_TG_RD_RUN_RD_ITERATION, 16, 0, 0 }, +}; + +static nthw_fpga_register_init_s pci_rd_tg_registers[] = { + { PCI_RD_TG_TG_CTRL, 5, 1, NTHW_FPGA_REG_TYPE_RO, 0, 1, pci_rd_tg_tg_ctrl_fields }, + { PCI_RD_TG_TG_RDADDR, 3, 9, NTHW_FPGA_REG_TYPE_WO, 0, 1, pci_rd_tg_tg_rdaddr_fields }, + { PCI_RD_TG_TG_RDDATA0, 0, 32, NTHW_FPGA_REG_TYPE_WO, 0, 1, pci_rd_tg_tg_rddata0_fields }, + { PCI_RD_TG_TG_RDDATA1, 1, 32, NTHW_FPGA_REG_TYPE_WO, 0, 1, pci_rd_tg_tg_rddata1_fields }, + { PCI_RD_TG_TG_RDDATA2, 2, 32, NTHW_FPGA_REG_TYPE_WO, 0, 4, pci_rd_tg_tg_rddata2_fields }, + { PCI_RD_TG_TG_RD_RUN, 4, 16, NTHW_FPGA_REG_TYPE_WO, 0, 1, pci_rd_tg_tg_rd_run_fields }, +}; + +static nthw_fpga_field_init_s pci_wr_tg_tg_ctrl_fields[] = { + { PCI_WR_TG_TG_CTRL_TG_WR_RDY, 1, 0, 0 }, +}; + +static nthw_fpga_field_init_s pci_wr_tg_tg_seq_fields[] = { + { PCI_WR_TG_TG_SEQ_SEQUENCE, 16, 0, 0 }, +}; + +static nthw_fpga_field_init_s pci_wr_tg_tg_wraddr_fields[] = { + { PCI_WR_TG_TG_WRADDR_RAM_ADDR, 9, 0, 0 }, +}; + +static nthw_fpga_field_init_s pci_wr_tg_tg_wrdata0_fields[] = { + { PCI_WR_TG_TG_WRDATA0_PHYS_ADDR_LOW, 32, 0, 0 }, +}; + +static nthw_fpga_field_init_s pci_wr_tg_tg_wrdata1_fields[] = { + { PCI_WR_TG_TG_WRDATA1_PHYS_ADDR_HIGH, 32, 0, 0 }, +}; + +static nthw_fpga_field_init_s pci_wr_tg_tg_wrdata2_fields[] = { + { PCI_WR_TG_TG_WRDATA2_INC_MODE, 1, 29, 0 }, { PCI_WR_TG_TG_WRDATA2_REQ_HID, 6, 22, 0 }, + { PCI_WR_TG_TG_WRDATA2_REQ_SIZE, 22, 0, 0 }, { PCI_WR_TG_TG_WRDATA2_WAIT, 1, 30, 0 }, + { PCI_WR_TG_TG_WRDATA2_WRAP, 1, 31, 0 }, +}; + +static nthw_fpga_field_init_s pci_wr_tg_tg_wr_run_fields[] = { + { PCI_WR_TG_TG_WR_RUN_WR_ITERATION, 16, 0, 0 }, +}; + +static nthw_fpga_register_init_s pci_wr_tg_registers[] = { + { PCI_WR_TG_TG_CTRL, 5, 1, NTHW_FPGA_REG_TYPE_RO, 0, 1, pci_wr_tg_tg_ctrl_fields }, + { PCI_WR_TG_TG_SEQ, 6, 16, NTHW_FPGA_REG_TYPE_RW, 0, 1, pci_wr_tg_tg_seq_fields }, + { PCI_WR_TG_TG_WRADDR, 3, 9, NTHW_FPGA_REG_TYPE_WO, 0, 1, pci_wr_tg_tg_wraddr_fields }, + { PCI_WR_TG_TG_WRDATA0, 0, 32, NTHW_FPGA_REG_TYPE_WO, 0, 1, pci_wr_tg_tg_wrdata0_fields }, + { PCI_WR_TG_TG_WRDATA1, 1, 32, NTHW_FPGA_REG_TYPE_WO, 0, 1, pci_wr_tg_tg_wrdata1_fields }, + { PCI_WR_TG_TG_WRDATA2, 2, 32, NTHW_FPGA_REG_TYPE_WO, 0, 5, pci_wr_tg_tg_wrdata2_fields }, + { PCI_WR_TG_TG_WR_RUN, 4, 16, NTHW_FPGA_REG_TYPE_WO, 0, 1, pci_wr_tg_tg_wr_run_fields }, +}; + +static nthw_fpga_field_init_s pdb_config_fields[] = { + { PDB_CONFIG_PORT_OFS, 6, 3, 0 }, + { PDB_CONFIG_TS_FORMAT, 3, 0, 0 }, +}; + +static nthw_fpga_field_init_s pdb_rcp_ctrl_fields[] = { + { PDB_RCP_CTRL_ADR, 4, 0, 0x0000 }, + { PDB_RCP_CTRL_CNT, 16, 16, 0x0000 }, +}; + +static nthw_fpga_field_init_s pdb_rcp_data_fields[] = { + { PDB_RCP_DATA_ALIGN, 1, 17, 0x0000 }, + { PDB_RCP_DATA_CRC_OVERWRITE, 1, 16, 0x0000 }, + { PDB_RCP_DATA_DESCRIPTOR, 4, 0, 0x0000 }, + { PDB_RCP_DATA_DESC_LEN, 5, 4, 0 }, + { PDB_RCP_DATA_DUPLICATE_BIT, 5, 61, 0x0000 }, + { PDB_RCP_DATA_DUPLICATE_EN, 1, 60, 0x0000 }, + { PDB_RCP_DATA_IP_PROT_TNL, 1, 57, 0x0000 }, + { PDB_RCP_DATA_OFS0_DYN, 5, 18, 0x0000 }, + { 
PDB_RCP_DATA_OFS0_REL, 8, 23, 0x0000 }, + { PDB_RCP_DATA_OFS1_DYN, 5, 31, 0x0000 }, + { PDB_RCP_DATA_OFS1_REL, 8, 36, 0x0000 }, + { PDB_RCP_DATA_OFS2_DYN, 5, 44, 0x0000 }, + { PDB_RCP_DATA_OFS2_REL, 8, 49, 0x0000 }, + { PDB_RCP_DATA_PCAP_KEEP_FCS, 1, 66, 0x0000 }, + { PDB_RCP_DATA_PPC_HSH, 2, 58, 0x0000 }, + { PDB_RCP_DATA_TX_IGNORE, 1, 14, 0x0000 }, + { PDB_RCP_DATA_TX_NOW, 1, 15, 0x0000 }, + { PDB_RCP_DATA_TX_PORT, 5, 9, 0x0000 }, +}; + +static nthw_fpga_register_init_s pdb_registers[] = { + { PDB_CONFIG, 2, 10, NTHW_FPGA_REG_TYPE_WO, 0, 2, pdb_config_fields }, + { PDB_RCP_CTRL, 0, 32, NTHW_FPGA_REG_TYPE_WO, 0, 2, pdb_rcp_ctrl_fields }, + { PDB_RCP_DATA, 1, 67, NTHW_FPGA_REG_TYPE_WO, 0, 18, pdb_rcp_data_fields }, +}; + +static nthw_fpga_field_init_s qsl_qen_ctrl_fields[] = { + { QSL_QEN_CTRL_ADR, 5, 0, 0x0000 }, + { QSL_QEN_CTRL_CNT, 16, 16, 0x0000 }, +}; + +static nthw_fpga_field_init_s qsl_qen_data_fields[] = { + { QSL_QEN_DATA_EN, 4, 0, 0x0000 }, +}; + +static nthw_fpga_field_init_s qsl_qst_ctrl_fields[] = { + { QSL_QST_CTRL_ADR, 12, 0, 0x0000 }, + { QSL_QST_CTRL_CNT, 16, 16, 0x0000 }, +}; + +static nthw_fpga_field_init_s qsl_qst_data_fields[] = { + { QSL_QST_DATA_LRE, 1, 9, 0x0000 }, { QSL_QST_DATA_QEN, 1, 7, 0x0000 }, + { QSL_QST_DATA_QUEUE, 7, 0, 0x0000 }, { QSL_QST_DATA_TCI, 16, 10, 0x0000 }, + { QSL_QST_DATA_TX_PORT, 1, 8, 0x0000 }, { QSL_QST_DATA_VEN, 1, 26, 0x0000 }, +}; + +static nthw_fpga_field_init_s qsl_rcp_ctrl_fields[] = { + { QSL_RCP_CTRL_ADR, 5, 0, 0x0000 }, + { QSL_RCP_CTRL_CNT, 16, 16, 0x0000 }, +}; + +static nthw_fpga_field_init_s qsl_rcp_data_fields[] = { + { QSL_RCP_DATA_DISCARD, 1, 0, 0x0000 }, { QSL_RCP_DATA_DROP, 2, 1, 0x0000 }, + { QSL_RCP_DATA_LR, 2, 51, 0x0000 }, { QSL_RCP_DATA_TBL_HI, 12, 15, 0x0000 }, + { QSL_RCP_DATA_TBL_IDX, 12, 27, 0x0000 }, { QSL_RCP_DATA_TBL_LO, 12, 3, 0x0000 }, + { QSL_RCP_DATA_TBL_MSK, 12, 39, 0x0000 }, { QSL_RCP_DATA_TSA, 1, 53, 0x0000 }, + { QSL_RCP_DATA_VLI, 2, 54, 0x0000 }, +}; + +static nthw_fpga_field_init_s qsl_unmq_ctrl_fields[] = { + { QSL_UNMQ_CTRL_ADR, 1, 0, 0x0000 }, + { QSL_UNMQ_CTRL_CNT, 16, 16, 0x0000 }, +}; + +static nthw_fpga_field_init_s qsl_unmq_data_fields[] = { + { QSL_UNMQ_DATA_DEST_QUEUE, 7, 0, 0x0000 }, + { QSL_UNMQ_DATA_EN, 1, 7, 0x0000 }, +}; + +static nthw_fpga_register_init_s qsl_registers[] = { + { QSL_QEN_CTRL, 4, 32, NTHW_FPGA_REG_TYPE_WO, 0, 2, qsl_qen_ctrl_fields }, + { QSL_QEN_DATA, 5, 4, NTHW_FPGA_REG_TYPE_WO, 0, 1, qsl_qen_data_fields }, + { QSL_QST_CTRL, 2, 32, NTHW_FPGA_REG_TYPE_WO, 0, 2, qsl_qst_ctrl_fields }, + { QSL_QST_DATA, 3, 27, NTHW_FPGA_REG_TYPE_WO, 0, 6, qsl_qst_data_fields }, + { QSL_RCP_CTRL, 0, 32, NTHW_FPGA_REG_TYPE_WO, 0, 2, qsl_rcp_ctrl_fields }, + { QSL_RCP_DATA, 1, 56, NTHW_FPGA_REG_TYPE_WO, 0, 9, qsl_rcp_data_fields }, + { QSL_UNMQ_CTRL, 6, 32, NTHW_FPGA_REG_TYPE_WO, 0, 2, qsl_unmq_ctrl_fields }, + { QSL_UNMQ_DATA, 7, 8, NTHW_FPGA_REG_TYPE_WO, 0, 2, qsl_unmq_data_fields }, +}; + +static nthw_fpga_field_init_s rac_dbg_ctrl_fields[] = { + { RAC_DBG_CTRL_C, 32, 0, 0x0000 }, +}; + +static nthw_fpga_field_init_s rac_dbg_data_fields[] = { + { RAC_DBG_DATA_D, 32, 0, 0x0000 }, +}; + +static nthw_fpga_field_init_s rac_rab_buf_free_fields[] = { + { RAC_RAB_BUF_FREE_IB_FREE, 9, 0, 511 }, { RAC_RAB_BUF_FREE_IB_OVF, 1, 12, 0 }, + { RAC_RAB_BUF_FREE_OB_FREE, 9, 16, 511 }, { RAC_RAB_BUF_FREE_OB_OVF, 1, 28, 0 }, + { RAC_RAB_BUF_FREE_TIMEOUT, 1, 31, 0 }, +}; + +static nthw_fpga_field_init_s rac_rab_buf_used_fields[] = { + { RAC_RAB_BUF_USED_FLUSH, 1, 31, 0 }, + { RAC_RAB_BUF_USED_IB_USED, 9, 0, 
0 }, + { RAC_RAB_BUF_USED_OB_USED, 9, 16, 0 }, }; -static nthw_fpga_field_init_s mac_pcs_fec_cw_cnt_fields[] = { - { MAC_PCS_FEC_CW_CNT_CW_CNT, 32, 0, 0x0000 }, +static nthw_fpga_field_init_s rac_rab_dma_ib_hi_fields[] = { + { RAC_RAB_DMA_IB_HI_PHYADDR, 32, 0, 0 }, }; -static nthw_fpga_field_init_s mac_pcs_fec_err_cnt_0_fields[] = { - { MAC_PCS_FEC_ERR_CNT_0_ERR_CNT, 32, 0, 0x0000 }, +static nthw_fpga_field_init_s rac_rab_dma_ib_lo_fields[] = { + { RAC_RAB_DMA_IB_LO_PHYADDR, 32, 0, 0 }, }; -static nthw_fpga_field_init_s mac_pcs_fec_err_cnt_1_fields[] = { - { MAC_PCS_FEC_ERR_CNT_1_ERR_CNT, 32, 0, 0x0000 }, +static nthw_fpga_field_init_s rac_rab_dma_ib_rd_fields[] = { + { RAC_RAB_DMA_IB_RD_PTR, 16, 0, 0 }, }; -static nthw_fpga_field_init_s mac_pcs_fec_err_cnt_2_fields[] = { - { MAC_PCS_FEC_ERR_CNT_2_ERR_CNT, 32, 0, 0x0000 }, +static nthw_fpga_field_init_s rac_rab_dma_ib_wr_fields[] = { + { RAC_RAB_DMA_IB_WR_PTR, 16, 0, 0 }, }; -static nthw_fpga_field_init_s mac_pcs_fec_err_cnt_3_fields[] = { - { MAC_PCS_FEC_ERR_CNT_3_ERR_CNT, 32, 0, 0x0000 }, +static nthw_fpga_field_init_s rac_rab_dma_ob_hi_fields[] = { + { RAC_RAB_DMA_OB_HI_PHYADDR, 32, 0, 0 }, }; -static nthw_fpga_field_init_s mac_pcs_fec_lane_dly_0_fields[] = { - { MAC_PCS_FEC_LANE_DLY_0_DLY, 14, 0, 0x0000 }, +static nthw_fpga_field_init_s rac_rab_dma_ob_lo_fields[] = { + { RAC_RAB_DMA_OB_LO_PHYADDR, 32, 0, 0 }, }; -static nthw_fpga_field_init_s mac_pcs_fec_lane_dly_1_fields[] = { - { MAC_PCS_FEC_LANE_DLY_1_DLY, 14, 0, 0x0000 }, +static nthw_fpga_field_init_s rac_rab_dma_ob_wr_fields[] = { + { RAC_RAB_DMA_OB_WR_PTR, 16, 0, 0 }, }; -static nthw_fpga_field_init_s mac_pcs_fec_lane_dly_2_fields[] = { - { MAC_PCS_FEC_LANE_DLY_2_DLY, 14, 0, 0x0000 }, +static nthw_fpga_field_init_s rac_rab_ib_data_fields[] = { + { RAC_RAB_IB_DATA_D, 32, 0, 0x0000 }, }; -static nthw_fpga_field_init_s mac_pcs_fec_lane_dly_3_fields[] = { - { MAC_PCS_FEC_LANE_DLY_3_DLY, 14, 0, 0x0000 }, +static nthw_fpga_field_init_s rac_rab_init_fields[] = { + { RAC_RAB_INIT_RAB, 3, 0, 7 }, }; -static nthw_fpga_field_init_s mac_pcs_fec_lane_map_fields[] = { - { MAC_PCS_FEC_LANE_MAP_MAPPING, 8, 0, 0x0000 }, +static nthw_fpga_field_init_s rac_rab_ob_data_fields[] = { + { RAC_RAB_OB_DATA_D, 32, 0, 0x0000 }, }; -static nthw_fpga_field_init_s mac_pcs_fec_stat_fields[] = { - { MAC_PCS_FEC_STAT_AM_LOCK, 1, 10, 0x0000 }, - { MAC_PCS_FEC_STAT_AM_LOCK_0, 1, 3, 0x0000 }, - { MAC_PCS_FEC_STAT_AM_LOCK_1, 1, 4, 0x0000 }, - { MAC_PCS_FEC_STAT_AM_LOCK_2, 1, 5, 0x0000 }, - { MAC_PCS_FEC_STAT_AM_LOCK_3, 1, 6, 0x0000 }, - { MAC_PCS_FEC_STAT_BLOCK_LOCK, 1, 9, 0x0000 }, - { MAC_PCS_FEC_STAT_BYPASS, 1, 0, 0x0000 }, - { MAC_PCS_FEC_STAT_FEC_LANE_ALGN, 1, 7, 0x0000 }, - { MAC_PCS_FEC_STAT_HI_SER, 1, 2, 0x0000 }, - { MAC_PCS_FEC_STAT_PCS_LANE_ALGN, 1, 8, 0x0000 }, - { MAC_PCS_FEC_STAT_VALID, 1, 1, 0x0000 }, +static nthw_fpga_register_init_s rac_registers[] = { + { RAC_DBG_CTRL, 4200, 32, NTHW_FPGA_REG_TYPE_RW, 0, 1, rac_dbg_ctrl_fields }, + { RAC_DBG_DATA, 4208, 32, NTHW_FPGA_REG_TYPE_RW, 0, 1, rac_dbg_data_fields }, + { + RAC_RAB_BUF_FREE, 4176, 32, NTHW_FPGA_REG_TYPE_MIXED, 33489407, 5, + rac_rab_buf_free_fields + }, + { RAC_RAB_BUF_USED, 4184, 32, NTHW_FPGA_REG_TYPE_MIXED, 0, 3, rac_rab_buf_used_fields }, + { RAC_RAB_DMA_IB_HI, 4360, 32, NTHW_FPGA_REG_TYPE_WO, 0, 1, rac_rab_dma_ib_hi_fields }, + { RAC_RAB_DMA_IB_LO, 4352, 32, NTHW_FPGA_REG_TYPE_WO, 0, 1, rac_rab_dma_ib_lo_fields }, + { RAC_RAB_DMA_IB_RD, 4424, 16, NTHW_FPGA_REG_TYPE_RO, 0, 1, rac_rab_dma_ib_rd_fields }, + { RAC_RAB_DMA_IB_WR, 4416, 16, 
NTHW_FPGA_REG_TYPE_WO, 0, 1, rac_rab_dma_ib_wr_fields }, + { RAC_RAB_DMA_OB_HI, 4376, 32, NTHW_FPGA_REG_TYPE_WO, 0, 1, rac_rab_dma_ob_hi_fields }, + { RAC_RAB_DMA_OB_LO, 4368, 32, NTHW_FPGA_REG_TYPE_WO, 0, 1, rac_rab_dma_ob_lo_fields }, + { RAC_RAB_DMA_OB_WR, 4480, 16, NTHW_FPGA_REG_TYPE_RO, 0, 1, rac_rab_dma_ob_wr_fields }, + { RAC_RAB_IB_DATA, 4160, 32, NTHW_FPGA_REG_TYPE_WO, 0, 1, rac_rab_ib_data_fields }, + { RAC_RAB_INIT, 4192, 3, NTHW_FPGA_REG_TYPE_RW, 7, 1, rac_rab_init_fields }, + { RAC_RAB_OB_DATA, 4168, 32, NTHW_FPGA_REG_TYPE_RC1, 0, 1, rac_rab_ob_data_fields }, }; -static nthw_fpga_field_init_s mac_pcs_fec_ucw_cnt_fields[] = { - { MAC_PCS_FEC_UCW_CNT_UCW_CNT, 32, 0, 0x0000 }, +static nthw_fpga_field_init_s rmc_ctrl_fields[] = { + { RMC_CTRL_BLOCK_KEEPA, 1, 1, 1 }, { RMC_CTRL_BLOCK_MAC_PORT, 2, 8, 3 }, + { RMC_CTRL_BLOCK_RPP_SLICE, 8, 10, 0 }, { RMC_CTRL_BLOCK_STATT, 1, 0, 1 }, + { RMC_CTRL_LAG_PHY_ODD_EVEN, 1, 24, 0 }, }; -static nthw_fpga_field_init_s mac_pcs_gty_ctl_rx_fields[] = { - { MAC_PCS_GTY_CTL_RX_CDR_HOLD_0, 1, 24, 0 }, { MAC_PCS_GTY_CTL_RX_CDR_HOLD_1, 1, 25, 0 }, - { MAC_PCS_GTY_CTL_RX_CDR_HOLD_2, 1, 26, 0 }, { MAC_PCS_GTY_CTL_RX_CDR_HOLD_3, 1, 27, 0 }, - { MAC_PCS_GTY_CTL_RX_EQUA_RST_0, 1, 20, 0 }, { MAC_PCS_GTY_CTL_RX_EQUA_RST_1, 1, 21, 0 }, - { MAC_PCS_GTY_CTL_RX_EQUA_RST_2, 1, 22, 0 }, { MAC_PCS_GTY_CTL_RX_EQUA_RST_3, 1, 23, 0 }, - { MAC_PCS_GTY_CTL_RX_LPM_EN_0, 1, 16, 0 }, { MAC_PCS_GTY_CTL_RX_LPM_EN_1, 1, 17, 0 }, - { MAC_PCS_GTY_CTL_RX_LPM_EN_2, 1, 18, 0 }, { MAC_PCS_GTY_CTL_RX_LPM_EN_3, 1, 19, 0 }, - { MAC_PCS_GTY_CTL_RX_POLARITY_0, 1, 0, 0 }, { MAC_PCS_GTY_CTL_RX_POLARITY_1, 1, 1, 0 }, - { MAC_PCS_GTY_CTL_RX_POLARITY_2, 1, 2, 0 }, { MAC_PCS_GTY_CTL_RX_POLARITY_3, 1, 3, 0 }, - { MAC_PCS_GTY_CTL_RX_RATE_0, 3, 4, 0 }, { MAC_PCS_GTY_CTL_RX_RATE_1, 3, 7, 0 }, - { MAC_PCS_GTY_CTL_RX_RATE_2, 3, 10, 0 }, { MAC_PCS_GTY_CTL_RX_RATE_3, 3, 13, 0 }, +static nthw_fpga_field_init_s rmc_dbg_fields[] = { + { RMC_DBG_MERGE, 31, 0, 0 }, }; -static nthw_fpga_field_init_s mac_pcs_gty_ctl_tx_fields[] = { - { MAC_PCS_GTY_CTL_TX_INHIBIT_0, 1, 4, 0 }, { MAC_PCS_GTY_CTL_TX_INHIBIT_1, 1, 5, 0 }, - { MAC_PCS_GTY_CTL_TX_INHIBIT_2, 1, 6, 0 }, { MAC_PCS_GTY_CTL_TX_INHIBIT_3, 1, 7, 0 }, - { MAC_PCS_GTY_CTL_TX_POLARITY_0, 1, 0, 0 }, { MAC_PCS_GTY_CTL_TX_POLARITY_1, 1, 1, 0 }, - { MAC_PCS_GTY_CTL_TX_POLARITY_2, 1, 2, 0 }, { MAC_PCS_GTY_CTL_TX_POLARITY_3, 1, 3, 0 }, +static nthw_fpga_field_init_s rmc_mac_if_fields[] = { + { RMC_MAC_IF_ERR, 31, 0, 0 }, }; -static nthw_fpga_field_init_s mac_pcs_gty_diff_ctl_fields[] = { - { MAC_PCS_GTY_DIFF_CTL_TX_DIFF_CTL_0, 5, 0, 24 }, - { MAC_PCS_GTY_DIFF_CTL_TX_DIFF_CTL_1, 5, 5, 24 }, - { MAC_PCS_GTY_DIFF_CTL_TX_DIFF_CTL_2, 5, 10, 24 }, - { MAC_PCS_GTY_DIFF_CTL_TX_DIFF_CTL_3, 5, 15, 24 }, +static nthw_fpga_field_init_s rmc_status_fields[] = { + { RMC_STATUS_DESCR_FIFO_OF, 1, 16, 0 }, + { RMC_STATUS_SF_RAM_OF, 1, 0, 0 }, }; -static nthw_fpga_field_init_s mac_pcs_gty_loop_fields[] = { - { MAC_PCS_GTY_LOOP_GT_LOOP_0, 3, 0, 0 }, - { MAC_PCS_GTY_LOOP_GT_LOOP_1, 3, 3, 0 }, - { MAC_PCS_GTY_LOOP_GT_LOOP_2, 3, 6, 0 }, - { MAC_PCS_GTY_LOOP_GT_LOOP_3, 3, 9, 0 }, +static nthw_fpga_register_init_s rmc_registers[] = { + { RMC_CTRL, 0, 25, NTHW_FPGA_REG_TYPE_RW, 771, 5, rmc_ctrl_fields }, + { RMC_DBG, 2, 31, NTHW_FPGA_REG_TYPE_RO, 0, 1, rmc_dbg_fields }, + { RMC_MAC_IF, 3, 31, NTHW_FPGA_REG_TYPE_RO, 0, 1, rmc_mac_if_fields }, + { RMC_STATUS, 1, 17, NTHW_FPGA_REG_TYPE_RO, 0, 2, rmc_status_fields }, }; -static nthw_fpga_field_init_s mac_pcs_gty_post_cursor_fields[] = { - { 
MAC_PCS_GTY_POST_CURSOR_TX_POST_CSR_0, 5, 0, 20 }, - { MAC_PCS_GTY_POST_CURSOR_TX_POST_CSR_1, 5, 5, 20 }, - { MAC_PCS_GTY_POST_CURSOR_TX_POST_CSR_2, 5, 10, 20 }, - { MAC_PCS_GTY_POST_CURSOR_TX_POST_CSR_3, 5, 15, 20 }, +static nthw_fpga_field_init_s rpl_ext_ctrl_fields[] = { + { RPL_EXT_CTRL_ADR, 10, 0, 0x0000 }, + { RPL_EXT_CTRL_CNT, 16, 16, 0x0000 }, }; -static nthw_fpga_field_init_s mac_pcs_gty_prbs_sel_fields[] = { - { MAC_PCS_GTY_PRBS_SEL_RX_PRBS_SEL_0, 4, 16, 0 }, - { MAC_PCS_GTY_PRBS_SEL_RX_PRBS_SEL_1, 4, 20, 0 }, - { MAC_PCS_GTY_PRBS_SEL_RX_PRBS_SEL_2, 4, 24, 0 }, - { MAC_PCS_GTY_PRBS_SEL_RX_PRBS_SEL_3, 4, 28, 0 }, - { MAC_PCS_GTY_PRBS_SEL_TX_PRBS_SEL_0, 4, 0, 0 }, - { MAC_PCS_GTY_PRBS_SEL_TX_PRBS_SEL_1, 4, 4, 0 }, - { MAC_PCS_GTY_PRBS_SEL_TX_PRBS_SEL_2, 4, 8, 0 }, - { MAC_PCS_GTY_PRBS_SEL_TX_PRBS_SEL_3, 4, 12, 0 }, +static nthw_fpga_field_init_s rpl_ext_data_fields[] = { + { RPL_EXT_DATA_RPL_PTR, 12, 0, 0x0000 }, }; -static nthw_fpga_field_init_s mac_pcs_gty_pre_cursor_fields[] = { - { MAC_PCS_GTY_PRE_CURSOR_TX_PRE_CSR_0, 5, 0, 0 }, - { MAC_PCS_GTY_PRE_CURSOR_TX_PRE_CSR_1, 5, 5, 0 }, - { MAC_PCS_GTY_PRE_CURSOR_TX_PRE_CSR_2, 5, 10, 0 }, - { MAC_PCS_GTY_PRE_CURSOR_TX_PRE_CSR_3, 5, 15, 0 }, +static nthw_fpga_field_init_s rpl_rcp_ctrl_fields[] = { + { RPL_RCP_CTRL_ADR, 4, 0, 0x0000 }, + { RPL_RCP_CTRL_CNT, 16, 16, 0x0000 }, }; -static nthw_fpga_field_init_s mac_pcs_gty_rx_buf_stat_fields[] = { - { MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_0, 3, 0, 0x0000 }, - { MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_1, 3, 3, 0x0000 }, - { MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_2, 3, 6, 0x0000 }, - { MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_3, 3, 9, 0x0000 }, - { MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_CHANGED_0, 3, 12, 0x0000 }, - { MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_CHANGED_1, 3, 15, 0x0000 }, - { MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_CHANGED_2, 3, 18, 0x0000 }, - { MAC_PCS_GTY_RX_BUF_STAT_RX_BUF_STAT_CHANGED_3, 3, 21, 0x0000 }, +static nthw_fpga_field_init_s rpl_rcp_data_fields[] = { + { RPL_RCP_DATA_DYN, 5, 0, 0x0000 }, { RPL_RCP_DATA_ETH_TYPE_WR, 1, 36, 0x0000 }, + { RPL_RCP_DATA_EXT_PRIO, 1, 35, 0x0000 }, { RPL_RCP_DATA_LEN, 8, 15, 0x0000 }, + { RPL_RCP_DATA_OFS, 10, 5, 0x0000 }, { RPL_RCP_DATA_RPL_PTR, 12, 23, 0x0000 }, }; -static nthw_fpga_field_init_s mac_pcs_gty_scan_ctl_fields[] = { - { MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_RST_0, 1, 0, 0 }, - { MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_RST_1, 1, 1, 0 }, - { MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_RST_2, 1, 2, 0 }, - { MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_RST_3, 1, 3, 0 }, - { MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_TRG_0, 1, 4, 0 }, - { MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_TRG_1, 1, 5, 0 }, - { MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_TRG_2, 1, 6, 0 }, - { MAC_PCS_GTY_SCAN_CTL_EYE_SCAN_TRG_3, 1, 7, 0 }, - { MAC_PCS_GTY_SCAN_CTL_PRBS_ERR_INS_0, 1, 12, 0 }, - { MAC_PCS_GTY_SCAN_CTL_PRBS_ERR_INS_1, 1, 13, 0 }, - { MAC_PCS_GTY_SCAN_CTL_PRBS_ERR_INS_2, 1, 14, 0 }, - { MAC_PCS_GTY_SCAN_CTL_PRBS_ERR_INS_3, 1, 15, 0 }, - { MAC_PCS_GTY_SCAN_CTL_PRBS_RST_0, 1, 8, 0 }, - { MAC_PCS_GTY_SCAN_CTL_PRBS_RST_1, 1, 9, 0 }, - { MAC_PCS_GTY_SCAN_CTL_PRBS_RST_2, 1, 10, 0 }, - { MAC_PCS_GTY_SCAN_CTL_PRBS_RST_3, 1, 11, 0 }, +static nthw_fpga_field_init_s rpl_rpl_ctrl_fields[] = { + { RPL_RPL_CTRL_ADR, 12, 0, 0x0000 }, + { RPL_RPL_CTRL_CNT, 16, 16, 0x0000 }, }; -static nthw_fpga_field_init_s mac_pcs_gty_scan_stat_fields[] = { - { MAC_PCS_GTY_SCAN_STAT_EYE_SCAN_ERR_0, 1, 0, 0x0000 }, - { MAC_PCS_GTY_SCAN_STAT_EYE_SCAN_ERR_1, 1, 1, 0x0000 }, - { MAC_PCS_GTY_SCAN_STAT_EYE_SCAN_ERR_2, 1, 2, 0x0000 }, - { MAC_PCS_GTY_SCAN_STAT_EYE_SCAN_ERR_3, 1, 3, 0x0000 
}, - { MAC_PCS_GTY_SCAN_STAT_PRBS_ERR_0, 1, 4, 0x0000 }, - { MAC_PCS_GTY_SCAN_STAT_PRBS_ERR_1, 1, 5, 0x0000 }, - { MAC_PCS_GTY_SCAN_STAT_PRBS_ERR_2, 1, 6, 0x0000 }, - { MAC_PCS_GTY_SCAN_STAT_PRBS_ERR_3, 1, 7, 0x0000 }, +static nthw_fpga_field_init_s rpl_rpl_data_fields[] = { + { RPL_RPL_DATA_VALUE, 128, 0, 0x0000 }, }; -static nthw_fpga_field_init_s mac_pcs_gty_stat_fields[] = { - { MAC_PCS_GTY_STAT_RX_RST_DONE_0, 1, 4, 0x0000 }, - { MAC_PCS_GTY_STAT_RX_RST_DONE_1, 1, 5, 0x0000 }, - { MAC_PCS_GTY_STAT_RX_RST_DONE_2, 1, 6, 0x0000 }, - { MAC_PCS_GTY_STAT_RX_RST_DONE_3, 1, 7, 0x0000 }, - { MAC_PCS_GTY_STAT_TX_BUF_STAT_0, 2, 8, 0x0000 }, - { MAC_PCS_GTY_STAT_TX_BUF_STAT_1, 2, 10, 0x0000 }, - { MAC_PCS_GTY_STAT_TX_BUF_STAT_2, 2, 12, 0x0000 }, - { MAC_PCS_GTY_STAT_TX_BUF_STAT_3, 2, 14, 0x0000 }, - { MAC_PCS_GTY_STAT_TX_RST_DONE_0, 1, 0, 0x0000 }, - { MAC_PCS_GTY_STAT_TX_RST_DONE_1, 1, 1, 0x0000 }, - { MAC_PCS_GTY_STAT_TX_RST_DONE_2, 1, 2, 0x0000 }, - { MAC_PCS_GTY_STAT_TX_RST_DONE_3, 1, 3, 0x0000 }, +static nthw_fpga_register_init_s rpl_registers[] = { + { RPL_EXT_CTRL, 2, 32, NTHW_FPGA_REG_TYPE_WO, 0, 2, rpl_ext_ctrl_fields }, + { RPL_EXT_DATA, 3, 12, NTHW_FPGA_REG_TYPE_WO, 0, 1, rpl_ext_data_fields }, + { RPL_RCP_CTRL, 0, 32, NTHW_FPGA_REG_TYPE_WO, 0, 2, rpl_rcp_ctrl_fields }, + { RPL_RCP_DATA, 1, 37, NTHW_FPGA_REG_TYPE_WO, 0, 6, rpl_rcp_data_fields }, + { RPL_RPL_CTRL, 4, 32, NTHW_FPGA_REG_TYPE_WO, 0, 2, rpl_rpl_ctrl_fields }, + { RPL_RPL_DATA, 5, 128, NTHW_FPGA_REG_TYPE_WO, 0, 1, rpl_rpl_data_fields }, }; -static nthw_fpga_field_init_s mac_pcs_link_summary_fields[] = { - { MAC_PCS_LINK_SUMMARY_ABS, 1, 0, 0x0000 }, - { MAC_PCS_LINK_SUMMARY_LH_ABS, 1, 2, 0x0000 }, - { MAC_PCS_LINK_SUMMARY_LH_LOCAL_FAULT, 1, 13, 0 }, - { MAC_PCS_LINK_SUMMARY_LH_REMOTE_FAULT, 1, 14, 0 }, - { MAC_PCS_LINK_SUMMARY_LINK_DOWN_CNT, 8, 4, 0 }, - { MAC_PCS_LINK_SUMMARY_LL_PHY_LINK_STATE, 1, 3, 0x0000 }, - { MAC_PCS_LINK_SUMMARY_LOCAL_FAULT, 1, 17, 0x0000 }, - { MAC_PCS_LINK_SUMMARY_NIM_INTERR, 1, 12, 0x0000 }, - { MAC_PCS_LINK_SUMMARY_NT_PHY_LINK_STATE, 1, 1, 0x0000 }, - { MAC_PCS_LINK_SUMMARY_REMOTE_FAULT, 1, 18, 0x0000 }, - { MAC_PCS_LINK_SUMMARY_RESERVED, 2, 15, 0 }, +static nthw_fpga_field_init_s rpp_lr_ifr_rcp_ctrl_fields[] = { + { RPP_LR_IFR_RCP_CTRL_ADR, 4, 0, 0x0000 }, + { RPP_LR_IFR_RCP_CTRL_CNT, 16, 16, 0x0000 }, }; -static nthw_fpga_field_init_s mac_pcs_mac_pcs_config_fields[] = { - { MAC_PCS_MAC_PCS_CONFIG_RX_CORE_RST, 1, 3, 0 }, - { MAC_PCS_MAC_PCS_CONFIG_RX_ENABLE, 1, 5, 0 }, - { MAC_PCS_MAC_PCS_CONFIG_RX_FORCE_RESYNC, 1, 6, 0 }, - { MAC_PCS_MAC_PCS_CONFIG_RX_PATH_RST, 1, 1, 0 }, - { MAC_PCS_MAC_PCS_CONFIG_RX_TEST_PATTERN, 1, 7, 0 }, - { MAC_PCS_MAC_PCS_CONFIG_TX_CORE_RST, 1, 2, 0 }, - { MAC_PCS_MAC_PCS_CONFIG_TX_ENABLE, 1, 8, 1 }, - { MAC_PCS_MAC_PCS_CONFIG_TX_FCS_REMOVE, 1, 4, 1 }, - { MAC_PCS_MAC_PCS_CONFIG_TX_PATH_RST, 1, 0, 0 }, - { MAC_PCS_MAC_PCS_CONFIG_TX_SEND_IDLE, 1, 9, 0 }, - { MAC_PCS_MAC_PCS_CONFIG_TX_SEND_RFI, 1, 10, 0 }, - { MAC_PCS_MAC_PCS_CONFIG_TX_TEST_PATTERN, 1, 11, 0 }, +static nthw_fpga_field_init_s rpp_lr_ifr_rcp_data_fields[] = { + { RPP_LR_IFR_RCP_DATA_IPV4_DF_DROP, 1, 17, 0x0000 }, + { RPP_LR_IFR_RCP_DATA_IPV4_EN, 1, 0, 0x0000 }, + { RPP_LR_IFR_RCP_DATA_IPV6_DROP, 1, 16, 0x0000 }, + { RPP_LR_IFR_RCP_DATA_IPV6_EN, 1, 1, 0x0000 }, + { RPP_LR_IFR_RCP_DATA_MTU, 14, 2, 0x0000 }, }; -static nthw_fpga_field_init_s mac_pcs_max_pkt_len_fields[] = { - { MAC_PCS_MAX_PKT_LEN_MAX_LEN, 14, 0, 10000 }, +static nthw_fpga_field_init_s rpp_lr_rcp_ctrl_fields[] = { + { RPP_LR_RCP_CTRL_ADR, 4, 0, 
0x0000 }, + { RPP_LR_RCP_CTRL_CNT, 16, 16, 0x0000 }, }; -static nthw_fpga_field_init_s mac_pcs_phymac_misc_fields[] = { - { MAC_PCS_PHYMAC_MISC_TS_EOP, 1, 3, 1 }, - { MAC_PCS_PHYMAC_MISC_TX_MUX_STATE, 4, 4, 0x0000 }, - { MAC_PCS_PHYMAC_MISC_TX_SEL_HOST, 1, 0, 1 }, - { MAC_PCS_PHYMAC_MISC_TX_SEL_RX_LOOP, 1, 2, 0 }, - { MAC_PCS_PHYMAC_MISC_TX_SEL_TFG, 1, 1, 0 }, +static nthw_fpga_field_init_s rpp_lr_rcp_data_fields[] = { + { RPP_LR_RCP_DATA_EXP, 14, 0, 0x0000 }, }; -static nthw_fpga_field_init_s mac_pcs_phy_stat_fields[] = { - { MAC_PCS_PHY_STAT_ALARM, 1, 2, 0x0000 }, - { MAC_PCS_PHY_STAT_MOD_PRS, 1, 1, 0x0000 }, - { MAC_PCS_PHY_STAT_RX_LOS, 1, 0, 0x0000 }, +static nthw_fpga_register_init_s rpp_lr_registers[] = { + { RPP_LR_IFR_RCP_CTRL, 2, 32, NTHW_FPGA_REG_TYPE_WO, 0, 2, rpp_lr_ifr_rcp_ctrl_fields }, + { RPP_LR_IFR_RCP_DATA, 3, 18, NTHW_FPGA_REG_TYPE_WO, 0, 5, rpp_lr_ifr_rcp_data_fields }, + { RPP_LR_RCP_CTRL, 0, 32, NTHW_FPGA_REG_TYPE_WO, 0, 2, rpp_lr_rcp_ctrl_fields }, + { RPP_LR_RCP_DATA, 1, 14, NTHW_FPGA_REG_TYPE_WO, 0, 1, rpp_lr_rcp_data_fields }, }; -static nthw_fpga_field_init_s mac_pcs_stat_pcs_rx_fields[] = { - { MAC_PCS_STAT_PCS_RX_ALIGNED, 1, 1, 0x0000 }, - { MAC_PCS_STAT_PCS_RX_ALIGNED_ERR, 1, 2, 0x0000 }, - { MAC_PCS_STAT_PCS_RX_GOT_SIGNAL_OS, 1, 9, 0x0000 }, - { MAC_PCS_STAT_PCS_RX_HI_BER, 1, 8, 0x0000 }, - { MAC_PCS_STAT_PCS_RX_INTERNAL_LOCAL_FAULT, 1, 4, 0x0000 }, - { MAC_PCS_STAT_PCS_RX_LOCAL_FAULT, 1, 6, 0x0000 }, - { MAC_PCS_STAT_PCS_RX_MISALIGNED, 1, 3, 0x0000 }, - { MAC_PCS_STAT_PCS_RX_RECEIVED_LOCAL_FAULT, 1, 5, 0x0000 }, - { MAC_PCS_STAT_PCS_RX_REMOTE_FAULT, 1, 7, 0x0000 }, - { MAC_PCS_STAT_PCS_RX_STATUS, 1, 0, 0x0000 }, +static nthw_fpga_field_init_s rst9563_ctrl_fields[] = { + { RST9563_CTRL_PTP_MMCM_CLKSEL, 1, 2, 1 }, + { RST9563_CTRL_TS_CLKSEL, 1, 1, 1 }, + { RST9563_CTRL_TS_CLKSEL_OVERRIDE, 1, 0, 1 }, }; -static nthw_fpga_field_init_s mac_pcs_stat_pcs_rx_latch_fields[] = { - { MAC_PCS_STAT_PCS_RX_LATCH_ALIGNED, 1, 1, 0x0000 }, - { MAC_PCS_STAT_PCS_RX_LATCH_ALIGNED_ERR, 1, 2, 0x0000 }, - { MAC_PCS_STAT_PCS_RX_LATCH_GOT_SIGNAL_OS, 1, 9, 0x0000 }, - { MAC_PCS_STAT_PCS_RX_LATCH_HI_BER, 1, 8, 0x0000 }, - { MAC_PCS_STAT_PCS_RX_LATCH_INTERNAL_LOCAL_FAULT, 1, 4, 0x0000 }, - { MAC_PCS_STAT_PCS_RX_LATCH_LOCAL_FAULT, 1, 6, 0x0000 }, - { MAC_PCS_STAT_PCS_RX_LATCH_MISALIGNED, 1, 3, 0x0000 }, - { MAC_PCS_STAT_PCS_RX_LATCH_RECEIVED_LOCAL_FAULT, 1, 5, 0x0000 }, - { MAC_PCS_STAT_PCS_RX_LATCH_REMOTE_FAULT, 1, 7, 0x0000 }, - { MAC_PCS_STAT_PCS_RX_LATCH_STATUS, 1, 0, 0x0000 }, +static nthw_fpga_field_init_s rst9563_power_fields[] = { + { RST9563_POWER_PU_NSEB, 1, 1, 0 }, + { RST9563_POWER_PU_PHY, 1, 0, 0 }, +}; + +static nthw_fpga_field_init_s rst9563_rst_fields[] = { + { RST9563_RST_CORE_MMCM, 1, 15, 0 }, { RST9563_RST_DDR4, 3, 3, 7 }, + { RST9563_RST_MAC_RX, 2, 9, 3 }, { RST9563_RST_PERIPH, 1, 13, 0 }, + { RST9563_RST_PHY, 2, 7, 3 }, { RST9563_RST_PTP, 1, 11, 1 }, + { RST9563_RST_PTP_MMCM, 1, 16, 0 }, { RST9563_RST_RPP, 1, 2, 1 }, + { RST9563_RST_SDC, 1, 6, 1 }, { RST9563_RST_SYS, 1, 0, 1 }, + { RST9563_RST_SYS_MMCM, 1, 14, 0 }, { RST9563_RST_TMC, 1, 1, 1 }, + { RST9563_RST_TS, 1, 12, 1 }, { RST9563_RST_TS_MMCM, 1, 17, 0 }, }; -static nthw_fpga_field_init_s mac_pcs_stat_pcs_tx_fields[] = { - { MAC_PCS_STAT_PCS_TX_LOCAL_FAULT, 1, 0, 0x0000 }, - { MAC_PCS_STAT_PCS_TX_LOCAL_FAULT_CHANGED, 1, 5, 0x0000 }, - { MAC_PCS_STAT_PCS_TX_PTP_FIFO_READ_ERROR, 1, 4, 0x0000 }, - { MAC_PCS_STAT_PCS_TX_PTP_FIFO_READ_ERROR_CHANGED, 1, 9, 0x0000 }, - { MAC_PCS_STAT_PCS_TX_PTP_FIFO_WRITE_ERROR, 1, 3, 
0x0000 }, - { MAC_PCS_STAT_PCS_TX_PTP_FIFO_WRITE_ERROR_CHANGED, 1, 8, 0x0000 }, - { MAC_PCS_STAT_PCS_TX_TX_OVFOUT, 1, 2, 0x0000 }, - { MAC_PCS_STAT_PCS_TX_TX_OVFOUT_CHANGED, 1, 7, 0x0000 }, - { MAC_PCS_STAT_PCS_TX_TX_UNFOUT, 1, 1, 0x0000 }, - { MAC_PCS_STAT_PCS_TX_TX_UNFOUT_CHANGED, 1, 6, 0x0000 }, +static nthw_fpga_field_init_s rst9563_stat_fields[] = { + { RST9563_STAT_CORE_MMCM_LOCKED, 1, 5, 0x0000 }, + { RST9563_STAT_DDR4_MMCM_LOCKED, 1, 2, 0x0000 }, + { RST9563_STAT_DDR4_PLL_LOCKED, 1, 3, 0x0000 }, + { RST9563_STAT_PTP_MMCM_LOCKED, 1, 0, 0x0000 }, + { RST9563_STAT_SYS_MMCM_LOCKED, 1, 4, 0x0000 }, + { RST9563_STAT_TS_MMCM_LOCKED, 1, 1, 0x0000 }, }; -static nthw_fpga_field_init_s mac_pcs_synced_fields[] = { - { MAC_PCS_SYNCED_SYNC, 20, 0, 0x0000 }, +static nthw_fpga_field_init_s rst9563_sticky_fields[] = { + { RST9563_STICKY_CORE_MMCM_UNLOCKED, 1, 5, 0x0000 }, + { RST9563_STICKY_DDR4_MMCM_UNLOCKED, 1, 2, 0x0000 }, + { RST9563_STICKY_DDR4_PLL_UNLOCKED, 1, 3, 0x0000 }, + { RST9563_STICKY_PTP_MMCM_UNLOCKED, 1, 0, 0x0000 }, + { RST9563_STICKY_SYS_MMCM_UNLOCKED, 1, 4, 0x0000 }, + { RST9563_STICKY_TS_MMCM_UNLOCKED, 1, 1, 0x0000 }, }; -static nthw_fpga_field_init_s mac_pcs_synced_err_fields[] = { - { MAC_PCS_SYNCED_ERR_SYNC_ERROR, 20, 0, 0x0000 }, +static nthw_fpga_register_init_s rst9563_registers[] = { + { RST9563_CTRL, 1, 3, NTHW_FPGA_REG_TYPE_RW, 7, 3, rst9563_ctrl_fields }, + { RST9563_POWER, 4, 2, NTHW_FPGA_REG_TYPE_RW, 0, 2, rst9563_power_fields }, + { RST9563_RST, 0, 18, NTHW_FPGA_REG_TYPE_RW, 8191, 14, rst9563_rst_fields }, + { RST9563_STAT, 2, 6, NTHW_FPGA_REG_TYPE_RO, 0, 6, rst9563_stat_fields }, + { RST9563_STICKY, 3, 6, NTHW_FPGA_REG_TYPE_RC1, 0, 6, rst9563_sticky_fields }, }; -static nthw_fpga_field_init_s mac_pcs_test_err_fields[] = { - { MAC_PCS_TEST_ERR_CODE_ERR, 16, 0, 0x0000 }, +static nthw_fpga_field_init_s slc_rcp_ctrl_fields[] = { + { SLC_RCP_CTRL_ADR, 6, 0, 0x0000 }, + { SLC_RCP_CTRL_CNT, 16, 16, 0x0000 }, }; -static nthw_fpga_field_init_s mac_pcs_timestamp_comp_fields[] = { - { MAC_PCS_TIMESTAMP_COMP_RX_DLY, 16, 0, 1451 }, - { MAC_PCS_TIMESTAMP_COMP_TX_DLY, 16, 16, 1440 }, +static nthw_fpga_field_init_s slc_rcp_data_fields[] = { + { SLC_RCP_DATA_HEAD_DYN, 5, 1, 0x0000 }, { SLC_RCP_DATA_HEAD_OFS, 8, 6, 0x0000 }, + { SLC_RCP_DATA_HEAD_SLC_EN, 1, 0, 0x0000 }, { SLC_RCP_DATA_PCAP, 1, 35, 0x0000 }, + { SLC_RCP_DATA_TAIL_DYN, 5, 15, 0x0000 }, { SLC_RCP_DATA_TAIL_OFS, 15, 20, 0x0000 }, + { SLC_RCP_DATA_TAIL_SLC_EN, 1, 14, 0x0000 }, }; -static nthw_fpga_field_init_s mac_pcs_vl_demuxed_fields[] = { - { MAC_PCS_VL_DEMUXED_LOCK, 20, 0, 0x0000 }, +static nthw_fpga_register_init_s slc_registers[] = { + { SLC_RCP_CTRL, 0, 32, NTHW_FPGA_REG_TYPE_WO, 0, 2, slc_rcp_ctrl_fields }, + { SLC_RCP_DATA, 1, 36, NTHW_FPGA_REG_TYPE_WO, 0, 7, slc_rcp_data_fields }, }; -static nthw_fpga_field_init_s mac_pcs_vl_demuxed_chg_fields[] = { - { MAC_PCS_VL_DEMUXED_CHG_LOCK_CHG, 20, 0, 0x0000 }, +static nthw_fpga_field_init_s sta_byte_fields[] = { + { STA_BYTE_CNT, 32, 0, 0x0000 }, }; -static nthw_fpga_register_init_s mac_pcs_registers[] = { - { MAC_PCS_BAD_CODE, 26, 16, NTHW_FPGA_REG_TYPE_RO, 0, 1, mac_pcs_bad_code_fields }, - { MAC_PCS_BIP_ERR, 31, 640, NTHW_FPGA_REG_TYPE_RO, 0, 1, mac_pcs_bip_err_fields }, - { MAC_PCS_BLOCK_LOCK, 27, 20, NTHW_FPGA_REG_TYPE_RO, 0, 1, mac_pcs_block_lock_fields }, - { - MAC_PCS_BLOCK_LOCK_CHG, 28, 20, NTHW_FPGA_REG_TYPE_RO, 0, 1, - mac_pcs_block_lock_chg_fields - }, - { - MAC_PCS_DEBOUNCE_CTRL, 1, 19, NTHW_FPGA_REG_TYPE_RW, 264714, 4, - mac_pcs_debounce_ctrl_fields - }, - 
{ MAC_PCS_DRP_CTRL, 43, 32, NTHW_FPGA_REG_TYPE_MIXED, 0, 6, mac_pcs_drp_ctrl_fields }, - { MAC_PCS_FEC_CTRL, 2, 5, NTHW_FPGA_REG_TYPE_RW, 0, 1, mac_pcs_fec_ctrl_fields }, - { MAC_PCS_FEC_CW_CNT, 9, 32, NTHW_FPGA_REG_TYPE_RO, 0, 1, mac_pcs_fec_cw_cnt_fields }, - { - MAC_PCS_FEC_ERR_CNT_0, 11, 32, NTHW_FPGA_REG_TYPE_RO, 0, 1, - mac_pcs_fec_err_cnt_0_fields - }, - { - MAC_PCS_FEC_ERR_CNT_1, 12, 32, NTHW_FPGA_REG_TYPE_RO, 0, 1, - mac_pcs_fec_err_cnt_1_fields - }, - { - MAC_PCS_FEC_ERR_CNT_2, 13, 32, NTHW_FPGA_REG_TYPE_RO, 0, 1, - mac_pcs_fec_err_cnt_2_fields - }, - { - MAC_PCS_FEC_ERR_CNT_3, 14, 32, NTHW_FPGA_REG_TYPE_RO, 0, 1, - mac_pcs_fec_err_cnt_3_fields - }, - { - MAC_PCS_FEC_LANE_DLY_0, 5, 14, NTHW_FPGA_REG_TYPE_RO, 0, 1, - mac_pcs_fec_lane_dly_0_fields - }, - { - MAC_PCS_FEC_LANE_DLY_1, 6, 14, NTHW_FPGA_REG_TYPE_RO, 0, 1, - mac_pcs_fec_lane_dly_1_fields - }, - { - MAC_PCS_FEC_LANE_DLY_2, 7, 14, NTHW_FPGA_REG_TYPE_RO, 0, 1, - mac_pcs_fec_lane_dly_2_fields - }, - { - MAC_PCS_FEC_LANE_DLY_3, 8, 14, NTHW_FPGA_REG_TYPE_RO, 0, 1, - mac_pcs_fec_lane_dly_3_fields - }, - { MAC_PCS_FEC_LANE_MAP, 4, 8, NTHW_FPGA_REG_TYPE_RO, 0, 1, mac_pcs_fec_lane_map_fields }, - { MAC_PCS_FEC_STAT, 3, 11, NTHW_FPGA_REG_TYPE_RO, 0, 11, mac_pcs_fec_stat_fields }, - { MAC_PCS_FEC_UCW_CNT, 10, 32, NTHW_FPGA_REG_TYPE_RO, 0, 1, mac_pcs_fec_ucw_cnt_fields }, - { MAC_PCS_GTY_CTL_RX, 38, 28, NTHW_FPGA_REG_TYPE_RW, 0, 20, mac_pcs_gty_ctl_rx_fields }, - { MAC_PCS_GTY_CTL_TX, 39, 8, NTHW_FPGA_REG_TYPE_RW, 0, 8, mac_pcs_gty_ctl_tx_fields }, - { - MAC_PCS_GTY_DIFF_CTL, 35, 20, NTHW_FPGA_REG_TYPE_RW, 811800, 4, - mac_pcs_gty_diff_ctl_fields - }, - { MAC_PCS_GTY_LOOP, 20, 12, NTHW_FPGA_REG_TYPE_RW, 0, 4, mac_pcs_gty_loop_fields }, - { - MAC_PCS_GTY_POST_CURSOR, 36, 20, NTHW_FPGA_REG_TYPE_RW, 676500, 4, - mac_pcs_gty_post_cursor_fields - }, - { MAC_PCS_GTY_PRBS_SEL, 40, 32, NTHW_FPGA_REG_TYPE_RW, 0, 8, mac_pcs_gty_prbs_sel_fields }, - { - MAC_PCS_GTY_PRE_CURSOR, 37, 20, NTHW_FPGA_REG_TYPE_RW, 0, 4, - mac_pcs_gty_pre_cursor_fields - }, - { - MAC_PCS_GTY_RX_BUF_STAT, 34, 24, NTHW_FPGA_REG_TYPE_RO, 0, 8, - mac_pcs_gty_rx_buf_stat_fields - }, - { - MAC_PCS_GTY_SCAN_CTL, 41, 16, NTHW_FPGA_REG_TYPE_RW, 0, 16, - mac_pcs_gty_scan_ctl_fields - }, - { - MAC_PCS_GTY_SCAN_STAT, 42, 8, NTHW_FPGA_REG_TYPE_RO, 0, 8, - mac_pcs_gty_scan_stat_fields - }, - { MAC_PCS_GTY_STAT, 33, 16, NTHW_FPGA_REG_TYPE_RO, 0, 12, mac_pcs_gty_stat_fields }, - { MAC_PCS_LINK_SUMMARY, 0, 19, NTHW_FPGA_REG_TYPE_RO, 0, 11, mac_pcs_link_summary_fields }, - { - MAC_PCS_MAC_PCS_CONFIG, 19, 12, NTHW_FPGA_REG_TYPE_RW, 272, 12, - mac_pcs_mac_pcs_config_fields - }, - { - MAC_PCS_MAX_PKT_LEN, 17, 14, NTHW_FPGA_REG_TYPE_RW, 10000, 1, - mac_pcs_max_pkt_len_fields - }, - { MAC_PCS_PHYMAC_MISC, 16, 8, NTHW_FPGA_REG_TYPE_MIXED, 9, 5, mac_pcs_phymac_misc_fields }, - { MAC_PCS_PHY_STAT, 15, 3, NTHW_FPGA_REG_TYPE_RO, 0, 3, mac_pcs_phy_stat_fields }, - { MAC_PCS_STAT_PCS_RX, 21, 10, NTHW_FPGA_REG_TYPE_RO, 0, 10, mac_pcs_stat_pcs_rx_fields }, - { - MAC_PCS_STAT_PCS_RX_LATCH, 22, 10, NTHW_FPGA_REG_TYPE_RO, 0, 10, - mac_pcs_stat_pcs_rx_latch_fields - }, - { MAC_PCS_STAT_PCS_TX, 23, 10, NTHW_FPGA_REG_TYPE_RO, 0, 10, mac_pcs_stat_pcs_tx_fields }, - { MAC_PCS_SYNCED, 24, 20, NTHW_FPGA_REG_TYPE_RO, 0, 1, mac_pcs_synced_fields }, - { MAC_PCS_SYNCED_ERR, 25, 20, NTHW_FPGA_REG_TYPE_RO, 0, 1, mac_pcs_synced_err_fields }, - { MAC_PCS_TEST_ERR, 32, 16, NTHW_FPGA_REG_TYPE_RO, 0, 1, mac_pcs_test_err_fields }, - { - MAC_PCS_TIMESTAMP_COMP, 18, 32, NTHW_FPGA_REG_TYPE_RW, 94373291, 2, - 
mac_pcs_timestamp_comp_fields - }, - { MAC_PCS_VL_DEMUXED, 29, 20, NTHW_FPGA_REG_TYPE_RO, 0, 1, mac_pcs_vl_demuxed_fields }, - { - MAC_PCS_VL_DEMUXED_CHG, 30, 20, NTHW_FPGA_REG_TYPE_RO, 0, 1, - mac_pcs_vl_demuxed_chg_fields - }, +static nthw_fpga_field_init_s sta_cfg_fields[] = { + { STA_CFG_CNT_CLEAR, 1, 1, 0 }, + { STA_CFG_DMA_ENA, 1, 0, 0 }, }; -static nthw_fpga_field_init_s pci_rd_tg_tg_ctrl_fields[] = { - { PCI_RD_TG_TG_CTRL_TG_RD_RDY, 1, 0, 0 }, +static nthw_fpga_field_init_s sta_cv_err_fields[] = { + { STA_CV_ERR_CNT, 32, 0, 0x0000 }, }; -static nthw_fpga_field_init_s pci_rd_tg_tg_rdaddr_fields[] = { - { PCI_RD_TG_TG_RDADDR_RAM_ADDR, 9, 0, 0 }, +static nthw_fpga_field_init_s sta_fcs_err_fields[] = { + { STA_FCS_ERR_CNT, 32, 0, 0x0000 }, }; -static nthw_fpga_field_init_s pci_rd_tg_tg_rddata0_fields[] = { - { PCI_RD_TG_TG_RDDATA0_PHYS_ADDR_LOW, 32, 0, 0 }, +static nthw_fpga_field_init_s sta_host_adr_lsb_fields[] = { + { STA_HOST_ADR_LSB_LSB, 32, 0, 0 }, }; -static nthw_fpga_field_init_s pci_rd_tg_tg_rddata1_fields[] = { - { PCI_RD_TG_TG_RDDATA1_PHYS_ADDR_HIGH, 32, 0, 0 }, +static nthw_fpga_field_init_s sta_host_adr_msb_fields[] = { + { STA_HOST_ADR_MSB_MSB, 32, 0, 0 }, }; -static nthw_fpga_field_init_s pci_rd_tg_tg_rddata2_fields[] = { - { PCI_RD_TG_TG_RDDATA2_REQ_HID, 6, 22, 0 }, - { PCI_RD_TG_TG_RDDATA2_REQ_SIZE, 22, 0, 0 }, - { PCI_RD_TG_TG_RDDATA2_WAIT, 1, 30, 0 }, - { PCI_RD_TG_TG_RDDATA2_WRAP, 1, 31, 0 }, +static nthw_fpga_field_init_s sta_load_bin_fields[] = { + { STA_LOAD_BIN_BIN, 32, 0, 8388607 }, }; -static nthw_fpga_field_init_s pci_rd_tg_tg_rd_run_fields[] = { - { PCI_RD_TG_TG_RD_RUN_RD_ITERATION, 16, 0, 0 }, +static nthw_fpga_field_init_s sta_load_bps_rx_0_fields[] = { + { STA_LOAD_BPS_RX_0_BPS, 32, 0, 0x0000 }, }; -static nthw_fpga_register_init_s pci_rd_tg_registers[] = { - { PCI_RD_TG_TG_CTRL, 5, 1, NTHW_FPGA_REG_TYPE_RO, 0, 1, pci_rd_tg_tg_ctrl_fields }, - { PCI_RD_TG_TG_RDADDR, 3, 9, NTHW_FPGA_REG_TYPE_WO, 0, 1, pci_rd_tg_tg_rdaddr_fields }, - { PCI_RD_TG_TG_RDDATA0, 0, 32, NTHW_FPGA_REG_TYPE_WO, 0, 1, pci_rd_tg_tg_rddata0_fields }, - { PCI_RD_TG_TG_RDDATA1, 1, 32, NTHW_FPGA_REG_TYPE_WO, 0, 1, pci_rd_tg_tg_rddata1_fields }, - { PCI_RD_TG_TG_RDDATA2, 2, 32, NTHW_FPGA_REG_TYPE_WO, 0, 4, pci_rd_tg_tg_rddata2_fields }, - { PCI_RD_TG_TG_RD_RUN, 4, 16, NTHW_FPGA_REG_TYPE_WO, 0, 1, pci_rd_tg_tg_rd_run_fields }, +static nthw_fpga_field_init_s sta_load_bps_rx_1_fields[] = { + { STA_LOAD_BPS_RX_1_BPS, 32, 0, 0x0000 }, }; -static nthw_fpga_field_init_s pci_wr_tg_tg_ctrl_fields[] = { - { PCI_WR_TG_TG_CTRL_TG_WR_RDY, 1, 0, 0 }, +static nthw_fpga_field_init_s sta_load_bps_tx_0_fields[] = { + { STA_LOAD_BPS_TX_0_BPS, 32, 0, 0x0000 }, }; -static nthw_fpga_field_init_s pci_wr_tg_tg_seq_fields[] = { - { PCI_WR_TG_TG_SEQ_SEQUENCE, 16, 0, 0 }, +static nthw_fpga_field_init_s sta_load_bps_tx_1_fields[] = { + { STA_LOAD_BPS_TX_1_BPS, 32, 0, 0x0000 }, }; -static nthw_fpga_field_init_s pci_wr_tg_tg_wraddr_fields[] = { - { PCI_WR_TG_TG_WRADDR_RAM_ADDR, 9, 0, 0 }, +static nthw_fpga_field_init_s sta_load_pps_rx_0_fields[] = { + { STA_LOAD_PPS_RX_0_PPS, 32, 0, 0x0000 }, }; -static nthw_fpga_field_init_s pci_wr_tg_tg_wrdata0_fields[] = { - { PCI_WR_TG_TG_WRDATA0_PHYS_ADDR_LOW, 32, 0, 0 }, +static nthw_fpga_field_init_s sta_load_pps_rx_1_fields[] = { + { STA_LOAD_PPS_RX_1_PPS, 32, 0, 0x0000 }, }; -static nthw_fpga_field_init_s pci_wr_tg_tg_wrdata1_fields[] = { - { PCI_WR_TG_TG_WRDATA1_PHYS_ADDR_HIGH, 32, 0, 0 }, +static nthw_fpga_field_init_s sta_load_pps_tx_0_fields[] = { + { 
STA_LOAD_PPS_TX_0_PPS, 32, 0, 0x0000 }, }; -static nthw_fpga_field_init_s pci_wr_tg_tg_wrdata2_fields[] = { - { PCI_WR_TG_TG_WRDATA2_INC_MODE, 1, 29, 0 }, { PCI_WR_TG_TG_WRDATA2_REQ_HID, 6, 22, 0 }, - { PCI_WR_TG_TG_WRDATA2_REQ_SIZE, 22, 0, 0 }, { PCI_WR_TG_TG_WRDATA2_WAIT, 1, 30, 0 }, - { PCI_WR_TG_TG_WRDATA2_WRAP, 1, 31, 0 }, +static nthw_fpga_field_init_s sta_load_pps_tx_1_fields[] = { + { STA_LOAD_PPS_TX_1_PPS, 32, 0, 0x0000 }, }; -static nthw_fpga_field_init_s pci_wr_tg_tg_wr_run_fields[] = { - { PCI_WR_TG_TG_WR_RUN_WR_ITERATION, 16, 0, 0 }, +static nthw_fpga_field_init_s sta_pckt_fields[] = { + { STA_PCKT_CNT, 32, 0, 0x0000 }, }; -static nthw_fpga_register_init_s pci_wr_tg_registers[] = { - { PCI_WR_TG_TG_CTRL, 5, 1, NTHW_FPGA_REG_TYPE_RO, 0, 1, pci_wr_tg_tg_ctrl_fields }, - { PCI_WR_TG_TG_SEQ, 6, 16, NTHW_FPGA_REG_TYPE_RW, 0, 1, pci_wr_tg_tg_seq_fields }, - { PCI_WR_TG_TG_WRADDR, 3, 9, NTHW_FPGA_REG_TYPE_WO, 0, 1, pci_wr_tg_tg_wraddr_fields }, - { PCI_WR_TG_TG_WRDATA0, 0, 32, NTHW_FPGA_REG_TYPE_WO, 0, 1, pci_wr_tg_tg_wrdata0_fields }, - { PCI_WR_TG_TG_WRDATA1, 1, 32, NTHW_FPGA_REG_TYPE_WO, 0, 1, pci_wr_tg_tg_wrdata1_fields }, - { PCI_WR_TG_TG_WRDATA2, 2, 32, NTHW_FPGA_REG_TYPE_WO, 0, 5, pci_wr_tg_tg_wrdata2_fields }, - { PCI_WR_TG_TG_WR_RUN, 4, 16, NTHW_FPGA_REG_TYPE_WO, 0, 1, pci_wr_tg_tg_wr_run_fields }, +static nthw_fpga_field_init_s sta_status_fields[] = { + { STA_STATUS_STAT_TOGGLE_MISSED, 1, 0, 0x0000 }, }; -static nthw_fpga_field_init_s pdb_config_fields[] = { - { PDB_CONFIG_PORT_OFS, 6, 3, 0 }, - { PDB_CONFIG_TS_FORMAT, 3, 0, 0 }, +static nthw_fpga_register_init_s sta_registers[] = { + { STA_BYTE, 4, 32, NTHW_FPGA_REG_TYPE_RO, 0, 1, sta_byte_fields }, + { STA_CFG, 0, 2, NTHW_FPGA_REG_TYPE_RW, 0, 2, sta_cfg_fields }, + { STA_CV_ERR, 5, 32, NTHW_FPGA_REG_TYPE_RO, 0, 1, sta_cv_err_fields }, + { STA_FCS_ERR, 6, 32, NTHW_FPGA_REG_TYPE_RO, 0, 1, sta_fcs_err_fields }, + { STA_HOST_ADR_LSB, 1, 32, NTHW_FPGA_REG_TYPE_WO, 0, 1, sta_host_adr_lsb_fields }, + { STA_HOST_ADR_MSB, 2, 32, NTHW_FPGA_REG_TYPE_WO, 0, 1, sta_host_adr_msb_fields }, + { STA_LOAD_BIN, 8, 32, NTHW_FPGA_REG_TYPE_WO, 8388607, 1, sta_load_bin_fields }, + { STA_LOAD_BPS_RX_0, 11, 32, NTHW_FPGA_REG_TYPE_RO, 0, 1, sta_load_bps_rx_0_fields }, + { STA_LOAD_BPS_RX_1, 13, 32, NTHW_FPGA_REG_TYPE_RO, 0, 1, sta_load_bps_rx_1_fields }, + { STA_LOAD_BPS_TX_0, 15, 32, NTHW_FPGA_REG_TYPE_RO, 0, 1, sta_load_bps_tx_0_fields }, + { STA_LOAD_BPS_TX_1, 17, 32, NTHW_FPGA_REG_TYPE_RO, 0, 1, sta_load_bps_tx_1_fields }, + { STA_LOAD_PPS_RX_0, 10, 32, NTHW_FPGA_REG_TYPE_RO, 0, 1, sta_load_pps_rx_0_fields }, + { STA_LOAD_PPS_RX_1, 12, 32, NTHW_FPGA_REG_TYPE_RO, 0, 1, sta_load_pps_rx_1_fields }, + { STA_LOAD_PPS_TX_0, 14, 32, NTHW_FPGA_REG_TYPE_RO, 0, 1, sta_load_pps_tx_0_fields }, + { STA_LOAD_PPS_TX_1, 16, 32, NTHW_FPGA_REG_TYPE_RO, 0, 1, sta_load_pps_tx_1_fields }, + { STA_PCKT, 3, 32, NTHW_FPGA_REG_TYPE_RO, 0, 1, sta_pckt_fields }, + { STA_STATUS, 7, 1, NTHW_FPGA_REG_TYPE_RC1, 0, 1, sta_status_fields }, }; -static nthw_fpga_field_init_s pdb_rcp_ctrl_fields[] = { - { PDB_RCP_CTRL_ADR, 4, 0, 0x0000 }, - { PDB_RCP_CTRL_CNT, 16, 16, 0x0000 }, +static nthw_fpga_field_init_s tsm_con0_config_fields[] = { + { TSM_CON0_CONFIG_BLIND, 5, 8, 9 }, { TSM_CON0_CONFIG_DC_SRC, 3, 5, 0 }, + { TSM_CON0_CONFIG_PORT, 3, 0, 0 }, { TSM_CON0_CONFIG_PPSIN_2_5V, 1, 13, 0 }, + { TSM_CON0_CONFIG_SAMPLE_EDGE, 2, 3, 2 }, }; -static nthw_fpga_field_init_s pdb_rcp_data_fields[] = { - { PDB_RCP_DATA_ALIGN, 1, 17, 0x0000 }, - { PDB_RCP_DATA_CRC_OVERWRITE, 1, 16, 0x0000 }, 
- { PDB_RCP_DATA_DESCRIPTOR, 4, 0, 0x0000 }, - { PDB_RCP_DATA_DESC_LEN, 5, 4, 0 }, - { PDB_RCP_DATA_DUPLICATE_BIT, 5, 61, 0x0000 }, - { PDB_RCP_DATA_DUPLICATE_EN, 1, 60, 0x0000 }, - { PDB_RCP_DATA_IP_PROT_TNL, 1, 57, 0x0000 }, - { PDB_RCP_DATA_OFS0_DYN, 5, 18, 0x0000 }, - { PDB_RCP_DATA_OFS0_REL, 8, 23, 0x0000 }, - { PDB_RCP_DATA_OFS1_DYN, 5, 31, 0x0000 }, - { PDB_RCP_DATA_OFS1_REL, 8, 36, 0x0000 }, - { PDB_RCP_DATA_OFS2_DYN, 5, 44, 0x0000 }, - { PDB_RCP_DATA_OFS2_REL, 8, 49, 0x0000 }, - { PDB_RCP_DATA_PCAP_KEEP_FCS, 1, 66, 0x0000 }, - { PDB_RCP_DATA_PPC_HSH, 2, 58, 0x0000 }, - { PDB_RCP_DATA_TX_IGNORE, 1, 14, 0x0000 }, - { PDB_RCP_DATA_TX_NOW, 1, 15, 0x0000 }, - { PDB_RCP_DATA_TX_PORT, 5, 9, 0x0000 }, +static nthw_fpga_field_init_s tsm_con0_interface_fields[] = { + { TSM_CON0_INTERFACE_EX_TERM, 2, 0, 3 }, { TSM_CON0_INTERFACE_IN_REF_PWM, 8, 12, 128 }, + { TSM_CON0_INTERFACE_PWM_ENA, 1, 2, 0 }, { TSM_CON0_INTERFACE_RESERVED, 1, 3, 0 }, + { TSM_CON0_INTERFACE_VTERM_PWM, 8, 4, 0 }, }; -static nthw_fpga_register_init_s pdb_registers[] = { - { PDB_CONFIG, 2, 10, NTHW_FPGA_REG_TYPE_WO, 0, 2, pdb_config_fields }, - { PDB_RCP_CTRL, 0, 32, NTHW_FPGA_REG_TYPE_WO, 0, 2, pdb_rcp_ctrl_fields }, - { PDB_RCP_DATA, 1, 67, NTHW_FPGA_REG_TYPE_WO, 0, 18, pdb_rcp_data_fields }, +static nthw_fpga_field_init_s tsm_con0_sample_hi_fields[] = { + { TSM_CON0_SAMPLE_HI_SEC, 32, 0, 0x0000 }, }; -static nthw_fpga_field_init_s qsl_qen_ctrl_fields[] = { - { QSL_QEN_CTRL_ADR, 5, 0, 0x0000 }, - { QSL_QEN_CTRL_CNT, 16, 16, 0x0000 }, +static nthw_fpga_field_init_s tsm_con0_sample_lo_fields[] = { + { TSM_CON0_SAMPLE_LO_NS, 32, 0, 0x0000 }, }; -static nthw_fpga_field_init_s qsl_qen_data_fields[] = { - { QSL_QEN_DATA_EN, 4, 0, 0x0000 }, +static nthw_fpga_field_init_s tsm_con1_config_fields[] = { + { TSM_CON1_CONFIG_BLIND, 5, 8, 9 }, { TSM_CON1_CONFIG_DC_SRC, 3, 5, 0 }, + { TSM_CON1_CONFIG_PORT, 3, 0, 0 }, { TSM_CON1_CONFIG_PPSIN_2_5V, 1, 13, 0 }, + { TSM_CON1_CONFIG_SAMPLE_EDGE, 2, 3, 2 }, }; -static nthw_fpga_field_init_s qsl_qst_ctrl_fields[] = { - { QSL_QST_CTRL_ADR, 12, 0, 0x0000 }, - { QSL_QST_CTRL_CNT, 16, 16, 0x0000 }, +static nthw_fpga_field_init_s tsm_con1_sample_hi_fields[] = { + { TSM_CON1_SAMPLE_HI_SEC, 32, 0, 0x0000 }, }; -static nthw_fpga_field_init_s qsl_qst_data_fields[] = { - { QSL_QST_DATA_LRE, 1, 9, 0x0000 }, { QSL_QST_DATA_QEN, 1, 7, 0x0000 }, - { QSL_QST_DATA_QUEUE, 7, 0, 0x0000 }, { QSL_QST_DATA_TCI, 16, 10, 0x0000 }, - { QSL_QST_DATA_TX_PORT, 1, 8, 0x0000 }, { QSL_QST_DATA_VEN, 1, 26, 0x0000 }, +static nthw_fpga_field_init_s tsm_con1_sample_lo_fields[] = { + { TSM_CON1_SAMPLE_LO_NS, 32, 0, 0x0000 }, }; -static nthw_fpga_field_init_s qsl_rcp_ctrl_fields[] = { - { QSL_RCP_CTRL_ADR, 5, 0, 0x0000 }, - { QSL_RCP_CTRL_CNT, 16, 16, 0x0000 }, +static nthw_fpga_field_init_s tsm_con2_config_fields[] = { + { TSM_CON2_CONFIG_BLIND, 5, 8, 9 }, { TSM_CON2_CONFIG_DC_SRC, 3, 5, 0 }, + { TSM_CON2_CONFIG_PORT, 3, 0, 0 }, { TSM_CON2_CONFIG_PPSIN_2_5V, 1, 13, 0 }, + { TSM_CON2_CONFIG_SAMPLE_EDGE, 2, 3, 2 }, }; - -static nthw_fpga_field_init_s qsl_rcp_data_fields[] = { - { QSL_RCP_DATA_DISCARD, 1, 0, 0x0000 }, { QSL_RCP_DATA_DROP, 2, 1, 0x0000 }, - { QSL_RCP_DATA_LR, 2, 51, 0x0000 }, { QSL_RCP_DATA_TBL_HI, 12, 15, 0x0000 }, - { QSL_RCP_DATA_TBL_IDX, 12, 27, 0x0000 }, { QSL_RCP_DATA_TBL_LO, 12, 3, 0x0000 }, - { QSL_RCP_DATA_TBL_MSK, 12, 39, 0x0000 }, { QSL_RCP_DATA_TSA, 1, 53, 0x0000 }, - { QSL_RCP_DATA_VLI, 2, 54, 0x0000 }, + +static nthw_fpga_field_init_s tsm_con2_sample_hi_fields[] = { + { TSM_CON2_SAMPLE_HI_SEC, 32, 0, 
0x0000 }, }; -static nthw_fpga_field_init_s qsl_unmq_ctrl_fields[] = { - { QSL_UNMQ_CTRL_ADR, 1, 0, 0x0000 }, - { QSL_UNMQ_CTRL_CNT, 16, 16, 0x0000 }, +static nthw_fpga_field_init_s tsm_con2_sample_lo_fields[] = { + { TSM_CON2_SAMPLE_LO_NS, 32, 0, 0x0000 }, }; -static nthw_fpga_field_init_s qsl_unmq_data_fields[] = { - { QSL_UNMQ_DATA_DEST_QUEUE, 7, 0, 0x0000 }, - { QSL_UNMQ_DATA_EN, 1, 7, 0x0000 }, +static nthw_fpga_field_init_s tsm_con3_config_fields[] = { + { TSM_CON3_CONFIG_BLIND, 5, 5, 26 }, + { TSM_CON3_CONFIG_PORT, 3, 0, 1 }, + { TSM_CON3_CONFIG_SAMPLE_EDGE, 2, 3, 1 }, }; -static nthw_fpga_register_init_s qsl_registers[] = { - { QSL_QEN_CTRL, 4, 32, NTHW_FPGA_REG_TYPE_WO, 0, 2, qsl_qen_ctrl_fields }, - { QSL_QEN_DATA, 5, 4, NTHW_FPGA_REG_TYPE_WO, 0, 1, qsl_qen_data_fields }, - { QSL_QST_CTRL, 2, 32, NTHW_FPGA_REG_TYPE_WO, 0, 2, qsl_qst_ctrl_fields }, - { QSL_QST_DATA, 3, 27, NTHW_FPGA_REG_TYPE_WO, 0, 6, qsl_qst_data_fields }, - { QSL_RCP_CTRL, 0, 32, NTHW_FPGA_REG_TYPE_WO, 0, 2, qsl_rcp_ctrl_fields }, - { QSL_RCP_DATA, 1, 56, NTHW_FPGA_REG_TYPE_WO, 0, 9, qsl_rcp_data_fields }, - { QSL_UNMQ_CTRL, 6, 32, NTHW_FPGA_REG_TYPE_WO, 0, 2, qsl_unmq_ctrl_fields }, - { QSL_UNMQ_DATA, 7, 8, NTHW_FPGA_REG_TYPE_WO, 0, 2, qsl_unmq_data_fields }, +static nthw_fpga_field_init_s tsm_con3_sample_hi_fields[] = { + { TSM_CON3_SAMPLE_HI_SEC, 32, 0, 0x0000 }, }; -static nthw_fpga_field_init_s rac_dbg_ctrl_fields[] = { - { RAC_DBG_CTRL_C, 32, 0, 0x0000 }, +static nthw_fpga_field_init_s tsm_con3_sample_lo_fields[] = { + { TSM_CON3_SAMPLE_LO_NS, 32, 0, 0x0000 }, }; -static nthw_fpga_field_init_s rac_dbg_data_fields[] = { - { RAC_DBG_DATA_D, 32, 0, 0x0000 }, +static nthw_fpga_field_init_s tsm_con4_config_fields[] = { + { TSM_CON4_CONFIG_BLIND, 5, 5, 26 }, + { TSM_CON4_CONFIG_PORT, 3, 0, 1 }, + { TSM_CON4_CONFIG_SAMPLE_EDGE, 2, 3, 1 }, }; -static nthw_fpga_field_init_s rac_rab_buf_free_fields[] = { - { RAC_RAB_BUF_FREE_IB_FREE, 9, 0, 511 }, { RAC_RAB_BUF_FREE_IB_OVF, 1, 12, 0 }, - { RAC_RAB_BUF_FREE_OB_FREE, 9, 16, 511 }, { RAC_RAB_BUF_FREE_OB_OVF, 1, 28, 0 }, - { RAC_RAB_BUF_FREE_TIMEOUT, 1, 31, 0 }, +static nthw_fpga_field_init_s tsm_con4_sample_hi_fields[] = { + { TSM_CON4_SAMPLE_HI_SEC, 32, 0, 0x0000 }, }; -static nthw_fpga_field_init_s rac_rab_buf_used_fields[] = { - { RAC_RAB_BUF_USED_FLUSH, 1, 31, 0 }, - { RAC_RAB_BUF_USED_IB_USED, 9, 0, 0 }, - { RAC_RAB_BUF_USED_OB_USED, 9, 16, 0 }, +static nthw_fpga_field_init_s tsm_con4_sample_lo_fields[] = { + { TSM_CON4_SAMPLE_LO_NS, 32, 0, 0x0000 }, }; -static nthw_fpga_field_init_s rac_rab_dma_ib_hi_fields[] = { - { RAC_RAB_DMA_IB_HI_PHYADDR, 32, 0, 0 }, +static nthw_fpga_field_init_s tsm_con5_config_fields[] = { + { TSM_CON5_CONFIG_BLIND, 5, 5, 26 }, + { TSM_CON5_CONFIG_PORT, 3, 0, 1 }, + { TSM_CON5_CONFIG_SAMPLE_EDGE, 2, 3, 1 }, }; -static nthw_fpga_field_init_s rac_rab_dma_ib_lo_fields[] = { - { RAC_RAB_DMA_IB_LO_PHYADDR, 32, 0, 0 }, +static nthw_fpga_field_init_s tsm_con5_sample_hi_fields[] = { + { TSM_CON5_SAMPLE_HI_SEC, 32, 0, 0x0000 }, }; -static nthw_fpga_field_init_s rac_rab_dma_ib_rd_fields[] = { - { RAC_RAB_DMA_IB_RD_PTR, 16, 0, 0 }, +static nthw_fpga_field_init_s tsm_con5_sample_lo_fields[] = { + { TSM_CON5_SAMPLE_LO_TIME, 32, 0, 0x0000 }, }; -static nthw_fpga_field_init_s rac_rab_dma_ib_wr_fields[] = { - { RAC_RAB_DMA_IB_WR_PTR, 16, 0, 0 }, +static nthw_fpga_field_init_s tsm_con6_config_fields[] = { + { TSM_CON6_CONFIG_BLIND, 5, 5, 26 }, + { TSM_CON6_CONFIG_PORT, 3, 0, 1 }, + { TSM_CON6_CONFIG_SAMPLE_EDGE, 2, 3, 1 }, }; -static nthw_fpga_field_init_s 
rac_rab_dma_ob_hi_fields[] = { - { RAC_RAB_DMA_OB_HI_PHYADDR, 32, 0, 0 }, +static nthw_fpga_field_init_s tsm_con6_sample_hi_fields[] = { + { TSM_CON6_SAMPLE_HI_SEC, 32, 0, 0x0000 }, }; -static nthw_fpga_field_init_s rac_rab_dma_ob_lo_fields[] = { - { RAC_RAB_DMA_OB_LO_PHYADDR, 32, 0, 0 }, +static nthw_fpga_field_init_s tsm_con6_sample_lo_fields[] = { + { TSM_CON6_SAMPLE_LO_NS, 32, 0, 0x0000 }, }; -static nthw_fpga_field_init_s rac_rab_dma_ob_wr_fields[] = { - { RAC_RAB_DMA_OB_WR_PTR, 16, 0, 0 }, +static nthw_fpga_field_init_s tsm_con7_host_sample_hi_fields[] = { + { TSM_CON7_HOST_SAMPLE_HI_SEC, 32, 0, 0x0000 }, }; -static nthw_fpga_field_init_s rac_rab_ib_data_fields[] = { - { RAC_RAB_IB_DATA_D, 32, 0, 0x0000 }, +static nthw_fpga_field_init_s tsm_con7_host_sample_lo_fields[] = { + { TSM_CON7_HOST_SAMPLE_LO_NS, 32, 0, 0x0000 }, }; -static nthw_fpga_field_init_s rac_rab_init_fields[] = { - { RAC_RAB_INIT_RAB, 3, 0, 7 }, +static nthw_fpga_field_init_s tsm_config_fields[] = { + { TSM_CONFIG_NTTS_SRC, 2, 5, 0 }, { TSM_CONFIG_NTTS_SYNC, 1, 4, 0 }, + { TSM_CONFIG_TIMESET_EDGE, 2, 8, 1 }, { TSM_CONFIG_TIMESET_SRC, 3, 10, 0 }, + { TSM_CONFIG_TIMESET_UP, 1, 7, 0 }, { TSM_CONFIG_TS_FORMAT, 4, 0, 1 }, }; -static nthw_fpga_field_init_s rac_rab_ob_data_fields[] = { - { RAC_RAB_OB_DATA_D, 32, 0, 0x0000 }, +static nthw_fpga_field_init_s tsm_int_config_fields[] = { + { TSM_INT_CONFIG_AUTO_DISABLE, 1, 0, 0 }, + { TSM_INT_CONFIG_MASK, 19, 1, 0 }, }; -static nthw_fpga_register_init_s rac_registers[] = { - { RAC_DBG_CTRL, 4200, 32, NTHW_FPGA_REG_TYPE_RW, 0, 1, rac_dbg_ctrl_fields }, - { RAC_DBG_DATA, 4208, 32, NTHW_FPGA_REG_TYPE_RW, 0, 1, rac_dbg_data_fields }, - { - RAC_RAB_BUF_FREE, 4176, 32, NTHW_FPGA_REG_TYPE_MIXED, 33489407, 5, - rac_rab_buf_free_fields - }, - { RAC_RAB_BUF_USED, 4184, 32, NTHW_FPGA_REG_TYPE_MIXED, 0, 3, rac_rab_buf_used_fields }, - { RAC_RAB_DMA_IB_HI, 4360, 32, NTHW_FPGA_REG_TYPE_WO, 0, 1, rac_rab_dma_ib_hi_fields }, - { RAC_RAB_DMA_IB_LO, 4352, 32, NTHW_FPGA_REG_TYPE_WO, 0, 1, rac_rab_dma_ib_lo_fields }, - { RAC_RAB_DMA_IB_RD, 4424, 16, NTHW_FPGA_REG_TYPE_RO, 0, 1, rac_rab_dma_ib_rd_fields }, - { RAC_RAB_DMA_IB_WR, 4416, 16, NTHW_FPGA_REG_TYPE_WO, 0, 1, rac_rab_dma_ib_wr_fields }, - { RAC_RAB_DMA_OB_HI, 4376, 32, NTHW_FPGA_REG_TYPE_WO, 0, 1, rac_rab_dma_ob_hi_fields }, - { RAC_RAB_DMA_OB_LO, 4368, 32, NTHW_FPGA_REG_TYPE_WO, 0, 1, rac_rab_dma_ob_lo_fields }, - { RAC_RAB_DMA_OB_WR, 4480, 16, NTHW_FPGA_REG_TYPE_RO, 0, 1, rac_rab_dma_ob_wr_fields }, - { RAC_RAB_IB_DATA, 4160, 32, NTHW_FPGA_REG_TYPE_WO, 0, 1, rac_rab_ib_data_fields }, - { RAC_RAB_INIT, 4192, 3, NTHW_FPGA_REG_TYPE_RW, 7, 1, rac_rab_init_fields }, - { RAC_RAB_OB_DATA, 4168, 32, NTHW_FPGA_REG_TYPE_RC1, 0, 1, rac_rab_ob_data_fields }, +static nthw_fpga_field_init_s tsm_int_stat_fields[] = { + { TSM_INT_STAT_CAUSE, 19, 1, 0 }, + { TSM_INT_STAT_ENABLE, 1, 0, 0 }, }; -static nthw_fpga_field_init_s rmc_ctrl_fields[] = { - { RMC_CTRL_BLOCK_KEEPA, 1, 1, 1 }, { RMC_CTRL_BLOCK_MAC_PORT, 2, 8, 3 }, - { RMC_CTRL_BLOCK_RPP_SLICE, 8, 10, 0 }, { RMC_CTRL_BLOCK_STATT, 1, 0, 1 }, - { RMC_CTRL_LAG_PHY_ODD_EVEN, 1, 24, 0 }, +static nthw_fpga_field_init_s tsm_led_fields[] = { + { TSM_LED_LED0_BG_COLOR, 2, 3, 0 }, { TSM_LED_LED0_COLOR, 2, 1, 0 }, + { TSM_LED_LED0_MODE, 1, 0, 0 }, { TSM_LED_LED0_SRC, 4, 5, 0 }, + { TSM_LED_LED1_BG_COLOR, 2, 12, 0 }, { TSM_LED_LED1_COLOR, 2, 10, 0 }, + { TSM_LED_LED1_MODE, 1, 9, 0 }, { TSM_LED_LED1_SRC, 4, 14, 1 }, + { TSM_LED_LED2_BG_COLOR, 2, 21, 0 }, { TSM_LED_LED2_COLOR, 2, 19, 0 }, + { TSM_LED_LED2_MODE, 1, 
18, 0 }, { TSM_LED_LED2_SRC, 4, 23, 2 }, }; -static nthw_fpga_field_init_s rmc_dbg_fields[] = { - { RMC_DBG_MERGE, 31, 0, 0 }, +static nthw_fpga_field_init_s tsm_ntts_config_fields[] = { + { TSM_NTTS_CONFIG_AUTO_HARDSET, 1, 5, 1 }, + { TSM_NTTS_CONFIG_EXT_CLK_ADJ, 1, 6, 0 }, + { TSM_NTTS_CONFIG_HIGH_SAMPLE, 1, 4, 0 }, + { TSM_NTTS_CONFIG_TS_SRC_FORMAT, 4, 0, 0 }, }; -static nthw_fpga_field_init_s rmc_mac_if_fields[] = { - { RMC_MAC_IF_ERR, 31, 0, 0 }, +static nthw_fpga_field_init_s tsm_ntts_ext_stat_fields[] = { + { TSM_NTTS_EXT_STAT_MASTER_ID, 8, 16, 0x0000 }, + { TSM_NTTS_EXT_STAT_MASTER_REV, 8, 24, 0x0000 }, + { TSM_NTTS_EXT_STAT_MASTER_STAT, 16, 0, 0x0000 }, }; -static nthw_fpga_field_init_s rmc_status_fields[] = { - { RMC_STATUS_DESCR_FIFO_OF, 1, 16, 0 }, - { RMC_STATUS_SF_RAM_OF, 1, 0, 0 }, +static nthw_fpga_field_init_s tsm_ntts_limit_hi_fields[] = { + { TSM_NTTS_LIMIT_HI_SEC, 16, 0, 0 }, }; -static nthw_fpga_register_init_s rmc_registers[] = { - { RMC_CTRL, 0, 25, NTHW_FPGA_REG_TYPE_RW, 771, 5, rmc_ctrl_fields }, - { RMC_DBG, 2, 31, NTHW_FPGA_REG_TYPE_RO, 0, 1, rmc_dbg_fields }, - { RMC_MAC_IF, 3, 31, NTHW_FPGA_REG_TYPE_RO, 0, 1, rmc_mac_if_fields }, - { RMC_STATUS, 1, 17, NTHW_FPGA_REG_TYPE_RO, 0, 2, rmc_status_fields }, +static nthw_fpga_field_init_s tsm_ntts_limit_lo_fields[] = { + { TSM_NTTS_LIMIT_LO_NS, 32, 0, 100000 }, }; -static nthw_fpga_field_init_s rst9563_ctrl_fields[] = { - { RST9563_CTRL_PTP_MMCM_CLKSEL, 1, 2, 1 }, - { RST9563_CTRL_TS_CLKSEL, 1, 1, 1 }, - { RST9563_CTRL_TS_CLKSEL_OVERRIDE, 1, 0, 1 }, +static nthw_fpga_field_init_s tsm_ntts_offset_fields[] = { + { TSM_NTTS_OFFSET_NS, 30, 0, 0 }, }; -static nthw_fpga_field_init_s rst9563_power_fields[] = { - { RST9563_POWER_PU_NSEB, 1, 1, 0 }, - { RST9563_POWER_PU_PHY, 1, 0, 0 }, +static nthw_fpga_field_init_s tsm_ntts_sample_hi_fields[] = { + { TSM_NTTS_SAMPLE_HI_SEC, 32, 0, 0x0000 }, }; -static nthw_fpga_field_init_s rst9563_rst_fields[] = { - { RST9563_RST_CORE_MMCM, 1, 15, 0 }, { RST9563_RST_DDR4, 3, 3, 7 }, - { RST9563_RST_MAC_RX, 2, 9, 3 }, { RST9563_RST_PERIPH, 1, 13, 0 }, - { RST9563_RST_PHY, 2, 7, 3 }, { RST9563_RST_PTP, 1, 11, 1 }, - { RST9563_RST_PTP_MMCM, 1, 16, 0 }, { RST9563_RST_RPP, 1, 2, 1 }, - { RST9563_RST_SDC, 1, 6, 1 }, { RST9563_RST_SYS, 1, 0, 1 }, - { RST9563_RST_SYS_MMCM, 1, 14, 0 }, { RST9563_RST_TMC, 1, 1, 1 }, - { RST9563_RST_TS, 1, 12, 1 }, { RST9563_RST_TS_MMCM, 1, 17, 0 }, +static nthw_fpga_field_init_s tsm_ntts_sample_lo_fields[] = { + { TSM_NTTS_SAMPLE_LO_NS, 32, 0, 0x0000 }, }; -static nthw_fpga_field_init_s rst9563_stat_fields[] = { - { RST9563_STAT_CORE_MMCM_LOCKED, 1, 5, 0x0000 }, - { RST9563_STAT_DDR4_MMCM_LOCKED, 1, 2, 0x0000 }, - { RST9563_STAT_DDR4_PLL_LOCKED, 1, 3, 0x0000 }, - { RST9563_STAT_PTP_MMCM_LOCKED, 1, 0, 0x0000 }, - { RST9563_STAT_SYS_MMCM_LOCKED, 1, 4, 0x0000 }, - { RST9563_STAT_TS_MMCM_LOCKED, 1, 1, 0x0000 }, +static nthw_fpga_field_init_s tsm_ntts_stat_fields[] = { + { TSM_NTTS_STAT_NTTS_VALID, 1, 0, 0 }, + { TSM_NTTS_STAT_SIGNAL_LOST, 8, 1, 0 }, + { TSM_NTTS_STAT_SYNC_LOST, 8, 9, 0 }, }; -static nthw_fpga_field_init_s rst9563_sticky_fields[] = { - { RST9563_STICKY_CORE_MMCM_UNLOCKED, 1, 5, 0x0000 }, - { RST9563_STICKY_DDR4_MMCM_UNLOCKED, 1, 2, 0x0000 }, - { RST9563_STICKY_DDR4_PLL_UNLOCKED, 1, 3, 0x0000 }, - { RST9563_STICKY_PTP_MMCM_UNLOCKED, 1, 0, 0x0000 }, - { RST9563_STICKY_SYS_MMCM_UNLOCKED, 1, 4, 0x0000 }, - { RST9563_STICKY_TS_MMCM_UNLOCKED, 1, 1, 0x0000 }, +static nthw_fpga_field_init_s tsm_ntts_ts_t0_hi_fields[] = { + { TSM_NTTS_TS_T0_HI_TIME, 32, 0, 0x0000 
}, }; -static nthw_fpga_register_init_s rst9563_registers[] = { - { RST9563_CTRL, 1, 3, NTHW_FPGA_REG_TYPE_RW, 7, 3, rst9563_ctrl_fields }, - { RST9563_POWER, 4, 2, NTHW_FPGA_REG_TYPE_RW, 0, 2, rst9563_power_fields }, - { RST9563_RST, 0, 18, NTHW_FPGA_REG_TYPE_RW, 8191, 14, rst9563_rst_fields }, - { RST9563_STAT, 2, 6, NTHW_FPGA_REG_TYPE_RO, 0, 6, rst9563_stat_fields }, - { RST9563_STICKY, 3, 6, NTHW_FPGA_REG_TYPE_RC1, 0, 6, rst9563_sticky_fields }, +static nthw_fpga_field_init_s tsm_ntts_ts_t0_lo_fields[] = { + { TSM_NTTS_TS_T0_LO_TIME, 32, 0, 0x0000 }, }; -static nthw_fpga_field_init_s dbs_rx_am_ctrl_fields[] = { - { DBS_RX_AM_CTRL_ADR, 7, 0, 0x0000 }, - { DBS_RX_AM_CTRL_CNT, 16, 16, 0x0000 }, +static nthw_fpga_field_init_s tsm_ntts_ts_t0_offset_fields[] = { + { TSM_NTTS_TS_T0_OFFSET_COUNT, 32, 0, 0x0000 }, }; -static nthw_fpga_field_init_s dbs_rx_am_data_fields[] = { - { DBS_RX_AM_DATA_ENABLE, 1, 72, 0x0000 }, { DBS_RX_AM_DATA_GPA, 64, 0, 0x0000 }, - { DBS_RX_AM_DATA_HID, 8, 64, 0x0000 }, { DBS_RX_AM_DATA_INT, 1, 74, 0x0000 }, - { DBS_RX_AM_DATA_PCKED, 1, 73, 0x0000 }, +static nthw_fpga_field_init_s tsm_pb_ctrl_fields[] = { + { TSM_PB_CTRL_INSTMEM_WR, 1, 1, 0 }, + { TSM_PB_CTRL_RST, 1, 0, 0 }, }; -static nthw_fpga_field_init_s dbs_rx_control_fields[] = { - { DBS_RX_CONTROL_AME, 1, 7, 0 }, { DBS_RX_CONTROL_AMS, 4, 8, 8 }, - { DBS_RX_CONTROL_LQ, 7, 0, 0 }, { DBS_RX_CONTROL_QE, 1, 17, 0 }, - { DBS_RX_CONTROL_UWE, 1, 12, 0 }, { DBS_RX_CONTROL_UWS, 4, 13, 5 }, +static nthw_fpga_field_init_s tsm_pb_instmem_fields[] = { + { TSM_PB_INSTMEM_MEM_ADDR, 14, 0, 0 }, + { TSM_PB_INSTMEM_MEM_DATA, 18, 14, 0 }, }; -static nthw_fpga_field_init_s dbs_rx_dr_ctrl_fields[] = { - { DBS_RX_DR_CTRL_ADR, 7, 0, 0x0000 }, - { DBS_RX_DR_CTRL_CNT, 16, 16, 0x0000 }, +static nthw_fpga_field_init_s tsm_pi_ctrl_i_fields[] = { + { TSM_PI_CTRL_I_VAL, 32, 0, 0 }, }; -static nthw_fpga_field_init_s dbs_rx_dr_data_fields[] = { - { DBS_RX_DR_DATA_GPA, 64, 0, 0x0000 }, { DBS_RX_DR_DATA_HDR, 1, 88, 0x0000 }, - { DBS_RX_DR_DATA_HID, 8, 64, 0x0000 }, { DBS_RX_DR_DATA_PCKED, 1, 87, 0x0000 }, - { DBS_RX_DR_DATA_QS, 15, 72, 0x0000 }, +static nthw_fpga_field_init_s tsm_pi_ctrl_ki_fields[] = { + { TSM_PI_CTRL_KI_GAIN, 24, 0, 0x0000 }, }; -static nthw_fpga_field_init_s dbs_rx_idle_fields[] = { - { DBS_RX_IDLE_BUSY, 1, 8, 0 }, - { DBS_RX_IDLE_IDLE, 1, 0, 0x0000 }, - { DBS_RX_IDLE_QUEUE, 7, 1, 0x0000 }, +static nthw_fpga_field_init_s tsm_pi_ctrl_kp_fields[] = { + { TSM_PI_CTRL_KP_GAIN, 24, 0, 0x0000 }, }; -static nthw_fpga_field_init_s dbs_rx_init_fields[] = { - { DBS_RX_INIT_BUSY, 1, 8, 0 }, - { DBS_RX_INIT_INIT, 1, 0, 0x0000 }, - { DBS_RX_INIT_QUEUE, 7, 1, 0x0000 }, +static nthw_fpga_field_init_s tsm_pi_ctrl_shl_fields[] = { + { TSM_PI_CTRL_SHL_VAL, 4, 0, 0x0000 }, }; -static nthw_fpga_field_init_s dbs_rx_init_val_fields[] = { - { DBS_RX_INIT_VAL_IDX, 16, 0, 0x0000 }, - { DBS_RX_INIT_VAL_PTR, 15, 16, 0x0000 }, +static nthw_fpga_field_init_s tsm_stat_fields[] = { + { TSM_STAT_HARD_SYNC, 8, 8, 0 }, { TSM_STAT_LINK_CON0, 1, 0, 0 }, + { TSM_STAT_LINK_CON1, 1, 1, 0 }, { TSM_STAT_LINK_CON2, 1, 2, 0 }, + { TSM_STAT_LINK_CON3, 1, 3, 0 }, { TSM_STAT_LINK_CON4, 1, 4, 0 }, + { TSM_STAT_LINK_CON5, 1, 5, 0 }, { TSM_STAT_NTTS_INSYNC, 1, 6, 0 }, + { TSM_STAT_PTP_MI_PRESENT, 1, 7, 0 }, }; -static nthw_fpga_field_init_s dbs_rx_ptr_fields[] = { - { DBS_RX_PTR_PTR, 16, 0, 0x0000 }, - { DBS_RX_PTR_QUEUE, 7, 16, 0x0000 }, - { DBS_RX_PTR_VALID, 1, 23, 0x0000 }, +static nthw_fpga_field_init_s tsm_timer_ctrl_fields[] = { + { TSM_TIMER_CTRL_TIMER_EN_T0, 1, 0, 0 
}, + { TSM_TIMER_CTRL_TIMER_EN_T1, 1, 1, 0 }, }; -static nthw_fpga_field_init_s dbs_rx_uw_ctrl_fields[] = { - { DBS_RX_UW_CTRL_ADR, 7, 0, 0x0000 }, - { DBS_RX_UW_CTRL_CNT, 16, 16, 0x0000 }, +static nthw_fpga_field_init_s tsm_timer_t0_fields[] = { + { TSM_TIMER_T0_MAX_COUNT, 30, 0, 50000 }, }; -static nthw_fpga_field_init_s dbs_rx_uw_data_fields[] = { - { DBS_RX_UW_DATA_GPA, 64, 0, 0x0000 }, { DBS_RX_UW_DATA_HID, 8, 64, 0x0000 }, - { DBS_RX_UW_DATA_INT, 1, 88, 0x0000 }, { DBS_RX_UW_DATA_ISTK, 1, 92, 0x0000 }, - { DBS_RX_UW_DATA_PCKED, 1, 87, 0x0000 }, { DBS_RX_UW_DATA_QS, 15, 72, 0x0000 }, - { DBS_RX_UW_DATA_VEC, 3, 89, 0x0000 }, +static nthw_fpga_field_init_s tsm_timer_t1_fields[] = { + { TSM_TIMER_T1_MAX_COUNT, 30, 0, 50000 }, }; -static nthw_fpga_field_init_s dbs_tx_am_ctrl_fields[] = { - { DBS_TX_AM_CTRL_ADR, 7, 0, 0x0000 }, - { DBS_TX_AM_CTRL_CNT, 16, 16, 0x0000 }, +static nthw_fpga_field_init_s tsm_time_hardset_hi_fields[] = { + { TSM_TIME_HARDSET_HI_TIME, 32, 0, 0x0000 }, }; -static nthw_fpga_field_init_s dbs_tx_am_data_fields[] = { - { DBS_TX_AM_DATA_ENABLE, 1, 72, 0x0000 }, { DBS_TX_AM_DATA_GPA, 64, 0, 0x0000 }, - { DBS_TX_AM_DATA_HID, 8, 64, 0x0000 }, { DBS_TX_AM_DATA_INT, 1, 74, 0x0000 }, - { DBS_TX_AM_DATA_PCKED, 1, 73, 0x0000 }, +static nthw_fpga_field_init_s tsm_time_hardset_lo_fields[] = { + { TSM_TIME_HARDSET_LO_TIME, 32, 0, 0x0000 }, }; -static nthw_fpga_field_init_s dbs_tx_control_fields[] = { - { DBS_TX_CONTROL_AME, 1, 7, 0 }, { DBS_TX_CONTROL_AMS, 4, 8, 5 }, - { DBS_TX_CONTROL_LQ, 7, 0, 0 }, { DBS_TX_CONTROL_QE, 1, 17, 0 }, - { DBS_TX_CONTROL_UWE, 1, 12, 0 }, { DBS_TX_CONTROL_UWS, 4, 13, 8 }, +static nthw_fpga_field_init_s tsm_time_hi_fields[] = { + { TSM_TIME_HI_SEC, 32, 0, 0x0000 }, }; -static nthw_fpga_field_init_s dbs_tx_dr_ctrl_fields[] = { - { DBS_TX_DR_CTRL_ADR, 7, 0, 0x0000 }, - { DBS_TX_DR_CTRL_CNT, 16, 16, 0x0000 }, +static nthw_fpga_field_init_s tsm_time_lo_fields[] = { + { TSM_TIME_LO_NS, 32, 0, 0x0000 }, }; -static nthw_fpga_field_init_s dbs_tx_dr_data_fields[] = { - { DBS_TX_DR_DATA_GPA, 64, 0, 0x0000 }, { DBS_TX_DR_DATA_HDR, 1, 88, 0x0000 }, - { DBS_TX_DR_DATA_HID, 8, 64, 0x0000 }, { DBS_TX_DR_DATA_PCKED, 1, 87, 0x0000 }, - { DBS_TX_DR_DATA_PORT, 1, 89, 0x0000 }, { DBS_TX_DR_DATA_QS, 15, 72, 0x0000 }, +static nthw_fpga_field_init_s tsm_time_rate_adj_fields[] = { + { TSM_TIME_RATE_ADJ_FRACTION, 29, 0, 0 }, }; -static nthw_fpga_field_init_s dbs_tx_idle_fields[] = { - { DBS_TX_IDLE_BUSY, 1, 8, 0 }, - { DBS_TX_IDLE_IDLE, 1, 0, 0x0000 }, - { DBS_TX_IDLE_QUEUE, 7, 1, 0x0000 }, +static nthw_fpga_field_init_s tsm_ts_hi_fields[] = { + { TSM_TS_HI_TIME, 32, 0, 0x0000 }, }; -static nthw_fpga_field_init_s dbs_tx_init_fields[] = { - { DBS_TX_INIT_BUSY, 1, 8, 0 }, - { DBS_TX_INIT_INIT, 1, 0, 0x0000 }, - { DBS_TX_INIT_QUEUE, 7, 1, 0x0000 }, +static nthw_fpga_field_init_s tsm_ts_lo_fields[] = { + { TSM_TS_LO_TIME, 32, 0, 0x0000 }, }; -static nthw_fpga_field_init_s dbs_tx_init_val_fields[] = { - { DBS_TX_INIT_VAL_IDX, 16, 0, 0x0000 }, - { DBS_TX_INIT_VAL_PTR, 15, 16, 0x0000 }, +static nthw_fpga_field_init_s tsm_ts_offset_fields[] = { + { TSM_TS_OFFSET_NS, 30, 0, 0 }, }; -static nthw_fpga_field_init_s dbs_tx_ptr_fields[] = { - { DBS_TX_PTR_PTR, 16, 0, 0x0000 }, - { DBS_TX_PTR_QUEUE, 7, 16, 0x0000 }, - { DBS_TX_PTR_VALID, 1, 23, 0x0000 }, +static nthw_fpga_field_init_s tsm_ts_stat_fields[] = { + { TSM_TS_STAT_OVERRUN, 1, 16, 0 }, + { TSM_TS_STAT_SAMPLES, 16, 0, 0 }, }; -static nthw_fpga_field_init_s dbs_tx_qos_ctrl_fields[] = { - { DBS_TX_QOS_CTRL_ADR, 1, 0, 0x0000 }, - { 
DBS_TX_QOS_CTRL_CNT, 16, 16, 0x0000 }, +static nthw_fpga_field_init_s tsm_ts_stat_hi_offset_fields[] = { + { TSM_TS_STAT_HI_OFFSET_NS, 32, 0, 0 }, }; -static nthw_fpga_field_init_s dbs_tx_qos_data_fields[] = { - { DBS_TX_QOS_DATA_BS, 27, 17, 0x0000 }, - { DBS_TX_QOS_DATA_EN, 1, 0, 0x0000 }, - { DBS_TX_QOS_DATA_IR, 16, 1, 0x0000 }, +static nthw_fpga_field_init_s tsm_ts_stat_lo_offset_fields[] = { + { TSM_TS_STAT_LO_OFFSET_NS, 32, 0, 0 }, }; -static nthw_fpga_field_init_s dbs_tx_qos_rate_fields[] = { - { DBS_TX_QOS_RATE_DIV, 19, 16, 2 }, - { DBS_TX_QOS_RATE_MUL, 16, 0, 1 }, +static nthw_fpga_field_init_s tsm_ts_stat_tar_hi_fields[] = { + { TSM_TS_STAT_TAR_HI_SEC, 32, 0, 0 }, }; -static nthw_fpga_field_init_s dbs_tx_qp_ctrl_fields[] = { - { DBS_TX_QP_CTRL_ADR, 7, 0, 0x0000 }, - { DBS_TX_QP_CTRL_CNT, 16, 16, 0x0000 }, +static nthw_fpga_field_init_s tsm_ts_stat_tar_lo_fields[] = { + { TSM_TS_STAT_TAR_LO_NS, 32, 0, 0 }, }; -static nthw_fpga_field_init_s dbs_tx_qp_data_fields[] = { - { DBS_TX_QP_DATA_VPORT, 1, 0, 0x0000 }, +static nthw_fpga_field_init_s tsm_ts_stat_x_fields[] = { + { TSM_TS_STAT_X_NS, 32, 0, 0 }, }; -static nthw_fpga_field_init_s dbs_tx_uw_ctrl_fields[] = { - { DBS_TX_UW_CTRL_ADR, 7, 0, 0x0000 }, - { DBS_TX_UW_CTRL_CNT, 16, 16, 0x0000 }, +static nthw_fpga_field_init_s tsm_ts_stat_x2_hi_fields[] = { + { TSM_TS_STAT_X2_HI_NS, 16, 0, 0 }, }; -static nthw_fpga_field_init_s dbs_tx_uw_data_fields[] = { - { DBS_TX_UW_DATA_GPA, 64, 0, 0x0000 }, { DBS_TX_UW_DATA_HID, 8, 64, 0x0000 }, - { DBS_TX_UW_DATA_INO, 1, 93, 0x0000 }, { DBS_TX_UW_DATA_INT, 1, 88, 0x0000 }, - { DBS_TX_UW_DATA_ISTK, 1, 92, 0x0000 }, { DBS_TX_UW_DATA_PCKED, 1, 87, 0x0000 }, - { DBS_TX_UW_DATA_QS, 15, 72, 0x0000 }, { DBS_TX_UW_DATA_VEC, 3, 89, 0x0000 }, +static nthw_fpga_field_init_s tsm_ts_stat_x2_lo_fields[] = { + { TSM_TS_STAT_X2_LO_NS, 32, 0, 0 }, }; -static nthw_fpga_register_init_s dbs_registers[] = { - { DBS_RX_AM_CTRL, 10, 32, NTHW_FPGA_REG_TYPE_WO, 0, 2, dbs_rx_am_ctrl_fields }, - { DBS_RX_AM_DATA, 11, 75, NTHW_FPGA_REG_TYPE_WO, 0, 5, dbs_rx_am_data_fields }, - { DBS_RX_CONTROL, 0, 18, NTHW_FPGA_REG_TYPE_RW, 43008, 6, dbs_rx_control_fields }, - { DBS_RX_DR_CTRL, 18, 32, NTHW_FPGA_REG_TYPE_WO, 0, 2, dbs_rx_dr_ctrl_fields }, - { DBS_RX_DR_DATA, 19, 89, NTHW_FPGA_REG_TYPE_WO, 0, 5, dbs_rx_dr_data_fields }, - { DBS_RX_IDLE, 8, 9, NTHW_FPGA_REG_TYPE_MIXED, 0, 3, dbs_rx_idle_fields }, - { DBS_RX_INIT, 2, 9, NTHW_FPGA_REG_TYPE_MIXED, 0, 3, dbs_rx_init_fields }, - { DBS_RX_INIT_VAL, 3, 31, NTHW_FPGA_REG_TYPE_WO, 0, 2, dbs_rx_init_val_fields }, - { DBS_RX_PTR, 4, 24, NTHW_FPGA_REG_TYPE_MIXED, 0, 3, dbs_rx_ptr_fields }, - { DBS_RX_UW_CTRL, 14, 32, NTHW_FPGA_REG_TYPE_WO, 0, 2, dbs_rx_uw_ctrl_fields }, - { DBS_RX_UW_DATA, 15, 93, NTHW_FPGA_REG_TYPE_WO, 0, 7, dbs_rx_uw_data_fields }, - { DBS_TX_AM_CTRL, 12, 32, NTHW_FPGA_REG_TYPE_WO, 0, 2, dbs_tx_am_ctrl_fields }, - { DBS_TX_AM_DATA, 13, 75, NTHW_FPGA_REG_TYPE_WO, 0, 5, dbs_tx_am_data_fields }, - { DBS_TX_CONTROL, 1, 18, NTHW_FPGA_REG_TYPE_RW, 66816, 6, dbs_tx_control_fields }, - { DBS_TX_DR_CTRL, 20, 32, NTHW_FPGA_REG_TYPE_WO, 0, 2, dbs_tx_dr_ctrl_fields }, - { DBS_TX_DR_DATA, 21, 90, NTHW_FPGA_REG_TYPE_WO, 0, 6, dbs_tx_dr_data_fields }, - { DBS_TX_IDLE, 9, 9, NTHW_FPGA_REG_TYPE_MIXED, 0, 3, dbs_tx_idle_fields }, - { DBS_TX_INIT, 5, 9, NTHW_FPGA_REG_TYPE_MIXED, 0, 3, dbs_tx_init_fields }, - { DBS_TX_INIT_VAL, 6, 31, NTHW_FPGA_REG_TYPE_WO, 0, 2, dbs_tx_init_val_fields }, - { DBS_TX_PTR, 7, 24, NTHW_FPGA_REG_TYPE_MIXED, 0, 3, dbs_tx_ptr_fields }, - { DBS_TX_QOS_CTRL, 24, 32, 
NTHW_FPGA_REG_TYPE_WO, 0, 2, dbs_tx_qos_ctrl_fields }, - { DBS_TX_QOS_DATA, 25, 44, NTHW_FPGA_REG_TYPE_WO, 0, 3, dbs_tx_qos_data_fields }, - { DBS_TX_QOS_RATE, 26, 35, NTHW_FPGA_REG_TYPE_RW, 131073, 2, dbs_tx_qos_rate_fields }, - { DBS_TX_QP_CTRL, 22, 32, NTHW_FPGA_REG_TYPE_WO, 0, 2, dbs_tx_qp_ctrl_fields }, - { DBS_TX_QP_DATA, 23, 1, NTHW_FPGA_REG_TYPE_WO, 0, 1, dbs_tx_qp_data_fields }, - { DBS_TX_UW_CTRL, 16, 32, NTHW_FPGA_REG_TYPE_WO, 0, 2, dbs_tx_uw_ctrl_fields }, - { DBS_TX_UW_DATA, 17, 94, NTHW_FPGA_REG_TYPE_WO, 0, 8, dbs_tx_uw_data_fields }, +static nthw_fpga_field_init_s tsm_utc_offset_fields[] = { + { TSM_UTC_OFFSET_SEC, 8, 0, 0 }, +}; + +static nthw_fpga_register_init_s tsm_registers[] = { + { TSM_CON0_CONFIG, 24, 14, NTHW_FPGA_REG_TYPE_RW, 2320, 5, tsm_con0_config_fields }, + { + TSM_CON0_INTERFACE, 25, 20, NTHW_FPGA_REG_TYPE_RW, 524291, 5, + tsm_con0_interface_fields + }, + { TSM_CON0_SAMPLE_HI, 27, 32, NTHW_FPGA_REG_TYPE_RO, 0, 1, tsm_con0_sample_hi_fields }, + { TSM_CON0_SAMPLE_LO, 26, 32, NTHW_FPGA_REG_TYPE_RO, 0, 1, tsm_con0_sample_lo_fields }, + { TSM_CON1_CONFIG, 28, 14, NTHW_FPGA_REG_TYPE_RW, 2320, 5, tsm_con1_config_fields }, + { TSM_CON1_SAMPLE_HI, 30, 32, NTHW_FPGA_REG_TYPE_RO, 0, 1, tsm_con1_sample_hi_fields }, + { TSM_CON1_SAMPLE_LO, 29, 32, NTHW_FPGA_REG_TYPE_RO, 0, 1, tsm_con1_sample_lo_fields }, + { TSM_CON2_CONFIG, 31, 14, NTHW_FPGA_REG_TYPE_RW, 2320, 5, tsm_con2_config_fields }, + { TSM_CON2_SAMPLE_HI, 33, 32, NTHW_FPGA_REG_TYPE_RO, 0, 1, tsm_con2_sample_hi_fields }, + { TSM_CON2_SAMPLE_LO, 32, 32, NTHW_FPGA_REG_TYPE_RO, 0, 1, tsm_con2_sample_lo_fields }, + { TSM_CON3_CONFIG, 34, 10, NTHW_FPGA_REG_TYPE_RW, 841, 3, tsm_con3_config_fields }, + { TSM_CON3_SAMPLE_HI, 36, 32, NTHW_FPGA_REG_TYPE_RO, 0, 1, tsm_con3_sample_hi_fields }, + { TSM_CON3_SAMPLE_LO, 35, 32, NTHW_FPGA_REG_TYPE_RO, 0, 1, tsm_con3_sample_lo_fields }, + { TSM_CON4_CONFIG, 37, 10, NTHW_FPGA_REG_TYPE_RW, 841, 3, tsm_con4_config_fields }, + { TSM_CON4_SAMPLE_HI, 39, 32, NTHW_FPGA_REG_TYPE_RO, 0, 1, tsm_con4_sample_hi_fields }, + { TSM_CON4_SAMPLE_LO, 38, 32, NTHW_FPGA_REG_TYPE_RO, 0, 1, tsm_con4_sample_lo_fields }, + { TSM_CON5_CONFIG, 40, 10, NTHW_FPGA_REG_TYPE_RW, 841, 3, tsm_con5_config_fields }, + { TSM_CON5_SAMPLE_HI, 42, 32, NTHW_FPGA_REG_TYPE_RO, 0, 1, tsm_con5_sample_hi_fields }, + { TSM_CON5_SAMPLE_LO, 41, 32, NTHW_FPGA_REG_TYPE_RO, 0, 1, tsm_con5_sample_lo_fields }, + { TSM_CON6_CONFIG, 43, 10, NTHW_FPGA_REG_TYPE_RW, 841, 3, tsm_con6_config_fields }, + { TSM_CON6_SAMPLE_HI, 45, 32, NTHW_FPGA_REG_TYPE_RO, 0, 1, tsm_con6_sample_hi_fields }, + { TSM_CON6_SAMPLE_LO, 44, 32, NTHW_FPGA_REG_TYPE_RO, 0, 1, tsm_con6_sample_lo_fields }, + { + TSM_CON7_HOST_SAMPLE_HI, 47, 32, NTHW_FPGA_REG_TYPE_RO, 0, 1, + tsm_con7_host_sample_hi_fields + }, + { + TSM_CON7_HOST_SAMPLE_LO, 46, 32, NTHW_FPGA_REG_TYPE_RO, 0, 1, + tsm_con7_host_sample_lo_fields + }, + { TSM_CONFIG, 0, 13, NTHW_FPGA_REG_TYPE_RW, 257, 6, tsm_config_fields }, + { TSM_INT_CONFIG, 2, 20, NTHW_FPGA_REG_TYPE_RW, 0, 2, tsm_int_config_fields }, + { TSM_INT_STAT, 3, 20, NTHW_FPGA_REG_TYPE_MIXED, 0, 2, tsm_int_stat_fields }, + { TSM_LED, 4, 27, NTHW_FPGA_REG_TYPE_RW, 16793600, 12, tsm_led_fields }, + { TSM_NTTS_CONFIG, 13, 7, NTHW_FPGA_REG_TYPE_RW, 32, 4, tsm_ntts_config_fields }, + { TSM_NTTS_EXT_STAT, 15, 32, NTHW_FPGA_REG_TYPE_MIXED, 0, 3, tsm_ntts_ext_stat_fields }, + { TSM_NTTS_LIMIT_HI, 23, 16, NTHW_FPGA_REG_TYPE_RW, 0, 1, tsm_ntts_limit_hi_fields }, + { TSM_NTTS_LIMIT_LO, 22, 32, NTHW_FPGA_REG_TYPE_RW, 100000, 1, tsm_ntts_limit_lo_fields }, 
+ { TSM_NTTS_OFFSET, 21, 30, NTHW_FPGA_REG_TYPE_RW, 0, 1, tsm_ntts_offset_fields }, + { TSM_NTTS_SAMPLE_HI, 19, 32, NTHW_FPGA_REG_TYPE_RO, 0, 1, tsm_ntts_sample_hi_fields }, + { TSM_NTTS_SAMPLE_LO, 18, 32, NTHW_FPGA_REG_TYPE_RO, 0, 1, tsm_ntts_sample_lo_fields }, + { TSM_NTTS_STAT, 14, 17, NTHW_FPGA_REG_TYPE_RO, 0, 3, tsm_ntts_stat_fields }, + { TSM_NTTS_TS_T0_HI, 17, 32, NTHW_FPGA_REG_TYPE_RO, 0, 1, tsm_ntts_ts_t0_hi_fields }, + { TSM_NTTS_TS_T0_LO, 16, 32, NTHW_FPGA_REG_TYPE_RO, 0, 1, tsm_ntts_ts_t0_lo_fields }, + { + TSM_NTTS_TS_T0_OFFSET, 20, 32, NTHW_FPGA_REG_TYPE_RO, 0, 1, + tsm_ntts_ts_t0_offset_fields + }, + { TSM_PB_CTRL, 63, 2, NTHW_FPGA_REG_TYPE_WO, 0, 2, tsm_pb_ctrl_fields }, + { TSM_PB_INSTMEM, 64, 32, NTHW_FPGA_REG_TYPE_WO, 0, 2, tsm_pb_instmem_fields }, + { TSM_PI_CTRL_I, 54, 32, NTHW_FPGA_REG_TYPE_WO, 0, 1, tsm_pi_ctrl_i_fields }, + { TSM_PI_CTRL_KI, 52, 24, NTHW_FPGA_REG_TYPE_RW, 0, 1, tsm_pi_ctrl_ki_fields }, + { TSM_PI_CTRL_KP, 51, 24, NTHW_FPGA_REG_TYPE_RW, 0, 1, tsm_pi_ctrl_kp_fields }, + { TSM_PI_CTRL_SHL, 53, 4, NTHW_FPGA_REG_TYPE_WO, 0, 1, tsm_pi_ctrl_shl_fields }, + { TSM_STAT, 1, 16, NTHW_FPGA_REG_TYPE_RO, 0, 9, tsm_stat_fields }, + { TSM_TIMER_CTRL, 48, 2, NTHW_FPGA_REG_TYPE_RW, 0, 2, tsm_timer_ctrl_fields }, + { TSM_TIMER_T0, 49, 30, NTHW_FPGA_REG_TYPE_RW, 50000, 1, tsm_timer_t0_fields }, + { TSM_TIMER_T1, 50, 30, NTHW_FPGA_REG_TYPE_RW, 50000, 1, tsm_timer_t1_fields }, + { TSM_TIME_HARDSET_HI, 12, 32, NTHW_FPGA_REG_TYPE_RO, 0, 1, tsm_time_hardset_hi_fields }, + { TSM_TIME_HARDSET_LO, 11, 32, NTHW_FPGA_REG_TYPE_RO, 0, 1, tsm_time_hardset_lo_fields }, + { TSM_TIME_HI, 9, 32, NTHW_FPGA_REG_TYPE_RW, 0, 1, tsm_time_hi_fields }, + { TSM_TIME_LO, 8, 32, NTHW_FPGA_REG_TYPE_RW, 0, 1, tsm_time_lo_fields }, + { TSM_TIME_RATE_ADJ, 10, 29, NTHW_FPGA_REG_TYPE_RW, 0, 1, tsm_time_rate_adj_fields }, + { TSM_TS_HI, 6, 32, NTHW_FPGA_REG_TYPE_RO, 0, 1, tsm_ts_hi_fields }, + { TSM_TS_LO, 5, 32, NTHW_FPGA_REG_TYPE_RO, 0, 1, tsm_ts_lo_fields }, + { TSM_TS_OFFSET, 7, 30, NTHW_FPGA_REG_TYPE_RW, 0, 1, tsm_ts_offset_fields }, + { TSM_TS_STAT, 55, 17, NTHW_FPGA_REG_TYPE_RO, 0, 2, tsm_ts_stat_fields }, + { + TSM_TS_STAT_HI_OFFSET, 62, 32, NTHW_FPGA_REG_TYPE_RO, 0, 1, + tsm_ts_stat_hi_offset_fields + }, + { + TSM_TS_STAT_LO_OFFSET, 61, 32, NTHW_FPGA_REG_TYPE_RO, 0, 1, + tsm_ts_stat_lo_offset_fields + }, + { TSM_TS_STAT_TAR_HI, 57, 32, NTHW_FPGA_REG_TYPE_RO, 0, 1, tsm_ts_stat_tar_hi_fields }, + { TSM_TS_STAT_TAR_LO, 56, 32, NTHW_FPGA_REG_TYPE_RO, 0, 1, tsm_ts_stat_tar_lo_fields }, + { TSM_TS_STAT_X, 58, 32, NTHW_FPGA_REG_TYPE_RO, 0, 1, tsm_ts_stat_x_fields }, + { TSM_TS_STAT_X2_HI, 60, 16, NTHW_FPGA_REG_TYPE_RO, 0, 1, tsm_ts_stat_x2_hi_fields }, + { TSM_TS_STAT_X2_LO, 59, 32, NTHW_FPGA_REG_TYPE_RO, 0, 1, tsm_ts_stat_x2_lo_fields }, + { TSM_UTC_OFFSET, 65, 8, NTHW_FPGA_REG_TYPE_RW, 0, 1, tsm_utc_offset_fields }, }; static nthw_fpga_module_init_s fpga_modules[] = { { MOD_CAT, 0, MOD_CAT, 0, 21, NTHW_FPGA_BUS_TYPE_RAB1, 768, 34, cat_registers }, + { MOD_CSU, 0, MOD_CSU, 0, 0, NTHW_FPGA_BUS_TYPE_RAB1, 9728, 2, csu_registers }, + { MOD_DBS, 0, MOD_DBS, 0, 11, NTHW_FPGA_BUS_TYPE_RAB2, 12832, 27, dbs_registers }, + { MOD_FLM, 0, MOD_FLM, 0, 25, NTHW_FPGA_BUS_TYPE_RAB1, 1280, 43, flm_registers }, { MOD_GFG, 0, MOD_GFG, 1, 1, NTHW_FPGA_BUS_TYPE_RAB2, 8704, 10, gfg_registers }, { MOD_GMF, 0, MOD_GMF, 2, 5, NTHW_FPGA_BUS_TYPE_RAB2, 9216, 12, gmf_registers }, - { MOD_DBS, 0, MOD_DBS, 0, 11, NTHW_FPGA_BUS_TYPE_RAB2, 12832, 27, dbs_registers}, { MOD_GMF, 1, MOD_GMF, 2, 5, NTHW_FPGA_BUS_TYPE_RAB2, 9728, 12, 
gmf_registers }, { MOD_GPIO_PHY, 0, MOD_GPIO_PHY, 1, 0, NTHW_FPGA_BUS_TYPE_RAB0, 16386, 2, gpio_phy_registers }, + { MOD_HFU, 0, MOD_HFU, 0, 2, NTHW_FPGA_BUS_TYPE_RAB1, 9472, 2, hfu_registers }, { MOD_HIF, 0, MOD_HIF, 0, 0, NTHW_FPGA_BUS_TYPE_PCI, 0, 18, hif_registers }, { MOD_HSH, 0, MOD_HSH, 0, 5, NTHW_FPGA_BUS_TYPE_RAB1, 1536, 2, hsh_registers }, + { MOD_IFR, 0, MOD_IFR, 0, 7, NTHW_FPGA_BUS_TYPE_RAB1, 9984, 6, ifr_registers }, { MOD_IIC, 0, MOD_IIC, 0, 1, NTHW_FPGA_BUS_TYPE_RAB0, 768, 22, iic_registers }, { MOD_IIC, 1, MOD_IIC, 0, 1, NTHW_FPGA_BUS_TYPE_RAB0, 896, 22, iic_registers }, { MOD_IIC, 2, MOD_IIC, 0, 1, NTHW_FPGA_BUS_TYPE_RAB0, 24832, 22, iic_registers }, @@ -1747,6 +3009,10 @@ static nthw_fpga_module_init_s fpga_modules[] = { MOD_MAC_PCS, 1, MOD_MAC_PCS, 0, 2, NTHW_FPGA_BUS_TYPE_RAB2, 11776, 44, mac_pcs_registers }, + { MOD_MAC_RX, 0, MOD_MAC_RX, 0, 0, NTHW_FPGA_BUS_TYPE_RAB2, 10752, 9, mac_rx_registers }, + { MOD_MAC_RX, 1, MOD_MAC_RX, 0, 0, NTHW_FPGA_BUS_TYPE_RAB2, 12288, 9, mac_rx_registers }, + { MOD_MAC_TX, 0, MOD_MAC_TX, 0, 0, NTHW_FPGA_BUS_TYPE_RAB2, 11264, 5, mac_tx_registers }, + { MOD_MAC_TX, 1, MOD_MAC_TX, 0, 0, NTHW_FPGA_BUS_TYPE_RAB2, 12800, 5, mac_tx_registers }, { MOD_PCI_RD_TG, 0, MOD_PCI_RD_TG, 0, 1, NTHW_FPGA_BUS_TYPE_RAB0, 2320, 6, pci_rd_tg_registers @@ -1759,7 +3025,14 @@ static nthw_fpga_module_init_s fpga_modules[] = { { MOD_QSL, 0, MOD_QSL, 0, 7, NTHW_FPGA_BUS_TYPE_RAB1, 1792, 8, qsl_registers }, { MOD_RAC, 0, MOD_RAC, 3, 0, NTHW_FPGA_BUS_TYPE_PCI, 8192, 14, rac_registers }, { MOD_RMC, 0, MOD_RMC, 1, 3, NTHW_FPGA_BUS_TYPE_RAB0, 12288, 4, rmc_registers }, + { MOD_RPP_LR, 0, MOD_RPP_LR, 0, 2, NTHW_FPGA_BUS_TYPE_RAB1, 2304, 4, rpp_lr_registers }, { MOD_RST9563, 0, MOD_RST9563, 0, 5, NTHW_FPGA_BUS_TYPE_RAB0, 1024, 5, rst9563_registers }, + { MOD_SLC_LR, 0, MOD_SLC, 0, 2, NTHW_FPGA_BUS_TYPE_RAB1, 2048, 2, slc_registers }, + { MOD_TX_CPY, 0, MOD_CPY, 0, 4, NTHW_FPGA_BUS_TYPE_RAB1, 9216, 26, cpy_registers }, + { MOD_TX_INS, 0, MOD_INS, 0, 2, NTHW_FPGA_BUS_TYPE_RAB1, 8704, 2, ins_registers }, + { MOD_TX_RPL, 0, MOD_RPL, 0, 4, NTHW_FPGA_BUS_TYPE_RAB1, 8960, 6, rpl_registers }, + { MOD_STA, 0, MOD_STA, 0, 9, NTHW_FPGA_BUS_TYPE_RAB0, 2048, 17, sta_registers }, + { MOD_TSM, 0, MOD_TSM, 0, 8, NTHW_FPGA_BUS_TYPE_RAB2, 1024, 66, tsm_registers }, }; static nthw_fpga_prod_param_s product_parameters[] = { @@ -1919,5 +3192,5 @@ static nthw_fpga_prod_param_s product_parameters[] = { }; nthw_fpga_prod_init_s nthw_fpga_9563_055_049_0000 = { - 200, 9563, 55, 49, 0, 0, 1726740521, 152, product_parameters, 22, fpga_modules, + 200, 9563, 55, 49, 0, 0, 1726740521, 152, product_parameters, 37, fpga_modules, }; diff --git a/drivers/net/ntnic/nthw/supported/nthw_fpga_mod_defs.h b/drivers/net/ntnic/nthw/supported/nthw_fpga_mod_defs.h index b6be02f45e..e6ed9e714b 100644 --- a/drivers/net/ntnic/nthw/supported/nthw_fpga_mod_defs.h +++ b/drivers/net/ntnic/nthw/supported/nthw_fpga_mod_defs.h @@ -15,6 +15,7 @@ #define MOD_UNKNOWN (0L)/* Unknown/uninitialized - keep this as the first element */ #define MOD_CAT (0x30b447c2UL) +#define MOD_CPY (0x1ddc186fUL) #define MOD_CSU (0x3f470787UL) #define MOD_DBS (0x80b29727UL) #define MOD_FLM (0xe7ba53a4UL) @@ -27,8 +28,11 @@ #define MOD_I2CM (0x93bc7780UL) #define MOD_IFR (0x9b01f1e6UL) #define MOD_IIC (0x7629cddbUL) +#define MOD_INS (0x24df4b78UL) #define MOD_KM (0xcfbd9dbeUL) #define MOD_MAC_PCS (0x7abe24c7UL) +#define MOD_MAC_RX (0x6347b490UL) +#define MOD_MAC_TX (0x351d1316UL) #define MOD_PCIE3 (0xfbc48c18UL) #define MOD_PCI_RD_TG (0x9ad9eed2UL) 
#define MOD_PCI_WR_TG (0x274b69e1UL) @@ -36,14 +40,19 @@ #define MOD_QSL (0x448ed859UL) #define MOD_RAC (0xae830b42UL) #define MOD_RMC (0x236444eUL) +#define MOD_RPL (0x6de535c3UL) +#define MOD_RPF (0x8d30dcddUL) #define MOD_RPP_LR (0xba7f945cUL) #define MOD_RST9563 (0x385d6d1dUL) #define MOD_SDC (0xd2369530UL) +#define MOD_SLC (0x1aef1f38UL) #define MOD_SLC_LR (0x969fc50bUL) +#define MOD_STA (0x76fae64dUL) +#define MOD_TSM (0x35422a24UL) #define MOD_TX_CPY (0x60acf217UL) #define MOD_TX_INS (0x59afa100UL) #define MOD_TX_RPL (0x1095dfbbUL) -#define MOD_IDX_COUNT (14) +#define MOD_IDX_COUNT (36) /* aliases - only aliases go below this point */ #endif /* _NTHW_FPGA_MOD_DEFS_H_ */ diff --git a/drivers/net/ntnic/nthw/supported/nthw_fpga_mod_str_map.c b/drivers/net/ntnic/nthw/supported/nthw_fpga_mod_str_map.c index 150b9dd976..e8ed7faf0d 100644 --- a/drivers/net/ntnic/nthw/supported/nthw_fpga_mod_str_map.c +++ b/drivers/net/ntnic/nthw/supported/nthw_fpga_mod_str_map.c @@ -19,5 +19,7 @@ const struct nthw_fpga_mod_str_s sa_nthw_fpga_mod_str_map[] = { { MOD_RAC, "RAC" }, { MOD_RST9563, "RST9563" }, { MOD_SDC, "SDC" }, + { MOD_STA, "STA" }, + { MOD_TSM, "TSM" }, { 0UL, NULL } }; diff --git a/drivers/net/ntnic/nthw/supported/nthw_fpga_reg_defs.h b/drivers/net/ntnic/nthw/supported/nthw_fpga_reg_defs.h index 3560eeda7d..4d299c6aa8 100644 --- a/drivers/net/ntnic/nthw/supported/nthw_fpga_reg_defs.h +++ b/drivers/net/ntnic/nthw/supported/nthw_fpga_reg_defs.h @@ -30,6 +30,8 @@ #include "nthw_fpga_reg_defs_ins.h" #include "nthw_fpga_reg_defs_km.h" #include "nthw_fpga_reg_defs_mac_pcs.h" +#include "nthw_fpga_reg_defs_mac_rx.h" +#include "nthw_fpga_reg_defs_mac_tx.h" #include "nthw_fpga_reg_defs_pcie3.h" #include "nthw_fpga_reg_defs_pci_rd_tg.h" #include "nthw_fpga_reg_defs_pci_wr_tg.h" @@ -37,12 +39,15 @@ #include "nthw_fpga_reg_defs_qsl.h" #include "nthw_fpga_reg_defs_rac.h" #include "nthw_fpga_reg_defs_rmc.h" +#include "nthw_fpga_reg_defs_rpf.h" #include "nthw_fpga_reg_defs_rpl.h" #include "nthw_fpga_reg_defs_rpp_lr.h" #include "nthw_fpga_reg_defs_rst9563.h" #include "nthw_fpga_reg_defs_sdc.h" +#include "nthw_fpga_reg_defs_tsm.h" #include "nthw_fpga_reg_defs_slc.h" #include "nthw_fpga_reg_defs_slc_lr.h" +#include "nthw_fpga_reg_defs_sta.h" #include "nthw_fpga_reg_defs_tx_cpy.h" #include "nthw_fpga_reg_defs_tx_ins.h" #include "nthw_fpga_reg_defs_tx_rpl.h" diff --git a/drivers/net/ntnic/nthw/supported/nthw_fpga_reg_defs_mac_rx.h b/drivers/net/ntnic/nthw/supported/nthw_fpga_reg_defs_mac_rx.h new file mode 100644 index 0000000000..3829c10f3b --- /dev/null +++ b/drivers/net/ntnic/nthw/supported/nthw_fpga_reg_defs_mac_rx.h @@ -0,0 +1,29 @@ +/* + * SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2024 Napatech A/S + */ + +#ifndef _NTHW_FPGA_REG_DEFS_MAC_RX_ +#define _NTHW_FPGA_REG_DEFS_MAC_RX_ + +/* MAC_RX */ +#define MAC_RX_BAD_FCS (0xca07f618UL) +#define MAC_RX_BAD_FCS_COUNT (0x11d5ba0eUL) +#define MAC_RX_FRAGMENT (0x5363b736UL) +#define MAC_RX_FRAGMENT_COUNT (0xf664c9aUL) +#define MAC_RX_PACKET_BAD_FCS (0x4cb8b34cUL) +#define MAC_RX_PACKET_BAD_FCS_COUNT (0xb6701e28UL) +#define MAC_RX_PACKET_SMALL (0xed318a65UL) +#define MAC_RX_PACKET_SMALL_COUNT (0x72095ec7UL) +#define MAC_RX_TOTAL_BYTES (0x831313e2UL) +#define MAC_RX_TOTAL_BYTES_COUNT (0xe5d8be59UL) +#define MAC_RX_TOTAL_GOOD_BYTES (0x912c2d1cUL) +#define MAC_RX_TOTAL_GOOD_BYTES_COUNT (0x63bb5f3eUL) +#define MAC_RX_TOTAL_GOOD_PACKETS (0xfbb4f497UL) +#define MAC_RX_TOTAL_GOOD_PACKETS_COUNT (0xae9d21b0UL) +#define MAC_RX_TOTAL_PACKETS (0xb0ea3730UL) 
+#define MAC_RX_TOTAL_PACKETS_COUNT (0x532c885dUL) +#define MAC_RX_UNDERSIZE (0xb6fa4bdbUL) +#define MAC_RX_UNDERSIZE_COUNT (0x471945ffUL) + +#endif /* _NTHW_FPGA_REG_DEFS_MAC_RX_ */ diff --git a/drivers/net/ntnic/nthw/supported/nthw_fpga_reg_defs_mac_tx.h b/drivers/net/ntnic/nthw/supported/nthw_fpga_reg_defs_mac_tx.h new file mode 100644 index 0000000000..6a77d449ae --- /dev/null +++ b/drivers/net/ntnic/nthw/supported/nthw_fpga_reg_defs_mac_tx.h @@ -0,0 +1,21 @@ +/* + * SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2024 Napatech A/S + */ + +#ifndef _NTHW_FPGA_REG_DEFS_MAC_TX_ +#define _NTHW_FPGA_REG_DEFS_MAC_TX_ + +/* MAC_TX */ +#define MAC_TX_PACKET_SMALL (0xcfcb5e97UL) +#define MAC_TX_PACKET_SMALL_COUNT (0x84345b01UL) +#define MAC_TX_TOTAL_BYTES (0x7bd15854UL) +#define MAC_TX_TOTAL_BYTES_COUNT (0x61fb238cUL) +#define MAC_TX_TOTAL_GOOD_BYTES (0xcf0260fUL) +#define MAC_TX_TOTAL_GOOD_BYTES_COUNT (0x8603398UL) +#define MAC_TX_TOTAL_GOOD_PACKETS (0xd89f151UL) +#define MAC_TX_TOTAL_GOOD_PACKETS_COUNT (0x12c47c77UL) +#define MAC_TX_TOTAL_PACKETS (0xe37b5ed4UL) +#define MAC_TX_TOTAL_PACKETS_COUNT (0x21ddd2ddUL) + +#endif /* _NTHW_FPGA_REG_DEFS_MAC_TX_ */ diff --git a/drivers/net/ntnic/nthw/supported/nthw_fpga_reg_defs_rpf.h b/drivers/net/ntnic/nthw/supported/nthw_fpga_reg_defs_rpf.h new file mode 100644 index 0000000000..72f450b85d --- /dev/null +++ b/drivers/net/ntnic/nthw/supported/nthw_fpga_reg_defs_rpf.h @@ -0,0 +1,19 @@ +/* + * SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2024 Napatech A/S + */ + +#ifndef _NTHW_FPGA_REG_DEFS_RPF_ +#define _NTHW_FPGA_REG_DEFS_RPF_ + +/* RPF */ +#define RPF_CONTROL (0x7a5bdb50UL) +#define RPF_CONTROL_KEEP_ALIVE_EN (0x80be3ffcUL) +#define RPF_CONTROL_PEN (0xb23137b8UL) +#define RPF_CONTROL_RPP_EN (0xdb51f109UL) +#define RPF_CONTROL_ST_TGL_EN (0x45a6ecfaUL) +#define RPF_TS_SORT_PRG (0xff1d137eUL) +#define RPF_TS_SORT_PRG_MATURING_DELAY (0x2a38e127UL) +#define RPF_TS_SORT_PRG_TS_AT_EOF (0x9f27d433UL) + +#endif /* _NTHW_FPGA_REG_DEFS_RPF_ */ diff --git a/drivers/net/ntnic/nthw/supported/nthw_fpga_reg_defs_sta.h b/drivers/net/ntnic/nthw/supported/nthw_fpga_reg_defs_sta.h new file mode 100644 index 0000000000..0cd183fcaa --- /dev/null +++ b/drivers/net/ntnic/nthw/supported/nthw_fpga_reg_defs_sta.h @@ -0,0 +1,48 @@ +/* + * SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2024 Napatech A/S + */ + +#ifndef _NTHW_FPGA_REG_DEFS_STA_ +#define _NTHW_FPGA_REG_DEFS_STA_ + +/* STA */ +#define STA_BYTE (0xa08364d4UL) +#define STA_BYTE_CNT (0x3119e6bcUL) +#define STA_CFG (0xcecaf9f4UL) +#define STA_CFG_CNT_CLEAR (0xc325e12eUL) +#define STA_CFG_CNT_FRZ (0x8c27a596UL) +#define STA_CFG_DMA_ENA (0x940dbacUL) +#define STA_CFG_TX_DISABLE (0x30f43250UL) +#define STA_CV_ERR (0x7db7db5dUL) +#define STA_CV_ERR_CNT (0x2c02fbbeUL) +#define STA_FCS_ERR (0xa0de1647UL) +#define STA_FCS_ERR_CNT (0xc68c37d1UL) +#define STA_HOST_ADR_LSB (0xde569336UL) +#define STA_HOST_ADR_LSB_LSB (0xb6f2f94bUL) +#define STA_HOST_ADR_MSB (0xdf94f901UL) +#define STA_HOST_ADR_MSB_MSB (0x114798c8UL) +#define STA_LOAD_BIN (0x2e842591UL) +#define STA_LOAD_BIN_BIN (0x1a2b942eUL) +#define STA_LOAD_BPS_RX_0 (0xbf8f4595UL) +#define STA_LOAD_BPS_RX_0_BPS (0x41647781UL) +#define STA_LOAD_BPS_RX_1 (0xc8887503UL) +#define STA_LOAD_BPS_RX_1_BPS (0x7c045e31UL) +#define STA_LOAD_BPS_TX_0 (0x9ae41a49UL) +#define STA_LOAD_BPS_TX_0_BPS (0x870b7e06UL) +#define STA_LOAD_BPS_TX_1 (0xede32adfUL) +#define STA_LOAD_BPS_TX_1_BPS (0xba6b57b6UL) +#define STA_LOAD_PPS_RX_0 (0x811173c3UL) +#define 
STA_LOAD_PPS_RX_0_PPS (0xbee573fcUL) +#define STA_LOAD_PPS_RX_1 (0xf6164355UL) +#define STA_LOAD_PPS_RX_1_PPS (0x83855a4cUL) +#define STA_LOAD_PPS_TX_0 (0xa47a2c1fUL) +#define STA_LOAD_PPS_TX_0_PPS (0x788a7a7bUL) +#define STA_LOAD_PPS_TX_1 (0xd37d1c89UL) +#define STA_LOAD_PPS_TX_1_PPS (0x45ea53cbUL) +#define STA_PCKT (0xecc8f30aUL) +#define STA_PCKT_CNT (0x63291d16UL) +#define STA_STATUS (0x91c5c51cUL) +#define STA_STATUS_STAT_TOGGLE_MISSED (0xf7242b11UL) + +#endif /* _NTHW_FPGA_REG_DEFS_STA_ */ diff --git a/drivers/net/ntnic/nthw/supported/nthw_fpga_reg_defs_tsm.h b/drivers/net/ntnic/nthw/supported/nthw_fpga_reg_defs_tsm.h new file mode 100644 index 0000000000..cdb733ee17 --- /dev/null +++ b/drivers/net/ntnic/nthw/supported/nthw_fpga_reg_defs_tsm.h @@ -0,0 +1,205 @@ +/* + * SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2024 Napatech A/S + */ + +#ifndef _NTHW_FPGA_REG_DEFS_TSM_ +#define _NTHW_FPGA_REG_DEFS_TSM_ + +/* TSM */ +#define TSM_CON0_CONFIG (0xf893d371UL) +#define TSM_CON0_CONFIG_BLIND (0x59ccfcbUL) +#define TSM_CON0_CONFIG_DC_SRC (0x1879812bUL) +#define TSM_CON0_CONFIG_PORT (0x3ff0bb08UL) +#define TSM_CON0_CONFIG_PPSIN_2_5V (0xb8e78227UL) +#define TSM_CON0_CONFIG_SAMPLE_EDGE (0x4a4022ebUL) +#define TSM_CON0_INTERFACE (0x76e93b59UL) +#define TSM_CON0_INTERFACE_EX_TERM (0xd079b416UL) +#define TSM_CON0_INTERFACE_IN_REF_PWM (0x16f73c33UL) +#define TSM_CON0_INTERFACE_PWM_ENA (0x3629e73fUL) +#define TSM_CON0_INTERFACE_RESERVED (0xf9c5066UL) +#define TSM_CON0_INTERFACE_VTERM_PWM (0x6d2b1e23UL) +#define TSM_CON0_SAMPLE_HI (0x6e536b8UL) +#define TSM_CON0_SAMPLE_HI_SEC (0x5fc26159UL) +#define TSM_CON0_SAMPLE_LO (0x8bea5689UL) +#define TSM_CON0_SAMPLE_LO_NS (0x13d0010dUL) +#define TSM_CON1_CONFIG (0x3439d3efUL) +#define TSM_CON1_CONFIG_BLIND (0x98932ebdUL) +#define TSM_CON1_CONFIG_DC_SRC (0xa1825ac3UL) +#define TSM_CON1_CONFIG_PORT (0xe266628dUL) +#define TSM_CON1_CONFIG_PPSIN_2_5V (0x6f05027fUL) +#define TSM_CON1_CONFIG_SAMPLE_EDGE (0x2f2719adUL) +#define TSM_CON1_SAMPLE_HI (0xc76be978UL) +#define TSM_CON1_SAMPLE_HI_SEC (0xe639bab1UL) +#define TSM_CON1_SAMPLE_LO (0x4a648949UL) +#define TSM_CON1_SAMPLE_LO_NS (0x8edfe07bUL) +#define TSM_CON2_CONFIG (0xbab6d40cUL) +#define TSM_CON2_CONFIG_BLIND (0xe4f20b66UL) +#define TSM_CON2_CONFIG_DC_SRC (0xb0ff30baUL) +#define TSM_CON2_CONFIG_PORT (0x5fac0e43UL) +#define TSM_CON2_CONFIG_PPSIN_2_5V (0xcc5384d6UL) +#define TSM_CON2_CONFIG_SAMPLE_EDGE (0x808e5467UL) +#define TSM_CON2_SAMPLE_HI (0x5e898f79UL) +#define TSM_CON2_SAMPLE_HI_SEC (0xf744d0c8UL) +#define TSM_CON2_SAMPLE_LO (0xd386ef48UL) +#define TSM_CON2_SAMPLE_LO_NS (0xf2bec5a0UL) +#define TSM_CON3_CONFIG (0x761cd492UL) +#define TSM_CON3_CONFIG_BLIND (0x79fdea10UL) +#define TSM_CON3_CONFIG_PORT (0x823ad7c6UL) +#define TSM_CON3_CONFIG_SAMPLE_EDGE (0xe5e96f21UL) +#define TSM_CON3_SAMPLE_HI (0x9f0750b9UL) +#define TSM_CON3_SAMPLE_HI_SEC (0x4ebf0b20UL) +#define TSM_CON3_SAMPLE_LO (0x12083088UL) +#define TSM_CON3_SAMPLE_LO_NS (0x6fb124d6UL) +#define TSM_CON4_CONFIG (0x7cd9dd8bUL) +#define TSM_CON4_CONFIG_BLIND (0x1c3040d0UL) +#define TSM_CON4_CONFIG_PORT (0xff49d19eUL) +#define TSM_CON4_CONFIG_SAMPLE_EDGE (0x4adc9b2UL) +#define TSM_CON4_SAMPLE_HI (0xb63c453aUL) +#define TSM_CON4_SAMPLE_HI_SEC (0xd5be043aUL) +#define TSM_CON4_SAMPLE_LO (0x3b33250bUL) +#define TSM_CON4_SAMPLE_LO_NS (0xa7c8e16UL) +#define TSM_CON5_CONFIG (0xb073dd15UL) +#define TSM_CON5_CONFIG_BLIND (0x813fa1a6UL) +#define TSM_CON5_CONFIG_PORT (0x22df081bUL) +#define TSM_CON5_CONFIG_SAMPLE_EDGE (0x61caf2f4UL) +#define 
TSM_CON5_SAMPLE_HI (0x77b29afaUL) +#define TSM_CON5_SAMPLE_HI_SEC (0x6c45dfd2UL) +#define TSM_CON5_SAMPLE_LO (0xfabdfacbUL) +#define TSM_CON5_SAMPLE_LO_TIME (0x945d87e8UL) +#define TSM_CON6_CONFIG (0x3efcdaf6UL) +#define TSM_CON6_CONFIG_BLIND (0xfd5e847dUL) +#define TSM_CON6_CONFIG_PORT (0x9f1564d5UL) +#define TSM_CON6_CONFIG_SAMPLE_EDGE (0xce63bf3eUL) +#define TSM_CON6_SAMPLE_HI (0xee50fcfbUL) +#define TSM_CON6_SAMPLE_HI_SEC (0x7d38b5abUL) +#define TSM_CON6_SAMPLE_LO (0x635f9ccaUL) +#define TSM_CON6_SAMPLE_LO_NS (0xeb124abbUL) +#define TSM_CON7_HOST_SAMPLE_HI (0xdcd90e52UL) +#define TSM_CON7_HOST_SAMPLE_HI_SEC (0xd98d3618UL) +#define TSM_CON7_HOST_SAMPLE_LO (0x51d66e63UL) +#define TSM_CON7_HOST_SAMPLE_LO_NS (0x8f5594ddUL) +#define TSM_CONFIG (0xef5dec83UL) +#define TSM_CONFIG_NTTS_SRC (0x1b60227bUL) +#define TSM_CONFIG_NTTS_SYNC (0x43e0a69dUL) +#define TSM_CONFIG_TIMESET_EDGE (0x8c381127UL) +#define TSM_CONFIG_TIMESET_SRC (0xe7590a31UL) +#define TSM_CONFIG_TIMESET_UP (0x561980c1UL) +#define TSM_CONFIG_TS_FORMAT (0xe6efc2faUL) +#define TSM_INT_CONFIG (0x9a0d52dUL) +#define TSM_INT_CONFIG_AUTO_DISABLE (0x9581470UL) +#define TSM_INT_CONFIG_MASK (0xf00cd3d7UL) +#define TSM_INT_STAT (0xa4611a70UL) +#define TSM_INT_STAT_CAUSE (0x315168cfUL) +#define TSM_INT_STAT_ENABLE (0x980a12d1UL) +#define TSM_LED (0x6ae05f87UL) +#define TSM_LED_LED0_BG_COLOR (0x897cf9eeUL) +#define TSM_LED_LED0_COLOR (0x6d7ada39UL) +#define TSM_LED_LED0_MODE (0x6087b644UL) +#define TSM_LED_LED0_SRC (0x4fe29639UL) +#define TSM_LED_LED1_BG_COLOR (0x66be92d0UL) +#define TSM_LED_LED1_COLOR (0xcb0dd18dUL) +#define TSM_LED_LED1_MODE (0xabdb65e1UL) +#define TSM_LED_LED1_SRC (0x7282bf89UL) +#define TSM_LED_LED2_BG_COLOR (0x8d8929d3UL) +#define TSM_LED_LED2_COLOR (0xfae5cb10UL) +#define TSM_LED_LED2_MODE (0x2d4f174fUL) +#define TSM_LED_LED2_SRC (0x3522c559UL) +#define TSM_NTTS_CONFIG (0x8bc38bdeUL) +#define TSM_NTTS_CONFIG_AUTO_HARDSET (0xd75be25dUL) +#define TSM_NTTS_CONFIG_EXT_CLK_ADJ (0x700425b6UL) +#define TSM_NTTS_CONFIG_HIGH_SAMPLE (0x37135b7eUL) +#define TSM_NTTS_CONFIG_TS_SRC_FORMAT (0x6e6e707UL) +#define TSM_NTTS_EXT_STAT (0x2b0315b7UL) +#define TSM_NTTS_EXT_STAT_MASTER_ID (0xf263315eUL) +#define TSM_NTTS_EXT_STAT_MASTER_REV (0xd543795eUL) +#define TSM_NTTS_EXT_STAT_MASTER_STAT (0x92d96f5eUL) +#define TSM_NTTS_LIMIT_HI (0x1ddaa85fUL) +#define TSM_NTTS_LIMIT_HI_SEC (0x315c6ef2UL) +#define TSM_NTTS_LIMIT_LO (0x90d5c86eUL) +#define TSM_NTTS_LIMIT_LO_NS (0xe6d94d9aUL) +#define TSM_NTTS_OFFSET (0x6436e72UL) +#define TSM_NTTS_OFFSET_NS (0x12d43a06UL) +#define TSM_NTTS_SAMPLE_HI (0xcdc8aa3eUL) +#define TSM_NTTS_SAMPLE_HI_SEC (0x4f6588fdUL) +#define TSM_NTTS_SAMPLE_LO (0x40c7ca0fUL) +#define TSM_NTTS_SAMPLE_LO_NS (0x6e43ff97UL) +#define TSM_NTTS_STAT (0x6502b820UL) +#define TSM_NTTS_STAT_NTTS_VALID (0x3e184471UL) +#define TSM_NTTS_STAT_SIGNAL_LOST (0x178bedfdUL) +#define TSM_NTTS_STAT_SYNC_LOST (0xe4cd53dfUL) +#define TSM_NTTS_TS_T0_HI (0x1300d1b6UL) +#define TSM_NTTS_TS_T0_HI_TIME (0xa016ae4fUL) +#define TSM_NTTS_TS_T0_LO (0x9e0fb187UL) +#define TSM_NTTS_TS_T0_LO_TIME (0x82006941UL) +#define TSM_NTTS_TS_T0_OFFSET (0xbf70ce4fUL) +#define TSM_NTTS_TS_T0_OFFSET_COUNT (0x35dd4398UL) +#define TSM_PB_CTRL (0x7a8b60faUL) +#define TSM_PB_CTRL_INSTMEM_WR (0xf96e2cbcUL) +#define TSM_PB_CTRL_RESET (0xa38ade8bUL) +#define TSM_PB_CTRL_RST (0x3aaa82f4UL) +#define TSM_PB_INSTMEM (0xb54aeecUL) +#define TSM_PB_INSTMEM_MEM_ADDR (0x9ac79b6eUL) +#define TSM_PB_INSTMEM_MEM_DATA (0x65aefa38UL) +#define TSM_PI_CTRL_I (0x8d71a4e2UL) +#define 
TSM_PI_CTRL_I_VAL (0x98baedc9UL) +#define TSM_PI_CTRL_KI (0xa1bd86cbUL) +#define TSM_PI_CTRL_KI_GAIN (0x53faa916UL) +#define TSM_PI_CTRL_KP (0xc5d62e0bUL) +#define TSM_PI_CTRL_KP_GAIN (0x7723fa45UL) +#define TSM_PI_CTRL_SHL (0xaa518701UL) +#define TSM_PI_CTRL_SHL_VAL (0x56f56a6fUL) +#define TSM_STAT (0xa55bf677UL) +#define TSM_STAT_HARD_SYNC (0x7fff20fdUL) +#define TSM_STAT_LINK_CON0 (0x216086f0UL) +#define TSM_STAT_LINK_CON1 (0x5667b666UL) +#define TSM_STAT_LINK_CON2 (0xcf6ee7dcUL) +#define TSM_STAT_LINK_CON3 (0xb869d74aUL) +#define TSM_STAT_LINK_CON4 (0x260d42e9UL) +#define TSM_STAT_LINK_CON5 (0x510a727fUL) +#define TSM_STAT_NTTS_INSYNC (0xb593a245UL) +#define TSM_STAT_PTP_MI_PRESENT (0x43131eb0UL) +#define TSM_TIMER_CTRL (0x648da051UL) +#define TSM_TIMER_CTRL_TIMER_EN_T0 (0x17cee154UL) +#define TSM_TIMER_CTRL_TIMER_EN_T1 (0x60c9d1c2UL) +#define TSM_TIMER_T0 (0x417217a5UL) +#define TSM_TIMER_T0_MAX_COUNT (0xaa601706UL) +#define TSM_TIMER_T1 (0x36752733UL) +#define TSM_TIMER_T1_MAX_COUNT (0x6beec8c6UL) +#define TSM_TIME_HARDSET_HI (0xf28bdb46UL) +#define TSM_TIME_HARDSET_HI_TIME (0x2d9a28baUL) +#define TSM_TIME_HARDSET_LO (0x7f84bb77UL) +#define TSM_TIME_HARDSET_LO_TIME (0xf8cefb4UL) +#define TSM_TIME_HI (0x175acea1UL) +#define TSM_TIME_HI_SEC (0xc0e9c9a1UL) +#define TSM_TIME_LO (0x9a55ae90UL) +#define TSM_TIME_LO_NS (0x879c5c4bUL) +#define TSM_TIME_RATE_ADJ (0xb1cc4bb1UL) +#define TSM_TIME_RATE_ADJ_FRACTION (0xb7ab96UL) +#define TSM_TS_HI (0xccfe9e5eUL) +#define TSM_TS_HI_TIME (0xc23fed30UL) +#define TSM_TS_LO (0x41f1fe6fUL) +#define TSM_TS_LO_TIME (0xe0292a3eUL) +#define TSM_TS_OFFSET (0x4b2e6e13UL) +#define TSM_TS_OFFSET_NS (0x68c286b9UL) +#define TSM_TS_STAT (0x64d41b8cUL) +#define TSM_TS_STAT_OVERRUN (0xad9db92aUL) +#define TSM_TS_STAT_SAMPLES (0xb6350e0bUL) +#define TSM_TS_STAT_HI_OFFSET (0x1aa2ddf2UL) +#define TSM_TS_STAT_HI_OFFSET_NS (0xeb040e0fUL) +#define TSM_TS_STAT_LO_OFFSET (0x81218579UL) +#define TSM_TS_STAT_LO_OFFSET_NS (0xb7ff33UL) +#define TSM_TS_STAT_TAR_HI (0x65af24b6UL) +#define TSM_TS_STAT_TAR_HI_SEC (0x7e92f619UL) +#define TSM_TS_STAT_TAR_LO (0xe8a04487UL) +#define TSM_TS_STAT_TAR_LO_NS (0xf7b3f439UL) +#define TSM_TS_STAT_X (0x419f0ddUL) +#define TSM_TS_STAT_X_NS (0xa48c3f27UL) +#define TSM_TS_STAT_X2_HI (0xd6b1c517UL) +#define TSM_TS_STAT_X2_HI_NS (0x4288c50fUL) +#define TSM_TS_STAT_X2_LO (0x5bbea526UL) +#define TSM_TS_STAT_X2_LO_NS (0x92633c13UL) +#define TSM_UTC_OFFSET (0xf622a13aUL) +#define TSM_UTC_OFFSET_SEC (0xd9c80209UL) + +#endif /* _NTHW_FPGA_REG_DEFS_TSM_ */ diff --git a/drivers/net/ntnic/ntnic_ethdev.c b/drivers/net/ntnic/ntnic_ethdev.c index bff893ec7a..2a2643a106 100644 --- a/drivers/net/ntnic/ntnic_ethdev.c +++ b/drivers/net/ntnic/ntnic_ethdev.c @@ -4,6 +4,9 @@ */ #include +#include + +#include #include #include @@ -15,6 +18,7 @@ #include +#include "rte_spinlock.h" #include "ntlog.h" #include "ntdrv_4ga.h" #include "ntos_drv.h" @@ -23,9 +27,19 @@ #include "ntnic_vfio.h" #include "ntnic_mod_reg.h" #include "nt_util.h" - +#include "profile_inline/flm_age_queue.h" +#include "profile_inline/flm_evt_queue.h" +#include "rte_pmd_ntnic.h" + +const rte_thread_attr_t thread_attr = { .priority = RTE_THREAD_PRIORITY_NORMAL }; +#define THREAD_CREATE(a, b, c) rte_thread_create(a, &thread_attr, b, c) +#define THREAD_CTRL_CREATE(a, b, c, d) rte_thread_create_internal_control(a, b, c, d) +#define THREAD_JOIN(a) rte_thread_join(a, NULL) +#define THREAD_FUNC static uint32_t +#define THREAD_RETURN (0) #define HW_MAX_PKT_LEN (10000) #define MAX_MTU (HW_MAX_PKT_LEN - 
RTE_ETHER_HDR_LEN - RTE_ETHER_CRC_LEN) +#define MIN_MTU_INLINE 512 #define EXCEPTION_PATH_HID 0 @@ -36,9 +50,6 @@ #define SG_HW_RX_PKT_BUFFER_SIZE (1024 << 1) #define SG_HW_TX_PKT_BUFFER_SIZE (1024 << 1) -/* Max RSS queues */ -#define MAX_QUEUES 125 - #define NUM_VQ_SEGS(_data_size_) \ ({ \ size_t _size = (_data_size_); \ @@ -60,6 +71,13 @@ #define MAX_RX_PACKETS 128 #define MAX_TX_PACKETS 128 +#define MTUINITVAL 1500 + +uint64_t rte_tsc_freq; + +static void (*previous_handler)(int sig); +static rte_thread_t shutdown_tid; + int kill_pmd; #define ETH_DEV_NTNIC_HELP_ARG "help" @@ -83,7 +101,7 @@ static const struct rte_pci_id nthw_pci_id_map[] = { static const struct sg_ops_s *sg_ops; -static rte_spinlock_t hwlock = RTE_SPINLOCK_INITIALIZER; +rte_spinlock_t hwlock = RTE_SPINLOCK_INITIALIZER; /* * Store and get adapter info @@ -120,6 +138,16 @@ store_pdrv(struct drv_s *p_drv) rte_spinlock_unlock(&hwlock); } +static void clear_pdrv(struct drv_s *p_drv) +{ + if (p_drv->adapter_no > NUM_ADAPTER_MAX) + return; + + rte_spinlock_lock(&hwlock); + _g_p_drv[p_drv->adapter_no] = NULL; + rte_spinlock_unlock(&hwlock); +} + static struct drv_s * get_pdrv_from_pci(struct rte_pci_addr addr) { @@ -141,6 +169,102 @@ get_pdrv_from_pci(struct rte_pci_addr addr) return p_drv; } +static int dpdk_stats_collect(struct pmd_internals *internals, struct rte_eth_stats *stats) +{ + const struct ntnic_filter_ops *ntnic_filter_ops = get_ntnic_filter_ops(); + + if (ntnic_filter_ops == NULL) { + NT_LOG_DBGX(ERR, NTNIC, "ntnic_filter_ops uninitialized"); + return -1; + } + + unsigned int i; + struct drv_s *p_drv = internals->p_drv; + struct ntdrv_4ga_s *p_nt_drv = &p_drv->ntdrv; + nt4ga_stat_t *p_nt4ga_stat = &p_nt_drv->adapter_info.nt4ga_stat; + nthw_stat_t *p_nthw_stat = p_nt4ga_stat->mp_nthw_stat; + const int if_index = internals->n_intf_no; + uint64_t rx_total = 0; + uint64_t rx_total_b = 0; + uint64_t tx_total = 0; + uint64_t tx_total_b = 0; + uint64_t tx_err_total = 0; + + if (!p_nthw_stat || !p_nt4ga_stat || !stats || if_index < 0 || + if_index > NUM_ADAPTER_PORTS_MAX) { + NT_LOG_DBGX(WRN, NTNIC, "error exit"); + return -1; + } + + /* + * Pull the latest port statistic numbers (Rx/Tx pkts and bytes) + * Return values are in the "internals->rxq_scg[]" and "internals->txq_scg[]" arrays + */ + ntnic_filter_ops->poll_statistics(internals); + + memset(stats, 0, sizeof(*stats)); + + for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS && i < internals->nb_rx_queues; i++) { + stats->q_ipackets[i] = internals->rxq_scg[i].rx_pkts; + stats->q_ibytes[i] = internals->rxq_scg[i].rx_bytes; + rx_total += stats->q_ipackets[i]; + rx_total_b += stats->q_ibytes[i]; + } + + for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS && i < internals->nb_tx_queues; i++) { + stats->q_opackets[i] = internals->txq_scg[i].tx_pkts; + stats->q_obytes[i] = internals->txq_scg[i].tx_bytes; + stats->q_errors[i] = internals->txq_scg[i].err_pkts; + tx_total += stats->q_opackets[i]; + tx_total_b += stats->q_obytes[i]; + tx_err_total += stats->q_errors[i]; + } + + stats->imissed = internals->rx_missed; + stats->ipackets = rx_total; + stats->ibytes = rx_total_b; + stats->opackets = tx_total; + stats->obytes = tx_total_b; + stats->oerrors = tx_err_total; + + return 0; +} + +static int dpdk_stats_reset(struct pmd_internals *internals, struct ntdrv_4ga_s *p_nt_drv, + int n_intf_no) +{ + nt4ga_stat_t *p_nt4ga_stat = &p_nt_drv->adapter_info.nt4ga_stat; + nthw_stat_t *p_nthw_stat = p_nt4ga_stat->mp_nthw_stat; + unsigned int i; + + if (!p_nthw_stat || !p_nt4ga_stat || n_intf_no < 0 || 
n_intf_no > NUM_ADAPTER_PORTS_MAX) + return -1; + + rte_spinlock_lock(&p_nt_drv->stat_lck); + + /* Rx */ + for (i = 0; i < internals->nb_rx_queues; i++) { + internals->rxq_scg[i].rx_pkts = 0; + internals->rxq_scg[i].rx_bytes = 0; + internals->rxq_scg[i].err_pkts = 0; + } + + internals->rx_missed = 0; + + /* Tx */ + for (i = 0; i < internals->nb_tx_queues; i++) { + internals->txq_scg[i].tx_pkts = 0; + internals->txq_scg[i].tx_bytes = 0; + internals->txq_scg[i].err_pkts = 0; + } + + p_nt4ga_stat->n_totals_reset_timestamp = time(NULL); + + rte_spinlock_unlock(&p_nt_drv->stat_lck); + + return 0; +} + static int eth_link_update(struct rte_eth_dev *eth_dev, int wait_to_complete __rte_unused) { @@ -151,7 +275,7 @@ eth_link_update(struct rte_eth_dev *eth_dev, int wait_to_complete __rte_unused) return -1; } - struct pmd_internals *internals = (struct pmd_internals *)eth_dev->data->dev_private; + struct pmd_internals *internals = eth_dev->data->dev_private; const int n_intf_no = internals->n_intf_no; struct adapter_info_s *p_adapter_info = &internals->p_drv->ntdrv.adapter_info; @@ -179,6 +303,23 @@ eth_link_update(struct rte_eth_dev *eth_dev, int wait_to_complete __rte_unused) return 0; } +static int eth_stats_get(struct rte_eth_dev *eth_dev, struct rte_eth_stats *stats) +{ + struct pmd_internals *internals = eth_dev->data->dev_private; + dpdk_stats_collect(internals, stats); + return 0; +} + +static int eth_stats_reset(struct rte_eth_dev *eth_dev) +{ + struct pmd_internals *internals = eth_dev->data->dev_private; + struct drv_s *p_drv = internals->p_drv; + struct ntdrv_4ga_s *p_nt_drv = &p_drv->ntdrv; + const int if_index = internals->n_intf_no; + dpdk_stats_reset(internals, p_nt_drv, if_index); + return 0; +} + static int eth_dev_infos_get(struct rte_eth_dev *eth_dev, struct rte_eth_dev_info *dev_info) { @@ -189,7 +330,7 @@ eth_dev_infos_get(struct rte_eth_dev *eth_dev, struct rte_eth_dev_info *dev_info return -1; } - struct pmd_internals *internals = (struct pmd_internals *)eth_dev->data->dev_private; + struct pmd_internals *internals = eth_dev->data->dev_private; const int n_intf_no = internals->n_intf_no; struct adapter_info_s *p_adapter_info = &internals->p_drv->ntdrv.adapter_info; @@ -199,6 +340,15 @@ eth_dev_infos_get(struct rte_eth_dev *eth_dev, struct rte_eth_dev_info *dev_info dev_info->max_rx_pktlen = HW_MAX_PKT_LEN; dev_info->max_mtu = MAX_MTU; + if (p_adapter_info->fpga_info.profile == FPGA_INFO_PROFILE_INLINE) { + dev_info->min_mtu = MIN_MTU_INLINE; + dev_info->flow_type_rss_offloads = NT_ETH_RSS_OFFLOAD_MASK; + dev_info->hash_key_size = MAX_RSS_KEY_LEN; + + dev_info->rss_algo_capa = RTE_ETH_HASH_ALGO_CAPA_MASK(DEFAULT) | + RTE_ETH_HASH_ALGO_CAPA_MASK(TOEPLITZ); + } + if (internals->p_drv) { dev_info->max_rx_queues = internals->nb_rx_queues; dev_info->max_tx_queues = internals->nb_tx_queues; @@ -811,14 +961,14 @@ static int deallocate_hw_virtio_queues(struct hwq_s *hwq) static void eth_tx_queue_release(struct rte_eth_dev *eth_dev, uint16_t queue_id) { - struct pmd_internals *internals = (struct pmd_internals *)eth_dev->data->dev_private; + struct pmd_internals *internals = eth_dev->data->dev_private; struct ntnic_tx_queue *tx_q = &internals->txq_scg[queue_id]; deallocate_hw_virtio_queues(&tx_q->hwq); } static void eth_rx_queue_release(struct rte_eth_dev *eth_dev, uint16_t queue_id) { - struct pmd_internals *internals = (struct pmd_internals *)eth_dev->data->dev_private; + struct pmd_internals *internals = eth_dev->data->dev_private; struct ntnic_rx_queue *rx_q = 
&internals->rxq_scg[queue_id]; deallocate_hw_virtio_queues(&rx_q->hwq); } @@ -848,7 +998,7 @@ static int eth_rx_scg_queue_setup(struct rte_eth_dev *eth_dev, { NT_LOG_DBGX(DBG, NTNIC, "Rx queue setup"); struct rte_pktmbuf_pool_private *mbp_priv; - struct pmd_internals *internals = (struct pmd_internals *)eth_dev->data->dev_private; + struct pmd_internals *internals = eth_dev->data->dev_private; struct ntnic_rx_queue *rx_q = &internals->rxq_scg[rx_queue_id]; struct drv_s *p_drv = internals->p_drv; struct ntdrv_4ga_s *p_nt_drv = &p_drv->ntdrv; @@ -916,7 +1066,7 @@ static int eth_tx_scg_queue_setup(struct rte_eth_dev *eth_dev, } NT_LOG_DBGX(DBG, NTNIC, "Tx queue setup"); - struct pmd_internals *internals = (struct pmd_internals *)eth_dev->data->dev_private; + struct pmd_internals *internals = eth_dev->data->dev_private; struct drv_s *p_drv = internals->p_drv; struct ntdrv_4ga_s *p_nt_drv = &p_drv->ntdrv; struct ntnic_tx_queue *tx_q = &internals->txq_scg[tx_queue_id]; @@ -1003,6 +1153,26 @@ static int eth_tx_scg_queue_setup(struct rte_eth_dev *eth_dev, return 0; } +static int dev_set_mtu_inline(struct rte_eth_dev *eth_dev, uint16_t mtu) +{ + const struct profile_inline_ops *profile_inline_ops = get_profile_inline_ops(); + + if (profile_inline_ops == NULL) { + NT_LOG_DBGX(ERR, NTNIC, "profile_inline module uninitialized"); + return -1; + } + + struct pmd_internals *internals = (struct pmd_internals *)eth_dev->data->dev_private; + + struct flow_eth_dev *flw_dev = internals->flw_dev; + int ret = -1; + + if (internals->type == PORT_TYPE_PHYSICAL && mtu >= MIN_MTU_INLINE && mtu <= MAX_MTU) + ret = profile_inline_ops->flow_set_mtu_inline(flw_dev, internals->port, mtu); + + return ret ? -EINVAL : 0; +} + static int eth_rx_queue_start(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id) { eth_dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED; @@ -1039,7 +1209,7 @@ eth_mac_addr_add(struct rte_eth_dev *eth_dev, if (index >= NUM_MAC_ADDRS_PER_PORT) { const struct pmd_internals *const internals = - (struct pmd_internals *)eth_dev->data->dev_private; + eth_dev->data->dev_private; NT_LOG_DBGX(DBG, NTNIC, "Port %i: illegal index %u (>= %u)", internals->n_intf_no, index, NUM_MAC_ADDRS_PER_PORT); return -1; @@ -1065,7 +1235,7 @@ eth_set_mc_addr_list(struct rte_eth_dev *eth_dev, struct rte_ether_addr *mc_addr_set, uint32_t nb_mc_addr) { - struct pmd_internals *const internals = (struct pmd_internals *)eth_dev->data->dev_private; + struct pmd_internals *const internals = eth_dev->data->dev_private; struct rte_ether_addr *const mc_addrs = internals->mc_addrs; size_t i; @@ -1106,7 +1276,8 @@ eth_dev_start(struct rte_eth_dev *eth_dev) return -1; } - struct pmd_internals *internals = (struct pmd_internals *)eth_dev->data->dev_private; + eth_dev->flow_fp_ops = get_dev_fp_flow_ops(); + struct pmd_internals *internals = eth_dev->data->dev_private; const int n_intf_no = internals->n_intf_no; struct adapter_info_s *p_adapter_info = &internals->p_drv->ntdrv.adapter_info; @@ -1167,7 +1338,7 @@ eth_dev_start(struct rte_eth_dev *eth_dev) static int eth_dev_stop(struct rte_eth_dev *eth_dev) { - struct pmd_internals *internals = (struct pmd_internals *)eth_dev->data->dev_private; + struct pmd_internals *internals = eth_dev->data->dev_private; NT_LOG_DBGX(DBG, NTNIC, "Port %u", internals->n_intf_no); @@ -1195,7 +1366,7 @@ eth_dev_set_link_up(struct rte_eth_dev *eth_dev) return -1; } - struct pmd_internals *const internals = (struct pmd_internals *)eth_dev->data->dev_private; + struct pmd_internals *const 
internals = eth_dev->data->dev_private; struct adapter_info_s *p_adapter_info = &internals->p_drv->ntdrv.adapter_info; const int port = internals->n_intf_no; @@ -1221,7 +1392,7 @@ eth_dev_set_link_down(struct rte_eth_dev *eth_dev) return -1; } - struct pmd_internals *const internals = (struct pmd_internals *)eth_dev->data->dev_private; + struct pmd_internals *const internals = eth_dev->data->dev_private; struct adapter_info_s *p_adapter_info = &internals->p_drv->ntdrv.adapter_info; const int port = internals->n_intf_no; @@ -1240,6 +1411,13 @@ eth_dev_set_link_down(struct rte_eth_dev *eth_dev) static void drv_deinit(struct drv_s *p_drv) { + const struct profile_inline_ops *profile_inline_ops = get_profile_inline_ops(); + + if (profile_inline_ops == NULL) { + NT_LOG_DBGX(ERR, NTNIC, "profile_inline module uninitialized"); + return; + } + const struct adapter_ops *adapter_ops = get_adapter_ops(); if (adapter_ops == NULL) { @@ -1251,6 +1429,30 @@ drv_deinit(struct drv_s *p_drv) return; ntdrv_4ga_t *p_nt_drv = &p_drv->ntdrv; + fpga_info_t *fpga_info = &p_nt_drv->adapter_info.fpga_info; + + /* + * Mark the global pdrv for cleared. Used by some threads to terminate. + * 1 second to give the threads a chance to see the termonation. + */ + clear_pdrv(p_drv); + nt_os_wait_usec(1000000); + + /* stop statistics threads */ + p_drv->ntdrv.b_shutdown = true; + THREAD_JOIN(p_nt_drv->stat_thread); + + if (fpga_info->profile == FPGA_INFO_PROFILE_INLINE) { + THREAD_JOIN(p_nt_drv->flm_thread); + profile_inline_ops->flm_free_queues(); + THREAD_JOIN(p_nt_drv->port_event_thread); + /* Free all local flm event queues */ + flm_inf_sta_queue_free_all(FLM_INFO_LOCAL); + /* Free all remote flm event queues */ + flm_inf_sta_queue_free_all(FLM_INFO_REMOTE); + /* Free all aged flow event queues */ + flm_age_queue_free_all(); + } /* stop adapter */ adapter_ops->deinit(&p_nt_drv->adapter_info); @@ -1263,7 +1465,7 @@ drv_deinit(struct drv_s *p_drv) static int eth_dev_close(struct rte_eth_dev *eth_dev) { - struct pmd_internals *internals = (struct pmd_internals *)eth_dev->data->dev_private; + struct pmd_internals *internals = eth_dev->data->dev_private; struct drv_s *p_drv = internals->p_drv; if (internals->type != PORT_TYPE_VIRTUAL) { @@ -1301,7 +1503,7 @@ eth_dev_close(struct rte_eth_dev *eth_dev) static int eth_fw_version_get(struct rte_eth_dev *eth_dev, char *fw_version, size_t fw_size) { - struct pmd_internals *internals = (struct pmd_internals *)eth_dev->data->dev_private; + struct pmd_internals *internals = eth_dev->data->dev_private; if (internals->type == PORT_TYPE_VIRTUAL || internals->type == PORT_TYPE_OVERRIDE) return 0; @@ -1321,6 +1523,119 @@ eth_fw_version_get(struct rte_eth_dev *eth_dev, char *fw_version, size_t fw_size } } +static int dev_flow_ops_get(struct rte_eth_dev *dev __rte_unused, const struct rte_flow_ops **ops) +{ + *ops = get_dev_flow_ops(); + return 0; +} + +static int eth_xstats_get(struct rte_eth_dev *eth_dev, struct rte_eth_xstat *stats, unsigned int n) +{ + struct pmd_internals *internals = eth_dev->data->dev_private; + struct drv_s *p_drv = internals->p_drv; + ntdrv_4ga_t *p_nt_drv = &p_drv->ntdrv; + nt4ga_stat_t *p_nt4ga_stat = &p_nt_drv->adapter_info.nt4ga_stat; + int if_index = internals->n_intf_no; + int nb_xstats; + + const struct ntnic_xstats_ops *ntnic_xstats_ops = get_ntnic_xstats_ops(); + + if (ntnic_xstats_ops == NULL) { + NT_LOG(INF, NTNIC, "ntnic_xstats module not included"); + return -1; + } + + rte_spinlock_lock(&p_nt_drv->stat_lck); + nb_xstats = 
ntnic_xstats_ops->nthw_xstats_get(p_nt4ga_stat, stats, n, if_index); + rte_spinlock_unlock(&p_nt_drv->stat_lck); + return nb_xstats; +} + +static int eth_xstats_get_by_id(struct rte_eth_dev *eth_dev, + const uint64_t *ids, + uint64_t *values, + unsigned int n) +{ + struct pmd_internals *internals = eth_dev->data->dev_private; + struct drv_s *p_drv = internals->p_drv; + ntdrv_4ga_t *p_nt_drv = &p_drv->ntdrv; + nt4ga_stat_t *p_nt4ga_stat = &p_nt_drv->adapter_info.nt4ga_stat; + int if_index = internals->n_intf_no; + int nb_xstats; + + const struct ntnic_xstats_ops *ntnic_xstats_ops = get_ntnic_xstats_ops(); + + if (ntnic_xstats_ops == NULL) { + NT_LOG(INF, NTNIC, "ntnic_xstats module not included"); + return -1; + } + + rte_spinlock_lock(&p_nt_drv->stat_lck); + nb_xstats = + ntnic_xstats_ops->nthw_xstats_get_by_id(p_nt4ga_stat, ids, values, n, if_index); + rte_spinlock_unlock(&p_nt_drv->stat_lck); + return nb_xstats; +} + +static int eth_xstats_reset(struct rte_eth_dev *eth_dev) +{ + struct pmd_internals *internals = eth_dev->data->dev_private; + struct drv_s *p_drv = internals->p_drv; + ntdrv_4ga_t *p_nt_drv = &p_drv->ntdrv; + nt4ga_stat_t *p_nt4ga_stat = &p_nt_drv->adapter_info.nt4ga_stat; + int if_index = internals->n_intf_no; + + struct ntnic_xstats_ops *ntnic_xstats_ops = get_ntnic_xstats_ops(); + + if (ntnic_xstats_ops == NULL) { + NT_LOG(INF, NTNIC, "ntnic_xstats module not included"); + return -1; + } + + rte_spinlock_lock(&p_nt_drv->stat_lck); + ntnic_xstats_ops->nthw_xstats_reset(p_nt4ga_stat, if_index); + rte_spinlock_unlock(&p_nt_drv->stat_lck); + return dpdk_stats_reset(internals, p_nt_drv, if_index); +} + +static int eth_xstats_get_names(struct rte_eth_dev *eth_dev, + struct rte_eth_xstat_name *xstats_names, unsigned int size) +{ + struct pmd_internals *internals = eth_dev->data->dev_private; + struct drv_s *p_drv = internals->p_drv; + ntdrv_4ga_t *p_nt_drv = &p_drv->ntdrv; + nt4ga_stat_t *p_nt4ga_stat = &p_nt_drv->adapter_info.nt4ga_stat; + + const struct ntnic_xstats_ops *ntnic_xstats_ops = get_ntnic_xstats_ops(); + + if (ntnic_xstats_ops == NULL) { + NT_LOG(INF, NTNIC, "ntnic_xstats module not included"); + return -1; + } + + return ntnic_xstats_ops->nthw_xstats_get_names(p_nt4ga_stat, xstats_names, size); +} + +static int eth_xstats_get_names_by_id(struct rte_eth_dev *eth_dev, + const uint64_t *ids, + struct rte_eth_xstat_name *xstats_names, + unsigned int size) +{ + struct pmd_internals *internals = eth_dev->data->dev_private; + struct drv_s *p_drv = internals->p_drv; + ntdrv_4ga_t *p_nt_drv = &p_drv->ntdrv; + nt4ga_stat_t *p_nt4ga_stat = &p_nt_drv->adapter_info.nt4ga_stat; + const struct ntnic_xstats_ops *ntnic_xstats_ops = get_ntnic_xstats_ops(); + + if (ntnic_xstats_ops == NULL) { + NT_LOG(INF, NTNIC, "ntnic_xstats module not included"); + return -1; + } + + return ntnic_xstats_ops->nthw_xstats_get_names_by_id(p_nt4ga_stat, xstats_names, ids, + size); +} + static int promiscuous_enable(struct rte_eth_dev __rte_unused(*dev)) { @@ -1328,7 +1643,72 @@ promiscuous_enable(struct rte_eth_dev __rte_unused(*dev)) return 0; } -static const struct eth_dev_ops nthw_eth_dev_ops = { +static int eth_dev_rss_hash_update(struct rte_eth_dev *eth_dev, struct rte_eth_rss_conf *rss_conf) +{ + const struct flow_filter_ops *flow_filter_ops = get_flow_filter_ops(); + + if (flow_filter_ops == NULL) { + NT_LOG_DBGX(ERR, NTNIC, "flow_filter module uninitialized"); + return -1; + } + + struct pmd_internals *internals = eth_dev->data->dev_private; + + struct flow_nic_dev *ndev = 
internals->flw_dev->ndev; + struct nt_eth_rss_conf tmp_rss_conf = { 0 }; + const int hsh_idx = 0; /* hsh index 0 means the default receipt in HSH module */ + + if (rss_conf->rss_key != NULL) { + if (rss_conf->rss_key_len > MAX_RSS_KEY_LEN) { + NT_LOG(ERR, NTNIC, + "ERROR: - RSS hash key length %u exceeds maximum value %u", + rss_conf->rss_key_len, MAX_RSS_KEY_LEN); + return -1; + } + + rte_memcpy(&tmp_rss_conf.rss_key, rss_conf->rss_key, rss_conf->rss_key_len); + } + + tmp_rss_conf.algorithm = rss_conf->algorithm; + + tmp_rss_conf.rss_hf = rss_conf->rss_hf; + int res = flow_filter_ops->flow_nic_set_hasher_fields(ndev, hsh_idx, tmp_rss_conf); + + if (res == 0) { + flow_filter_ops->hw_mod_hsh_rcp_flush(&ndev->be, hsh_idx, 1); + rte_memcpy(&ndev->rss_conf, &tmp_rss_conf, sizeof(struct nt_eth_rss_conf)); + + } else { + NT_LOG(ERR, NTNIC, "ERROR: - RSS hash update failed with error %i", res); + } + + return res; +} + +static int rss_hash_conf_get(struct rte_eth_dev *eth_dev, struct rte_eth_rss_conf *rss_conf) +{ + struct pmd_internals *internals = eth_dev->data->dev_private; + struct flow_nic_dev *ndev = internals->flw_dev->ndev; + + rss_conf->algorithm = (enum rte_eth_hash_function)ndev->rss_conf.algorithm; + + rss_conf->rss_hf = ndev->rss_conf.rss_hf; + + /* + * copy full stored key into rss_key and pad it with + * zeros up to rss_key_len / MAX_RSS_KEY_LEN + */ + if (rss_conf->rss_key != NULL) { + int key_len = RTE_MIN(rss_conf->rss_key_len, MAX_RSS_KEY_LEN); + memset(rss_conf->rss_key, 0, rss_conf->rss_key_len); + rte_memcpy(rss_conf->rss_key, &ndev->rss_conf.rss_key, key_len); + rss_conf->rss_key_len = key_len; + } + + return 0; +} + +static struct eth_dev_ops nthw_eth_dev_ops = { .dev_configure = eth_dev_configure, .dev_start = eth_dev_start, .dev_stop = eth_dev_stop, @@ -1336,6 +1716,8 @@ static const struct eth_dev_ops nthw_eth_dev_ops = { .dev_set_link_down = eth_dev_set_link_down, .dev_close = eth_dev_close, .link_update = eth_link_update, + .stats_get = eth_stats_get, + .stats_reset = eth_stats_reset, .dev_infos_get = eth_dev_infos_get, .fw_version_get = eth_fw_version_get, .rx_queue_setup = eth_rx_scg_queue_setup, @@ -1349,12 +1731,286 @@ static const struct eth_dev_ops nthw_eth_dev_ops = { .mac_addr_add = eth_mac_addr_add, .mac_addr_set = eth_mac_addr_set, .set_mc_addr_list = eth_set_mc_addr_list, + .mtr_ops_get = NULL, + .flow_ops_get = dev_flow_ops_get, + .xstats_get = eth_xstats_get, + .xstats_get_names = eth_xstats_get_names, + .xstats_reset = eth_xstats_reset, + .xstats_get_by_id = eth_xstats_get_by_id, + .xstats_get_names_by_id = eth_xstats_get_names_by_id, + .mtu_set = NULL, .promiscuous_enable = promiscuous_enable, + .rss_hash_update = eth_dev_rss_hash_update, + .rss_hash_conf_get = rss_hash_conf_get, }; +/* + * Port event thread + */ +THREAD_FUNC port_event_thread_fn(void *context) +{ + struct pmd_internals *internals = context; + struct drv_s *p_drv = internals->p_drv; + ntdrv_4ga_t *p_nt_drv = &p_drv->ntdrv; + struct adapter_info_s *p_adapter_info = &p_nt_drv->adapter_info; + struct flow_nic_dev *ndev = p_adapter_info->nt4ga_filter.mp_flow_device; + + nt4ga_stat_t *p_nt4ga_stat = &p_nt_drv->adapter_info.nt4ga_stat; + struct rte_eth_dev *eth_dev = &rte_eth_devices[internals->port_id]; + uint8_t port_no = internals->port; + + ntnic_flm_load_t flmdata; + ntnic_port_load_t portdata; + + memset(&flmdata, 0, sizeof(flmdata)); + memset(&portdata, 0, sizeof(portdata)); + + while (ndev != NULL && ndev->eth_base == NULL) + nt_os_wait_usec(1 * 1000 * 1000); + + while 
(!p_drv->ntdrv.b_shutdown) { + /* + * FLM load measurement + * Do only send event, if there has been a change + */ + if (p_nt4ga_stat->flm_stat_ver > 22 && p_nt4ga_stat->mp_stat_structs_flm) { + if (flmdata.lookup != p_nt4ga_stat->mp_stat_structs_flm->load_lps || + flmdata.access != p_nt4ga_stat->mp_stat_structs_flm->load_aps) { + rte_spinlock_lock(&p_nt_drv->stat_lck); + flmdata.lookup = p_nt4ga_stat->mp_stat_structs_flm->load_lps; + flmdata.access = p_nt4ga_stat->mp_stat_structs_flm->load_aps; + flmdata.lookup_maximum = + p_nt4ga_stat->mp_stat_structs_flm->max_lps; + flmdata.access_maximum = + p_nt4ga_stat->mp_stat_structs_flm->max_aps; + rte_spinlock_unlock(&p_nt_drv->stat_lck); + + if (eth_dev && eth_dev->data && eth_dev->data->dev_private) { + rte_eth_dev_callback_process(eth_dev, + (enum rte_eth_event_type)RTE_NTNIC_FLM_LOAD_EVENT, + &flmdata); + } + } + } + + /* + * Port load measurement + * Do only send event, if there has been a change. + */ + if (p_nt4ga_stat->mp_port_load) { + if (portdata.rx_bps != p_nt4ga_stat->mp_port_load[port_no].rx_bps || + portdata.tx_bps != p_nt4ga_stat->mp_port_load[port_no].tx_bps) { + rte_spinlock_lock(&p_nt_drv->stat_lck); + portdata.rx_bps = p_nt4ga_stat->mp_port_load[port_no].rx_bps; + portdata.tx_bps = p_nt4ga_stat->mp_port_load[port_no].tx_bps; + portdata.rx_pps = p_nt4ga_stat->mp_port_load[port_no].rx_pps; + portdata.tx_pps = p_nt4ga_stat->mp_port_load[port_no].tx_pps; + portdata.rx_pps_maximum = + p_nt4ga_stat->mp_port_load[port_no].rx_pps_max; + portdata.tx_pps_maximum = + p_nt4ga_stat->mp_port_load[port_no].tx_pps_max; + portdata.rx_bps_maximum = + p_nt4ga_stat->mp_port_load[port_no].rx_bps_max; + portdata.tx_bps_maximum = + p_nt4ga_stat->mp_port_load[port_no].tx_bps_max; + rte_spinlock_unlock(&p_nt_drv->stat_lck); + + if (eth_dev && eth_dev->data && eth_dev->data->dev_private) { + rte_eth_dev_callback_process(eth_dev, + (enum rte_eth_event_type)RTE_NTNIC_PORT_LOAD_EVENT, + &portdata); + } + } + } + + /* Process events */ + { + int count = 0; + bool do_wait = true; + + while (count < 5000) { + /* Local FLM statistic events */ + struct flm_info_event_s data; + + if (flm_inf_queue_get(port_no, FLM_INFO_LOCAL, &data) == 0) { + if (eth_dev && eth_dev->data && + eth_dev->data->dev_private) { + struct ntnic_flm_statistic_s event_data; + event_data.bytes = data.bytes; + event_data.packets = data.packets; + event_data.cause = data.cause; + event_data.id = data.id; + event_data.timestamp = data.timestamp; + rte_eth_dev_callback_process(eth_dev, + (enum rte_eth_event_type) + RTE_NTNIC_FLM_STATS_EVENT, + &event_data); + do_wait = false; + } + } + + /* AGED event */ + /* Note: RTE_FLOW_PORT_FLAG_STRICT_QUEUE flag is not supported so + * event is always generated + */ + int aged_event_count = flm_age_event_get(port_no); + + if (aged_event_count > 0 && eth_dev && eth_dev->data && + eth_dev->data->dev_private) { + rte_eth_dev_callback_process(eth_dev, + RTE_ETH_EVENT_FLOW_AGED, + NULL); + flm_age_event_clear(port_no); + do_wait = false; + } + + if (do_wait) + nt_os_wait_usec(10); + + count++; + do_wait = true; + } + } + } + + return THREAD_RETURN; +} + +/* + * Adapter flm stat thread + */ +THREAD_FUNC adapter_flm_update_thread_fn(void *context) +{ + const struct profile_inline_ops *profile_inline_ops = get_profile_inline_ops(); + + if (profile_inline_ops == NULL) { + NT_LOG(ERR, NTNIC, "%s: profile_inline module uninitialized", __func__); + return THREAD_RETURN; + } + + struct drv_s *p_drv = context; + + struct ntdrv_4ga_s *p_nt_drv = &p_drv->ntdrv; + 
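+ /*
+ * Resolve the filter and flow device owned by this adapter. The loop
+ * below waits until the port configuration (eth_base) is available and
+ * then keeps calling profile_inline_ops->flm_update() until the driver
+ * signals shutdown.
+ */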
struct adapter_info_s *p_adapter_info = &p_nt_drv->adapter_info; + struct nt4ga_filter_s *p_nt4ga_filter = &p_adapter_info->nt4ga_filter; + struct flow_nic_dev *p_flow_nic_dev = p_nt4ga_filter->mp_flow_device; + + NT_LOG(DBG, NTNIC, "%s: %s: waiting for port configuration", + p_adapter_info->mp_adapter_id_str, __func__); + + while (p_flow_nic_dev->eth_base == NULL) + nt_os_wait_usec(1 * 1000 * 1000); + + struct flow_eth_dev *dev = p_flow_nic_dev->eth_base; + + NT_LOG(DBG, NTNIC, "%s: %s: begin", p_adapter_info->mp_adapter_id_str, __func__); + + while (!p_drv->ntdrv.b_shutdown) + if (profile_inline_ops->flm_update(dev) == 0) + nt_os_wait_usec(10); + + NT_LOG(DBG, NTNIC, "%s: %s: end", p_adapter_info->mp_adapter_id_str, __func__); + return THREAD_RETURN; +} + +/* + * Adapter stat thread + */ +THREAD_FUNC adapter_stat_thread_fn(void *context) +{ + const struct nt4ga_stat_ops *nt4ga_stat_ops = get_nt4ga_stat_ops(); + + if (nt4ga_stat_ops == NULL) { + NT_LOG_DBGX(ERR, NTNIC, "Statistics module uninitialized"); + return THREAD_RETURN; + } + + struct drv_s *p_drv = context; + + ntdrv_4ga_t *p_nt_drv = &p_drv->ntdrv; + nt4ga_stat_t *p_nt4ga_stat = &p_nt_drv->adapter_info.nt4ga_stat; + nthw_stat_t *p_nthw_stat = p_nt4ga_stat->mp_nthw_stat; + const char *const p_adapter_id_str = p_nt_drv->adapter_info.mp_adapter_id_str; + (void)p_adapter_id_str; + + if (!p_nthw_stat) + return THREAD_RETURN; + + NT_LOG_DBGX(DBG, NTNIC, "%s: begin", p_adapter_id_str); + + assert(p_nthw_stat); + + while (!p_drv->ntdrv.b_shutdown) { + nt_os_wait_usec(10 * 1000); + + nthw_stat_trigger(p_nthw_stat); + + uint32_t loop = 0; + + while ((!p_drv->ntdrv.b_shutdown) && + (*p_nthw_stat->mp_timestamp == (uint64_t)-1)) { + nt_os_wait_usec(1 * 100); + + if (rte_log_get_level(nt_log_ntnic) == RTE_LOG_DEBUG && + (++loop & 0x3fff) == 0) { + if (p_nt4ga_stat->mp_nthw_rpf) { + NT_LOG(ERR, NTNIC, "Statistics DMA frozen"); + + } else if (p_nt4ga_stat->mp_nthw_rmc) { + uint32_t sf_ram_of = + nthw_rmc_get_status_sf_ram_of(p_nt4ga_stat + ->mp_nthw_rmc); + uint32_t descr_fifo_of = + nthw_rmc_get_status_descr_fifo_of(p_nt4ga_stat + ->mp_nthw_rmc); + + uint32_t dbg_merge = + nthw_rmc_get_dbg_merge(p_nt4ga_stat->mp_nthw_rmc); + uint32_t mac_if_err = + nthw_rmc_get_mac_if_err(p_nt4ga_stat->mp_nthw_rmc); + + NT_LOG(ERR, NTNIC, "Statistics DMA frozen"); + NT_LOG(ERR, NTNIC, "SF RAM Overflow : %08x", + sf_ram_of); + NT_LOG(ERR, NTNIC, "Descr Fifo Overflow : %08x", + descr_fifo_of); + NT_LOG(ERR, NTNIC, "DBG Merge : %08x", + dbg_merge); + NT_LOG(ERR, NTNIC, "MAC If Errors : %08x", + mac_if_err); + } + } + } + + /* Check then collect */ + { + rte_spinlock_lock(&p_nt_drv->stat_lck); + nt4ga_stat_ops->nt4ga_stat_collect(&p_nt_drv->adapter_info, p_nt4ga_stat); + rte_spinlock_unlock(&p_nt_drv->stat_lck); + } + } + + NT_LOG_DBGX(DBG, NTNIC, "%s: end", p_adapter_id_str); + return THREAD_RETURN; +} + static int nthw_pci_dev_init(struct rte_pci_device *pci_dev) { + const struct flow_filter_ops *flow_filter_ops = get_flow_filter_ops(); + + if (flow_filter_ops == NULL) { + NT_LOG_DBGX(ERR, NTNIC, "flow_filter module uninitialized"); + /* Return statement is not necessary here to allow traffic processing by SW */ + } + + const struct profile_inline_ops *profile_inline_ops = get_profile_inline_ops(); + + if (profile_inline_ops == NULL) { + NT_LOG_DBGX(ERR, NTNIC, "profile_inline module uninitialized"); + /* Return statement is not necessary here to allow traffic processing by SW */ + } + nt_vfio_init(); const struct port_ops *port_ops = get_port_ops(); @@ -1378,10 
+2034,13 @@ nthw_pci_dev_init(struct rte_pci_device *pci_dev) uint32_t n_port_mask = -1; /* All ports enabled by default */ uint32_t nb_rx_queues = 1; uint32_t nb_tx_queues = 1; + uint32_t exception_path = 0; struct flow_queue_id_s queue_ids[MAX_QUEUES]; int n_phy_ports; struct port_link_speed pls_mbps[NUM_ADAPTER_PORTS_MAX] = { 0 }; int num_port_speeds = 0; + enum flow_eth_dev_profile profile = FLOW_ETH_DEV_PROFILE_INLINE; + NT_LOG_DBGX(DBG, NTNIC, "Dev %s PF #%i Init : %02x:%02x:%i", pci_dev->name, pci_dev->addr.function, pci_dev->addr.bus, pci_dev->addr.devid, pci_dev->addr.function); @@ -1537,6 +2196,14 @@ nthw_pci_dev_init(struct rte_pci_device *pci_dev) return -1; } + const struct meter_ops_s *meter_ops = get_meter_ops(); + + if (meter_ops != NULL) + nthw_eth_dev_ops.mtr_ops_get = meter_ops->eth_mtr_ops_get; + + else + NT_LOG(DBG, NTNIC, "Meter module is not initialized"); + /* Initialize the queue system */ if (err == 0) { sg_ops = get_sg_ops(); @@ -1580,6 +2247,28 @@ nthw_pci_dev_init(struct rte_pci_device *pci_dev) return -1; } + if (profile_inline_ops != NULL && fpga_info->profile == FPGA_INFO_PROFILE_INLINE) { + profile_inline_ops->flm_setup_queues(); + res = THREAD_CTRL_CREATE(&p_nt_drv->flm_thread, "ntnic-nt_flm_update_thr", + adapter_flm_update_thread_fn, (void *)p_drv); + + if (res) { + NT_LOG_DBGX(ERR, NTNIC, "%s: error=%d", + (pci_dev->name[0] ? pci_dev->name : "NA"), res); + return -1; + } + } + + rte_spinlock_init(&p_nt_drv->stat_lck); + res = THREAD_CTRL_CREATE(&p_nt_drv->stat_thread, "nt4ga_stat_thr", adapter_stat_thread_fn, + (void *)p_drv); + + if (res) { + NT_LOG(ERR, NTNIC, "%s: error=%d", + (pci_dev->name[0] ? pci_dev->name : "NA"), res); + return -1; + } + n_phy_ports = fpga_info->n_phy_ports; for (int n_intf_no = 0; n_intf_no < n_phy_ports; n_intf_no++) { @@ -1613,6 +2302,7 @@ nthw_pci_dev_init(struct rte_pci_device *pci_dev) internals->pci_dev = pci_dev; internals->n_intf_no = n_intf_no; internals->type = PORT_TYPE_PHYSICAL; + internals->port = n_intf_no; internals->nb_rx_queues = nb_rx_queues; internals->nb_tx_queues = nb_tx_queues; @@ -1681,6 +2371,18 @@ nthw_pci_dev_init(struct rte_pci_device *pci_dev) return -1; } + if (flow_filter_ops != NULL) { + internals->flw_dev = flow_filter_ops->flow_get_eth_dev(0, n_intf_no, + eth_dev->data->port_id, nb_rx_queues, queue_ids, + &internals->txq_scg[0].rss_target_id, profile, exception_path); + + if (!internals->flw_dev) { + NT_LOG(ERR, NTNIC, + "Error creating port. 
Resource exhaustion in HW"); + return -1; + } + } + /* connect structs */ internals->p_drv = p_drv; eth_dev->data->dev_private = internals; @@ -1709,6 +2411,33 @@ nthw_pci_dev_init(struct rte_pci_device *pci_dev) /* increase initialized ethernet devices - PF */ p_drv->n_eth_dev_init_count++; + + if (get_flow_filter_ops() != NULL) { + if (fpga_info->profile == FPGA_INFO_PROFILE_INLINE && + internals->flw_dev->ndev->be.tpe.ver >= 2) { + assert(nthw_eth_dev_ops.mtu_set == dev_set_mtu_inline || + nthw_eth_dev_ops.mtu_set == NULL); + nthw_eth_dev_ops.mtu_set = dev_set_mtu_inline; + dev_set_mtu_inline(eth_dev, MTUINITVAL); + NT_LOG_DBGX(DBG, NTNIC, "INLINE MTU supported, tpe version %d", + internals->flw_dev->ndev->be.tpe.ver); + + } else { + NT_LOG(DBG, NTNIC, "INLINE MTU not supported"); + } + } + + /* Port event thread */ + if (fpga_info->profile == FPGA_INFO_PROFILE_INLINE) { + res = THREAD_CTRL_CREATE(&p_nt_drv->port_event_thread, "nt_port_event_thr", + port_event_thread_fn, (void *)internals); + + if (res) { + NT_LOG(ERR, NTNIC, "%s: error=%d", + (pci_dev->name[0] ? pci_dev->name : "NA"), res); + return -1; + } + } } return 0; @@ -1758,6 +2487,48 @@ nthw_pci_dev_deinit(struct rte_eth_dev *eth_dev __rte_unused) return 0; } +static void signal_handler_func_int(int sig) +{ + if (sig != SIGINT) { + signal(sig, previous_handler); + raise(sig); + return; + } + + kill_pmd = 1; +} + +THREAD_FUNC shutdown_thread(void *arg __rte_unused) +{ + while (!kill_pmd) + nt_os_wait_usec(100 * 1000); + + NT_LOG_DBGX(DBG, NTNIC, "Shutting down because of ctrl+C"); + + signal(SIGINT, previous_handler); + raise(SIGINT); + + return THREAD_RETURN; +} + +static int init_shutdown(void) +{ + NT_LOG(DBG, NTNIC, "Starting shutdown handler"); + kill_pmd = 0; + previous_handler = signal(SIGINT, signal_handler_func_int); + THREAD_CREATE(&shutdown_tid, shutdown_thread, NULL); + + /* + * 1 time calculation of 1 sec stat update rtc cycles to prevent stat poll + * flooding by OVS from multiple virtual port threads - no need to be precise + */ + uint64_t now_rtc = rte_get_tsc_cycles(); + nt_os_wait_usec(10 * 1000); + rte_tsc_freq = 100 * (rte_get_tsc_cycles() - now_rtc); + + return 0; +} + static int nthw_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, struct rte_pci_device *pci_dev) @@ -1800,6 +2571,8 @@ nthw_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, ret = nthw_pci_dev_init(pci_dev); + init_shutdown(); + NT_LOG_DBGX(DBG, NTNIC, "leave: ret=%d", ret); return ret; } diff --git a/drivers/net/ntnic/ntnic_filter/ntnic_filter.c b/drivers/net/ntnic/ntnic_filter/ntnic_filter.c new file mode 100644 index 0000000000..4c8503f689 --- /dev/null +++ b/drivers/net/ntnic/ntnic_filter/ntnic_filter.c @@ -0,0 +1,1346 @@ +/* + * SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2023 Napatech A/S + */ + +#include +#include +#include + +#include "ntlog.h" +#include "nt_util.h" +#include "create_elements.h" +#include "ntnic_mod_reg.h" +#include "ntos_system.h" +#include "ntos_drv.h" + +#define MAX_RTE_FLOWS 8192 + +#define MAX_COLOR_FLOW_STATS 0x400 +#define NT_MAX_COLOR_FLOW_STATS 0x400 + +#if (MAX_COLOR_FLOW_STATS != NT_MAX_COLOR_FLOW_STATS) +#error Difference in COLOR_FLOW_STATS. Please synchronize the defines. 
+#endif + +static struct rte_flow nt_flows[MAX_RTE_FLOWS]; + +rte_spinlock_t flow_lock = RTE_SPINLOCK_INITIALIZER; +static struct rte_flow nt_flows[MAX_RTE_FLOWS]; + +int interpret_raw_data(uint8_t *data, uint8_t *preserve, int size, struct rte_flow_item *out) +{ + int hdri = 0; + int pkti = 0; + + /* Ethernet */ + if (size - pkti == 0) + goto interpret_end; + + if (size - pkti < (int)sizeof(struct rte_ether_hdr)) + return -1; + + out[hdri].type = RTE_FLOW_ITEM_TYPE_ETH; + out[hdri].spec = &data[pkti]; + out[hdri].mask = (preserve != NULL) ? &preserve[pkti] : NULL; + + rte_be16_t ether_type = ((struct rte_ether_hdr *)&data[pkti])->ether_type; + + hdri += 1; + pkti += sizeof(struct rte_ether_hdr); + + if (size - pkti == 0) + goto interpret_end; + + /* VLAN */ + while (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN) || + ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_QINQ) || + ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_QINQ1)) { + if (size - pkti == 0) + goto interpret_end; + + if (size - pkti < (int)sizeof(struct rte_vlan_hdr)) + return -1; + + out[hdri].type = RTE_FLOW_ITEM_TYPE_VLAN; + out[hdri].spec = &data[pkti]; + out[hdri].mask = (preserve != NULL) ? &preserve[pkti] : NULL; + + ether_type = ((struct rte_vlan_hdr *)&data[pkti])->eth_proto; + + hdri += 1; + pkti += sizeof(struct rte_vlan_hdr); + } + + if (size - pkti == 0) + goto interpret_end; + + /* Layer 3 */ + uint8_t next_header = 0; + + if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4) && (data[pkti] & 0xF0) == 0x40) { + if (size - pkti < (int)sizeof(struct rte_ipv4_hdr)) + return -1; + + out[hdri].type = RTE_FLOW_ITEM_TYPE_IPV4; + out[hdri].spec = &data[pkti]; + out[hdri].mask = (preserve != NULL) ? &preserve[pkti] : NULL; + + next_header = data[pkti + 9]; + + hdri += 1; + pkti += sizeof(struct rte_ipv4_hdr); + + } else if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6) && + (data[pkti] & 0xF0) == 0x60) { + if (size - pkti < (int)sizeof(struct rte_ipv6_hdr)) + return -1; + + out[hdri].type = RTE_FLOW_ITEM_TYPE_IPV6; + out[hdri].spec = &data[pkti]; + out[hdri].mask = (preserve != NULL) ? &preserve[pkti] : NULL; + + next_header = data[pkti + 6]; + + hdri += 1; + pkti += sizeof(struct rte_ipv6_hdr); + } else { + return -1; + } + + if (size - pkti == 0) + goto interpret_end; + + /* Layer 4 */ + int gtpu_encap = 0; + + if (next_header == 1) { /* ICMP */ + if (size - pkti < (int)sizeof(struct rte_icmp_hdr)) + return -1; + + out[hdri].type = RTE_FLOW_ITEM_TYPE_ICMP; + out[hdri].spec = &data[pkti]; + out[hdri].mask = (preserve != NULL) ? &preserve[pkti] : NULL; + + hdri += 1; + pkti += sizeof(struct rte_icmp_hdr); + + } else if (next_header == 58) { /* ICMP6 */ + if (size - pkti < (int)sizeof(struct rte_flow_item_icmp6)) + return -1; + + out[hdri].type = RTE_FLOW_ITEM_TYPE_ICMP6; + out[hdri].spec = &data[pkti]; + out[hdri].mask = (preserve != NULL) ? &preserve[pkti] : NULL; + + hdri += 1; + pkti += sizeof(struct rte_icmp_hdr); + + } else if (next_header == 6) { /* TCP */ + if (size - pkti < (int)sizeof(struct rte_tcp_hdr)) + return -1; + + out[hdri].type = RTE_FLOW_ITEM_TYPE_TCP; + out[hdri].spec = &data[pkti]; + out[hdri].mask = (preserve != NULL) ? &preserve[pkti] : NULL; + + hdri += 1; + pkti += sizeof(struct rte_tcp_hdr); + + } else if (next_header == 17) { /* UDP */ + if (size - pkti < (int)sizeof(struct rte_udp_hdr)) + return -1; + + out[hdri].type = RTE_FLOW_ITEM_TYPE_UDP; + out[hdri].spec = &data[pkti]; + out[hdri].mask = (preserve != NULL) ? 
&preserve[pkti] : NULL; + + gtpu_encap = ((struct rte_udp_hdr *)&data[pkti])->dst_port == + rte_cpu_to_be_16(RTE_GTPU_UDP_PORT); + + hdri += 1; + pkti += sizeof(struct rte_udp_hdr); + + } else if (next_header == 132) {/* SCTP */ + if (size - pkti < (int)sizeof(struct rte_sctp_hdr)) + return -1; + + out[hdri].type = RTE_FLOW_ITEM_TYPE_SCTP; + out[hdri].spec = &data[pkti]; + out[hdri].mask = (preserve != NULL) ? &preserve[pkti] : NULL; + + hdri += 1; + pkti += sizeof(struct rte_sctp_hdr); + + } else { + return -1; + } + + if (size - pkti == 0) + goto interpret_end; + + /* GTPv1-U */ + if (gtpu_encap) { + if (size - pkti < (int)sizeof(struct rte_gtp_hdr)) + return -1; + + out[hdri] + .type = RTE_FLOW_ITEM_TYPE_GTP; + out[hdri] + .spec = &data[pkti]; + out[hdri] + .mask = (preserve != NULL) ? &preserve[pkti] : NULL; + + int extension_present_bit = ((struct rte_gtp_hdr *)&data[pkti]) + ->e; + + hdri += 1; + pkti += sizeof(struct rte_gtp_hdr); + + if (extension_present_bit) { + if (size - pkti < (int)sizeof(struct rte_gtp_hdr_ext_word)) + return -1; + + out[hdri] + .type = RTE_FLOW_ITEM_TYPE_GTP; + out[hdri] + .spec = &data[pkti]; + out[hdri] + .mask = (preserve != NULL) ? &preserve[pkti] : NULL; + + uint8_t next_ext = ((struct rte_gtp_hdr_ext_word *)&data[pkti]) + ->next_ext; + + hdri += 1; + pkti += sizeof(struct rte_gtp_hdr_ext_word); + + while (next_ext) { + size_t ext_len = data[pkti] * 4; + + if (size - pkti < (int)ext_len) + return -1; + + out[hdri] + .type = RTE_FLOW_ITEM_TYPE_GTP; + out[hdri] + .spec = &data[pkti]; + out[hdri] + .mask = (preserve != NULL) ? &preserve[pkti] : NULL; + + next_ext = data[pkti + ext_len - 1]; + + hdri += 1; + pkti += ext_len; + } + } + } + + if (size - pkti != 0) + return -1; + +interpret_end: + out[hdri].type = RTE_FLOW_ITEM_TYPE_END; + out[hdri].spec = NULL; + out[hdri].mask = NULL; + + return hdri + 1; +} + +int convert_error(struct rte_flow_error *error, struct rte_flow_error *rte_flow_error) +{ + if (error) { + error->cause = NULL; + error->message = rte_flow_error->message; + + if (rte_flow_error->type == RTE_FLOW_ERROR_TYPE_NONE || + rte_flow_error->type == RTE_FLOW_ERROR_TYPE_NONE) + error->type = RTE_FLOW_ERROR_TYPE_NONE; + + else + error->type = RTE_FLOW_ERROR_TYPE_UNSPECIFIED; + } + + return 0; +} + +int create_attr(struct cnv_attr_s *attribute, const struct rte_flow_attr *attr) +{ + memset(&attribute->attr, 0x0, sizeof(struct rte_flow_attr)); + + if (attr) { + attribute->attr.group = attr->group; + attribute->attr.priority = attr->priority; + } + + return 0; +} + +int create_match_elements(struct cnv_match_s *match, const struct rte_flow_item items[], + int max_elem) +{ + int eidx = 0; + int iter_idx = 0; + int type = -1; + + if (!items) { + NT_LOG(ERR, FILTER, "ERROR no items to iterate!"); + return -1; + } + + do { + type = items[iter_idx].type; + + if (type < 0) { + if ((int)items[iter_idx].type == NT_RTE_FLOW_ITEM_TYPE_TUNNEL) { + type = NT_RTE_FLOW_ITEM_TYPE_TUNNEL; + + } else { + NT_LOG(ERR, FILTER, "ERROR unknown item type received!"); + return -1; + } + } + + if (type >= 0) { + if (items[iter_idx].last) { + /* Ranges are not supported yet */ + NT_LOG(ERR, FILTER, "ERROR ITEM-RANGE SETUP - NOT SUPPORTED!"); + return -1; + } + + if (eidx == max_elem) { + NT_LOG(ERR, FILTER, "ERROR TOO MANY ELEMENTS ENCOUNTERED!"); + return -1; + } + + match->rte_flow_item[eidx].type = type; + match->rte_flow_item[eidx].spec = items[iter_idx].spec; + match->rte_flow_item[eidx].mask = items[iter_idx].mask; + + eidx++; + iter_idx++; + } + + } while (type >= 0 
&& type != RTE_FLOW_ITEM_TYPE_END); + + return (type >= 0) ? 0 : -1; +} + +int create_action_elements_inline(struct cnv_action_s *action, + const struct rte_flow_action actions[], + int max_elem, + uint32_t queue_offset) +{ + int aidx = 0; + int type = -1; + + do { + type = actions[aidx].type; + if (type >= 0) { + action->flow_actions[aidx].type = type; + + /* + * Non-compatible actions handled here + */ + switch (type) { + case RTE_FLOW_ACTION_TYPE_RSS: { + const struct rte_flow_action_rss *rss = + (const struct rte_flow_action_rss *)actions[aidx].conf; + + switch (rss->func) { + case RTE_ETH_HASH_FUNCTION_DEFAULT: + action->flow_rss.func = + (enum rte_eth_hash_function) + RTE_ETH_HASH_FUNCTION_DEFAULT; + break; + + case RTE_ETH_HASH_FUNCTION_TOEPLITZ: + action->flow_rss.func = + (enum rte_eth_hash_function) + RTE_ETH_HASH_FUNCTION_TOEPLITZ; + + if (rte_is_power_of_2(rss->queue_num) == 0) { + NT_LOG(ERR, FILTER, + "RTE ACTION RSS - for Toeplitz the number of queues must be power of two"); + return -1; + } + + break; + + case RTE_ETH_HASH_FUNCTION_SIMPLE_XOR: + case RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ: + case RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ_SORT: + case RTE_ETH_HASH_FUNCTION_MAX: + default: + NT_LOG(ERR, FILTER, + "RTE ACTION RSS - unsupported function: %u", + rss->func); + return -1; + } + + uint64_t tmp_rss_types = 0; + + switch (rss->level) { + case 1: + /* clear/override level mask specified at types */ + tmp_rss_types = rss->types & (~RTE_ETH_RSS_LEVEL_MASK); + action->flow_rss.types = + tmp_rss_types | RTE_ETH_RSS_LEVEL_OUTERMOST; + break; + + case 2: + /* clear/override level mask specified at types */ + tmp_rss_types = rss->types & (~RTE_ETH_RSS_LEVEL_MASK); + action->flow_rss.types = + tmp_rss_types | RTE_ETH_RSS_LEVEL_INNERMOST; + break; + + case 0: + /* keep level mask specified at types */ + action->flow_rss.types = rss->types; + break; + + default: + NT_LOG(ERR, FILTER, + "RTE ACTION RSS - unsupported level: %u", + rss->level); + return -1; + } + + action->flow_rss.level = 0; + action->flow_rss.key_len = rss->key_len; + action->flow_rss.queue_num = rss->queue_num; + action->flow_rss.key = rss->key; + action->flow_rss.queue = rss->queue; + action->flow_actions[aidx].conf = &action->flow_rss; + } + break; + + case RTE_FLOW_ACTION_TYPE_RAW_DECAP: { + const struct rte_flow_action_raw_decap *decap = + (const struct rte_flow_action_raw_decap *)actions[aidx] + .conf; + int item_count = interpret_raw_data(decap->data, NULL, decap->size, + action->decap.items); + + if (item_count < 0) + return item_count; + action->decap.data = decap->data; + action->decap.size = decap->size; + action->decap.item_count = item_count; + action->flow_actions[aidx].conf = &action->decap; + } + break; + + case RTE_FLOW_ACTION_TYPE_RAW_ENCAP: { + const struct rte_flow_action_raw_encap *encap = + (const struct rte_flow_action_raw_encap *)actions[aidx] + .conf; + int item_count = interpret_raw_data(encap->data, encap->preserve, + encap->size, action->encap.items); + + if (item_count < 0) + return item_count; + action->encap.data = encap->data; + action->encap.preserve = encap->preserve; + action->encap.size = encap->size; + action->encap.item_count = item_count; + action->flow_actions[aidx].conf = &action->encap; + } + break; + + case RTE_FLOW_ACTION_TYPE_QUEUE: { + const struct rte_flow_action_queue *queue = + (const struct rte_flow_action_queue *)actions[aidx].conf; + action->queue.index = queue->index + queue_offset; + action->flow_actions[aidx].conf = &action->queue; + } + break; + + default: 
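+ /* Remaining action types are compatible as-is; the configuration is passed through unchanged */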
{ + action->flow_actions[aidx].conf = actions[aidx].conf; + } + break; + } + + aidx++; + + if (aidx == max_elem) + return -1; + } + + } while (type >= 0 && type != RTE_FLOW_ITEM_TYPE_END); + + return (type >= 0) ? 0 : -1; +} + +static inline uint16_t get_caller_id(uint16_t port) +{ + return MAX_VDPA_PORTS + port + 1; +} + +static int is_flow_handle_typecast(struct rte_flow *flow) +{ + const void *first_element = &nt_flows[0]; + const void *last_element = &nt_flows[MAX_RTE_FLOWS - 1]; + return (void *)flow < first_element || (void *)flow > last_element; +} + +static int convert_flow(struct rte_eth_dev *eth_dev, + const struct rte_flow_attr *attr, + const struct rte_flow_item items[], + const struct rte_flow_action actions[], + struct cnv_attr_s *attribute, + struct cnv_match_s *match, + struct cnv_action_s *action, + struct rte_flow_error *error) +{ + struct pmd_internals *internals = eth_dev->data->dev_private; + struct fpga_info_s *fpga_info = &internals->p_drv->ntdrv.adapter_info.fpga_info; + + static struct rte_flow_error flow_error = { + .type = RTE_FLOW_ERROR_TYPE_NONE, .message = "none" }; + uint32_t queue_offset = 0; + + /* Set initial error */ + convert_error(error, &flow_error); + + if (!internals) { + rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, + "Missing eth_dev"); + return -1; + } + + if (internals->type == PORT_TYPE_OVERRIDE && internals->vpq_nb_vq > 0) { + /* + * The queues coming from the main PMD will always start from 0 + * When the port is a the VF/vDPA port the queues must be changed + * to match the queues allocated for VF/vDPA. + */ + queue_offset = internals->vpq[0].id; + } + + if (create_attr(attribute, attr) != 0) { + rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ATTR, NULL, "Error in attr"); + return -1; + } + + if (create_match_elements(match, items, MAX_ELEMENTS) != 0) { + rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL, + "Error in items"); + return -1; + } + + if (fpga_info->profile == FPGA_INFO_PROFILE_INLINE) { + if (create_action_elements_inline(action, actions, + MAX_ACTIONS, queue_offset) != 0) { + rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, NULL, + "Error in actions"); + return -1; + } + + } else { + rte_flow_error_set(error, EPERM, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, + "Unsupported adapter profile"); + return -1; + } + + return 0; +} + +static int +eth_flow_destroy(struct rte_eth_dev *eth_dev, struct rte_flow *flow, struct rte_flow_error *error) +{ + const struct flow_filter_ops *flow_filter_ops = get_flow_filter_ops(); + + if (flow_filter_ops == NULL) { + NT_LOG_DBGX(ERR, FILTER, "flow_filter module uninitialized"); + return -1; + } + + struct pmd_internals *internals = eth_dev->data->dev_private; + + static struct rte_flow_error flow_error = { + .type = RTE_FLOW_ERROR_TYPE_NONE, .message = "none" }; + int res = 0; + /* Set initial error */ + convert_error(error, &flow_error); + + if (!flow) + return 0; + + if (is_flow_handle_typecast(flow)) { + res = flow_filter_ops->flow_destroy(internals->flw_dev, (void *)flow, &flow_error); + convert_error(error, &flow_error); + + } else { + res = flow_filter_ops->flow_destroy(internals->flw_dev, flow->flw_hdl, + &flow_error); + convert_error(error, &flow_error); + + rte_spinlock_lock(&flow_lock); + flow->used = 0; + rte_spinlock_unlock(&flow_lock); + } + + return res; +} + +static struct rte_flow *eth_flow_create(struct rte_eth_dev *eth_dev, + const struct rte_flow_attr *attr, + const struct rte_flow_item items[], + const struct 
rte_flow_action actions[], + struct rte_flow_error *error) +{ + const struct flow_filter_ops *flow_filter_ops = get_flow_filter_ops(); + + if (flow_filter_ops == NULL) { + NT_LOG_DBGX(ERR, FILTER, "flow_filter module uninitialized"); + return NULL; + } + + struct pmd_internals *internals = eth_dev->data->dev_private; + + struct fpga_info_s *fpga_info = &internals->p_drv->ntdrv.adapter_info.fpga_info; + + struct cnv_attr_s attribute = { 0 }; + struct cnv_match_s match = { 0 }; + struct cnv_action_s action = { 0 }; + + static struct rte_flow_error flow_error = { + .type = RTE_FLOW_ERROR_TYPE_NONE, .message = "none" }; + uint32_t flow_stat_id = 0; + + if (convert_flow(eth_dev, attr, items, actions, &attribute, &match, &action, error) < 0) + return NULL; + + /* Main application caller_id is port_id shifted above VF ports */ + attribute.caller_id = get_caller_id(eth_dev->data->port_id); + + if (fpga_info->profile == FPGA_INFO_PROFILE_INLINE && attribute.attr.group > 0) { + void *flw_hdl = flow_filter_ops->flow_create(internals->flw_dev, &attribute.attr, + attribute.forced_vlan_vid, attribute.caller_id, + match.rte_flow_item, action.flow_actions, + &flow_error); + convert_error(error, &flow_error); + return (struct rte_flow *)flw_hdl; + } + + struct rte_flow *flow = NULL; + rte_spinlock_lock(&flow_lock); + int i; + + for (i = 0; i < MAX_RTE_FLOWS; i++) { + if (!nt_flows[i].used) { + nt_flows[i].flow_stat_id = flow_stat_id; + + if (nt_flows[i].flow_stat_id < NT_MAX_COLOR_FLOW_STATS) { + nt_flows[i].used = 1; + flow = &nt_flows[i]; + } + + break; + } + } + + rte_spinlock_unlock(&flow_lock); + + if (flow) { + flow->flw_hdl = flow_filter_ops->flow_create(internals->flw_dev, &attribute.attr, + attribute.forced_vlan_vid, attribute.caller_id, + match.rte_flow_item, action.flow_actions, + &flow_error); + convert_error(error, &flow_error); + + if (!flow->flw_hdl) { + rte_spinlock_lock(&flow_lock); + flow->used = 0; + flow = NULL; + rte_spinlock_unlock(&flow_lock); + + } else { + rte_spinlock_lock(&flow_lock); + flow->caller_id = attribute.caller_id; + rte_spinlock_unlock(&flow_lock); + } + } + + return flow; +} + +static int eth_flow_flush(struct rte_eth_dev *eth_dev, struct rte_flow_error *error) +{ + const struct flow_filter_ops *flow_filter_ops = get_flow_filter_ops(); + + if (flow_filter_ops == NULL) { + NT_LOG_DBGX(ERR, FILTER, "flow_filter module uninitialized"); + return -1; + } + + struct pmd_internals *internals = eth_dev->data->dev_private; + + static struct rte_flow_error flow_error = { + .type = RTE_FLOW_ERROR_TYPE_NONE, .message = "none" }; + int res = 0; + /* Main application caller_id is port_id shifted above VDPA ports */ + uint16_t caller_id = get_caller_id(eth_dev->data->port_id); + + if (internals->flw_dev) { + res = flow_filter_ops->flow_flush(internals->flw_dev, caller_id, &flow_error); + rte_spinlock_lock(&flow_lock); + + for (int flow = 0; flow < MAX_RTE_FLOWS; flow++) { + if (nt_flows[flow].used && nt_flows[flow].caller_id == caller_id) { + /* Cleanup recorded flows */ + nt_flows[flow].used = 0; + nt_flows[flow].caller_id = 0; + nt_flows[flow].stat_bytes = 0UL; + nt_flows[flow].stat_pkts = 0UL; + nt_flows[flow].stat_tcp_flags = 0; + } + } + + rte_spinlock_unlock(&flow_lock); + } + + convert_error(error, &flow_error); + + return res; +} + +static int eth_flow_actions_update(struct rte_eth_dev *eth_dev, + struct rte_flow *flow, + const struct rte_flow_action actions[], + struct rte_flow_error *error) +{ + const struct flow_filter_ops *flow_filter_ops = get_flow_filter_ops(); + + if 
(flow_filter_ops == NULL) { + NT_LOG(ERR, FILTER, "flow_filter module uninitialized"); + return -1; + } + + struct pmd_internals *internals = eth_dev->data->dev_private; + + static struct rte_flow_error flow_error = { .type = RTE_FLOW_ERROR_TYPE_NONE, + .message = "none" }; + int res = -1; + + if (internals->flw_dev) { + struct pmd_internals *dev_private = + eth_dev->data->dev_private; + struct fpga_info_s *fpga_info = &dev_private->p_drv->ntdrv.adapter_info.fpga_info; + struct cnv_action_s action = { 0 }; + + if (fpga_info->profile == FPGA_INFO_PROFILE_INLINE) { + uint32_t queue_offset = 0; + + if (dev_private->type == PORT_TYPE_OVERRIDE && + dev_private->vpq_nb_vq > 0) { + /* + * The queues coming from the main PMD will always start from 0. + * When the port is a VF/vDPA port, the queues must be changed + * to match the queues allocated for VF/vDPA. + */ + queue_offset = dev_private->vpq[0].id; + } + + if (create_action_elements_inline(&action, actions, MAX_ACTIONS, + queue_offset) != 0) { + rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, NULL, + "Error in actions"); + return -1; + } + } + + if (is_flow_handle_typecast(flow)) { + res = flow_filter_ops->flow_actions_update(internals->flw_dev, + (void *)flow, + action.flow_actions, + &flow_error); + + } else { + res = flow_filter_ops->flow_actions_update(internals->flw_dev, + flow->flw_hdl, + action.flow_actions, + &flow_error); + } + } + + convert_error(error, &flow_error); + + return res; +} + +static int eth_flow_dev_dump(struct rte_eth_dev *eth_dev, + struct rte_flow *flow, + FILE *file, + struct rte_flow_error *error) +{ + const struct flow_filter_ops *flow_filter_ops = get_flow_filter_ops(); + + if (flow_filter_ops == NULL) { + NT_LOG(ERR, NTNIC, "%s: flow_filter module uninitialized", __func__); + return -1; + } + + struct pmd_internals *internals = eth_dev->data->dev_private; + + static struct rte_flow_error flow_error = { + .type = RTE_FLOW_ERROR_TYPE_NONE, .message = "none" }; + + uint16_t caller_id = get_caller_id(eth_dev->data->port_id); + + int res = flow_filter_ops->flow_dev_dump(internals->flw_dev, + is_flow_handle_typecast(flow) ? 
(void *)flow + : flow->flw_hdl, + caller_id, file, &flow_error); + + convert_error(error, &flow_error); + return res; +} + +static int eth_flow_get_aged_flows(struct rte_eth_dev *eth_dev, + void **context, + uint32_t nb_contexts, + struct rte_flow_error *error) +{ + const struct flow_filter_ops *flow_filter_ops = get_flow_filter_ops(); + + if (flow_filter_ops == NULL) { + NT_LOG_DBGX(ERR, NTNIC, "flow_filter module uninitialized"); + return -1; + } + + struct pmd_internals *internals = eth_dev->data->dev_private; + + static struct rte_flow_error flow_error = { + .type = RTE_FLOW_ERROR_TYPE_NONE, + .message = "none" }; + + uint16_t caller_id = get_caller_id(eth_dev->data->port_id); + + int res = flow_filter_ops->flow_get_aged_flows(internals->flw_dev, caller_id, context, + nb_contexts, &flow_error); + + convert_error(error, &flow_error); + return res; +} + +/* + * NT Flow asynchronous operations API + */ + +static int eth_flow_info_get(struct rte_eth_dev *dev, struct rte_flow_port_info *port_info, + struct rte_flow_queue_info *queue_info, struct rte_flow_error *error) +{ + const struct flow_filter_ops *flow_filter_ops = get_flow_filter_ops(); + + if (flow_filter_ops == NULL) { + NT_LOG_DBGX(ERR, FILTER, "flow_filter module uninitialized"); + return -1; + } + + struct pmd_internals *internals = dev->data->dev_private; + + static struct rte_flow_error flow_error = { + .type = RTE_FLOW_ERROR_TYPE_NONE, + .message = "none" }; + + int res = flow_filter_ops->flow_info_get(internals->flw_dev, + get_caller_id(dev->data->port_id), + (struct rte_flow_port_info *)port_info, + (struct rte_flow_queue_info *)queue_info, + &flow_error); + + convert_error(error, &flow_error); + return res; +} + +static int eth_flow_configure(struct rte_eth_dev *dev, const struct rte_flow_port_attr *port_attr, + uint16_t nb_queue, const struct rte_flow_queue_attr *queue_attr[], + struct rte_flow_error *error) +{ + const struct flow_filter_ops *flow_filter_ops = get_flow_filter_ops(); + + if (flow_filter_ops == NULL) { + NT_LOG_DBGX(ERR, FILTER, "flow_filter module uninitialized"); + return -1; + } + + struct pmd_internals *internals = dev->data->dev_private; + + static struct rte_flow_error flow_error = { + .type = RTE_FLOW_ERROR_TYPE_NONE, + .message = "none" }; + + int res = flow_filter_ops->flow_configure(internals->flw_dev, + get_caller_id(dev->data->port_id), + (const struct rte_flow_port_attr *)port_attr, + nb_queue, + (const struct rte_flow_queue_attr **)queue_attr, + &flow_error); + + convert_error(error, &flow_error); + return res; +} + +static struct rte_flow_pattern_template *eth_flow_pattern_template_create(struct rte_eth_dev *dev, + const struct rte_flow_pattern_template_attr *template_attr, + const struct rte_flow_item pattern[], struct rte_flow_error *error) +{ + const struct flow_filter_ops *flow_filter_ops = get_flow_filter_ops(); + + if (flow_filter_ops == NULL) { + NT_LOG_DBGX(ERR, FILTER, "flow_filter module uninitialized"); + return NULL; + } + + struct pmd_internals *internals = dev->data->dev_private; + + static struct rte_flow_error flow_error = { .type = RTE_FLOW_ERROR_TYPE_NONE, + .message = "none" }; + + struct cnv_match_s match = { 0 }; + struct rte_flow_pattern_template_attr attr = { + .relaxed_matching = template_attr->relaxed_matching, + .ingress = template_attr->ingress, + .egress = template_attr->egress, + .transfer = template_attr->transfer, + }; + + uint16_t caller_id = get_caller_id(dev->data->port_id); + + if (create_match_elements(&match, pattern, MAX_ELEMENTS) != 0) { + 
rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL, + "Error in pattern"); + return NULL; + } + + struct flow_pattern_template *res = + flow_filter_ops->flow_pattern_template_create(internals->flw_dev, &attr, caller_id, + match.rte_flow_item, &flow_error); + + convert_error(error, &flow_error); + return (struct rte_flow_pattern_template *)res; +} + +static int eth_flow_pattern_template_destroy(struct rte_eth_dev *dev, + struct rte_flow_pattern_template *pattern_template, + struct rte_flow_error *error) +{ + const struct flow_filter_ops *flow_filter_ops = get_flow_filter_ops(); + + if (flow_filter_ops == NULL) { + NT_LOG_DBGX(ERR, FILTER, "flow_filter module uninitialized"); + return -1; + } + + struct pmd_internals *internals = dev->data->dev_private; + + static struct rte_flow_error rte_flow_error = { .type = RTE_FLOW_ERROR_TYPE_NONE, + .message = "none" }; + + int res = flow_filter_ops->flow_pattern_template_destroy(internals->flw_dev, + (struct flow_pattern_template *) + pattern_template, + &rte_flow_error); + + convert_error(error, &rte_flow_error); + return res; +} + +static struct rte_flow_actions_template *eth_flow_actions_template_create(struct rte_eth_dev *dev, + const struct rte_flow_actions_template_attr *template_attr, + const struct rte_flow_action actions[], const struct rte_flow_action masks[], + struct rte_flow_error *error) +{ + const struct flow_filter_ops *flow_filter_ops = get_flow_filter_ops(); + + if (flow_filter_ops == NULL) { + NT_LOG_DBGX(ERR, FILTER, "flow_filter module uninitialized"); + return NULL; + } + + struct pmd_internals *internals = dev->data->dev_private; + + struct fpga_info_s *fpga_info = &internals->p_drv->ntdrv.adapter_info.fpga_info; + static struct rte_flow_error rte_flow_error = { .type = RTE_FLOW_ERROR_TYPE_NONE, + .message = "none" }; + + struct cnv_action_s action = { 0 }; + struct cnv_action_s mask = { 0 }; + struct rte_flow_actions_template_attr attr = { + .ingress = template_attr->ingress, + .egress = template_attr->egress, + .transfer = template_attr->transfer, + }; + uint16_t caller_id = get_caller_id(dev->data->port_id); + + if (fpga_info->profile == FPGA_INFO_PROFILE_INLINE) { + uint32_t queue_offset = 0; + + if (internals->type == PORT_TYPE_OVERRIDE && internals->vpq_nb_vq > 0) + queue_offset = internals->vpq[0].id; + + if (create_action_elements_inline(&action, actions, MAX_ACTIONS, queue_offset) != + 0) { + rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, NULL, + "Error in actions"); + return NULL; + } + + if (create_action_elements_inline(&mask, masks, MAX_ACTIONS, queue_offset) != 0) { + rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, NULL, + "Error in masks"); + return NULL; + } + + } else { + rte_flow_error_set(error, EPERM, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, + "Unsupported adapter profile"); + return NULL; + } + + struct flow_actions_template *res = + flow_filter_ops->flow_actions_template_create(internals->flw_dev, &attr, caller_id, + action.flow_actions, + mask.flow_actions, &rte_flow_error); + + convert_error(error, &rte_flow_error); + return (struct rte_flow_actions_template *)res; +} + +static int eth_flow_actions_template_destroy(struct rte_eth_dev *dev, + struct rte_flow_actions_template *actions_template, + struct rte_flow_error *error) +{ + const struct flow_filter_ops *flow_filter_ops = get_flow_filter_ops(); + + if (flow_filter_ops == NULL) { + NT_LOG_DBGX(ERR, NTNIC, "flow_filter module uninitialized"); + return -1; + } + + struct pmd_internals *internals = 
dev->data->dev_private; + + static struct rte_flow_error rte_flow_error = { .type = RTE_FLOW_ERROR_TYPE_NONE, + .message = "none" }; + + int res = flow_filter_ops->flow_actions_template_destroy(internals->flw_dev, + (struct flow_actions_template *) + actions_template, + &rte_flow_error); + + convert_error(error, &rte_flow_error); + return res; +} + +static struct rte_flow_template_table *eth_flow_template_table_create(struct rte_eth_dev *dev, + const struct rte_flow_template_table_attr *table_attr, + struct rte_flow_pattern_template *pattern_templates[], uint8_t nb_pattern_templates, + struct rte_flow_actions_template *actions_templates[], uint8_t nb_actions_templates, + struct rte_flow_error *error) +{ + const struct flow_filter_ops *flow_filter_ops = get_flow_filter_ops(); + + if (flow_filter_ops == NULL) { + NT_LOG_DBGX(ERR, FILTER, "flow_filter module uninitialized"); + return NULL; + } + + struct pmd_internals *internals = dev->data->dev_private; + + static struct rte_flow_error rte_flow_error = { .type = RTE_FLOW_ERROR_TYPE_NONE, + .message = "none" }; + + struct rte_flow_template_table_attr attr = { + .flow_attr = { + .group = table_attr->flow_attr.group, + .priority = table_attr->flow_attr.priority, + .ingress = table_attr->flow_attr.ingress, + .egress = table_attr->flow_attr.egress, + .transfer = table_attr->flow_attr.transfer, + }, + .nb_flows = table_attr->nb_flows, + }; + uint16_t forced_vlan_vid = 0; + uint16_t caller_id = get_caller_id(dev->data->port_id); + + struct flow_template_table *res = + flow_filter_ops->flow_template_table_create(internals->flw_dev, &attr, + forced_vlan_vid, caller_id, + (struct flow_pattern_template **)pattern_templates, + nb_pattern_templates, (struct flow_actions_template **)actions_templates, + nb_actions_templates, &rte_flow_error); + + convert_error(error, &rte_flow_error); + return (struct rte_flow_template_table *)res; +} + +static int eth_flow_template_table_destroy(struct rte_eth_dev *dev, + struct rte_flow_template_table *template_table, + struct rte_flow_error *error) +{ + const struct flow_filter_ops *flow_filter_ops = get_flow_filter_ops(); + + if (flow_filter_ops == NULL) { + NT_LOG_DBGX(ERR, FILTER, "flow_filter module uninitialized"); + return -1; + } + + struct pmd_internals *internals = dev->data->dev_private; + + static struct rte_flow_error rte_flow_error = { .type = RTE_FLOW_ERROR_TYPE_NONE, + .message = "none" }; + + int res = flow_filter_ops->flow_template_table_destroy(internals->flw_dev, + (struct flow_template_table *) + template_table, + &rte_flow_error); + + convert_error(error, &rte_flow_error); + return res; +} + +static struct rte_flow *eth_flow_async_create(struct rte_eth_dev *dev, uint32_t queue_id, + const struct rte_flow_op_attr *op_attr, + struct rte_flow_template_table *template_table, const struct rte_flow_item pattern[], + uint8_t pattern_template_index, const struct rte_flow_action actions[], + uint8_t actions_template_index, void *user_data, struct rte_flow_error *error) +{ + const struct flow_filter_ops *flow_filter_ops = get_flow_filter_ops(); + + if (flow_filter_ops == NULL) { + NT_LOG_DBGX(ERR, FILTER, "flow_filter module uninitialized"); + return NULL; + } + + struct pmd_internals *internals = dev->data->dev_private; + + struct fpga_info_s *fpga_info = &internals->p_drv->ntdrv.adapter_info.fpga_info; + static struct rte_flow_error rte_flow_error = { .type = RTE_FLOW_ERROR_TYPE_NONE, + .message = "none" }; + + struct cnv_action_s action = { 0 }; + struct cnv_match_s match = { 0 }; + + if 
(create_match_elements(&match, pattern, MAX_ELEMENTS) != 0) { + rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL, + "Error in pattern"); + return NULL; + } + + if (fpga_info->profile == FPGA_INFO_PROFILE_INLINE) { + uint32_t queue_offset = 0; + + if (internals->type == PORT_TYPE_OVERRIDE && internals->vpq_nb_vq > 0) + queue_offset = internals->vpq[0].id; + + if (create_action_elements_inline(&action, actions, MAX_ACTIONS, queue_offset) != + 0) { + rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, NULL, + "Error in actions"); + return NULL; + } + + } else { + rte_flow_error_set(error, EPERM, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, + "Unsupported adapter profile"); + return NULL; + } + + struct flow_handle *res = + flow_filter_ops->flow_async_create(internals->flw_dev, + queue_id, + (const struct rte_flow_op_attr *)op_attr, + (struct flow_template_table *)template_table, + match.rte_flow_item, + pattern_template_index, + action.flow_actions, + actions_template_index, + user_data, + &rte_flow_error); + + convert_error(error, &rte_flow_error); + return (struct rte_flow *)res; +} + +static int eth_flow_async_destroy(struct rte_eth_dev *dev, uint32_t queue_id, + const struct rte_flow_op_attr *op_attr, struct rte_flow *flow, + void *user_data, struct rte_flow_error *error) +{ + const struct flow_filter_ops *flow_filter_ops = get_flow_filter_ops(); + + if (flow_filter_ops == NULL) { + NT_LOG_DBGX(ERR, FILTER, "flow_filter module uninitialized"); + return -1; + } + + struct pmd_internals *internals = dev->data->dev_private; + + static struct rte_flow_error rte_flow_error = { .type = RTE_FLOW_ERROR_TYPE_NONE, + .message = "none" }; + + int res = flow_filter_ops->flow_async_destroy(internals->flw_dev, + queue_id, + (const struct rte_flow_op_attr *)op_attr, + (struct flow_handle *)flow, + user_data, + &rte_flow_error); + + convert_error(error, &rte_flow_error); + return res; +} + +static int poll_statistics(struct pmd_internals *internals) +{ + int flow; + struct drv_s *p_drv = internals->p_drv; + struct ntdrv_4ga_s *p_nt_drv = &p_drv->ntdrv; + nt4ga_stat_t *p_nt4ga_stat = &p_nt_drv->adapter_info.nt4ga_stat; + const int if_index = internals->n_intf_no; + uint64_t last_stat_rtc = 0; + + if (!p_nt4ga_stat || if_index < 0 || if_index > NUM_ADAPTER_PORTS_MAX) + return -1; + + assert(rte_tsc_freq > 0); + + rte_spinlock_lock(&hwlock); + + uint64_t now_rtc = rte_get_tsc_cycles(); + + /* + * Check per port max once a second + * if more than a second since last stat read, do a new one + */ + if ((now_rtc - internals->last_stat_rtc) < rte_tsc_freq) { + rte_spinlock_unlock(&hwlock); + return 0; + } + + internals->last_stat_rtc = now_rtc; + + rte_spinlock_lock(&p_nt_drv->stat_lck); + + /* + * Add the RX statistics increments since last time we polled. 
+ * (No difference if physical or virtual port) + */ + internals->rxq_scg[0].rx_pkts += p_nt4ga_stat->a_port_rx_packets_total[if_index] - + p_nt4ga_stat->a_port_rx_packets_base[if_index]; + internals->rxq_scg[0].rx_bytes += p_nt4ga_stat->a_port_rx_octets_total[if_index] - + p_nt4ga_stat->a_port_rx_octets_base[if_index]; + internals->rxq_scg[0].err_pkts += 0; + internals->rx_missed += p_nt4ga_stat->a_port_rx_drops_total[if_index] - + p_nt4ga_stat->a_port_rx_drops_base[if_index]; + + /* Update the increment bases */ + p_nt4ga_stat->a_port_rx_packets_base[if_index] = + p_nt4ga_stat->a_port_rx_packets_total[if_index]; + p_nt4ga_stat->a_port_rx_octets_base[if_index] = + p_nt4ga_stat->a_port_rx_octets_total[if_index]; + p_nt4ga_stat->a_port_rx_drops_base[if_index] = + p_nt4ga_stat->a_port_rx_drops_total[if_index]; + + /* Tx (here we must distinguish between physical and virtual ports) */ + if (internals->type == PORT_TYPE_PHYSICAL) { + /* Add the statistics increments since last time we polled */ + internals->txq_scg[0].tx_pkts += p_nt4ga_stat->a_port_tx_packets_total[if_index] - + p_nt4ga_stat->a_port_tx_packets_base[if_index]; + internals->txq_scg[0].tx_bytes += p_nt4ga_stat->a_port_tx_octets_total[if_index] - + p_nt4ga_stat->a_port_tx_octets_base[if_index]; + internals->txq_scg[0].err_pkts += 0; + + /* Update the increment bases */ + p_nt4ga_stat->a_port_tx_packets_base[if_index] = + p_nt4ga_stat->a_port_tx_packets_total[if_index]; + p_nt4ga_stat->a_port_tx_octets_base[if_index] = + p_nt4ga_stat->a_port_tx_octets_total[if_index]; + } + + /* Globally only once a second */ + if ((now_rtc - last_stat_rtc) < rte_tsc_freq) { + rte_spinlock_unlock(&hwlock); + rte_spinlock_unlock(&p_nt_drv->stat_lck); + return 0; + } + + last_stat_rtc = now_rtc; + + /* All color counters are global, therefore only one PMD must update them */ + const struct color_counters *p_color_counters = p_nt4ga_stat->mp_stat_structs_color; + struct color_counters *p_color_counters_base = p_nt4ga_stat->a_stat_structs_color_base; + uint64_t color_packets_accumulated, color_bytes_accumulated; + + for (flow = 0; flow < MAX_RTE_FLOWS; flow++) { + if (nt_flows[flow].used) { + unsigned int color = nt_flows[flow].flow_stat_id; + + if (color < NT_MAX_COLOR_FLOW_STATS) { + color_packets_accumulated = p_color_counters[color].color_packets; + nt_flows[flow].stat_pkts += + (color_packets_accumulated - + p_color_counters_base[color].color_packets); + + nt_flows[flow].stat_tcp_flags |= p_color_counters[color].tcp_flags; + + color_bytes_accumulated = p_color_counters[color].color_bytes; + nt_flows[flow].stat_bytes += + (color_bytes_accumulated - + p_color_counters_base[color].color_bytes); + + /* Update the counter bases */ + p_color_counters_base[color].color_packets = + color_packets_accumulated; + p_color_counters_base[color].color_bytes = color_bytes_accumulated; + } + } + } + + rte_spinlock_unlock(&hwlock); + rte_spinlock_unlock(&p_nt_drv->stat_lck); + + return 0; +} + +static const struct ntnic_filter_ops ntnic_filter_ops = { + .poll_statistics = poll_statistics, +}; + +void ntnic_filter_init(void) +{ + register_ntnic_filter_ops(&ntnic_filter_ops); +} + +static const struct rte_flow_ops dev_flow_ops = { + .create = eth_flow_create, + .destroy = eth_flow_destroy, + .flush = eth_flow_flush, + .actions_update = eth_flow_actions_update, + .dev_dump = eth_flow_dev_dump, + .get_aged_flows = eth_flow_get_aged_flows, + .info_get = eth_flow_info_get, + .configure = eth_flow_configure, + .pattern_template_create = eth_flow_pattern_template_create, + 
.pattern_template_destroy = eth_flow_pattern_template_destroy, + .actions_template_create = eth_flow_actions_template_create, + .actions_template_destroy = eth_flow_actions_template_destroy, + .template_table_create = eth_flow_template_table_create, + .template_table_destroy = eth_flow_template_table_destroy, +}; + +void dev_flow_init(void) +{ + register_dev_flow_ops(&dev_flow_ops); +} + +static struct rte_flow_fp_ops async_dev_flow_ops = { + .async_create = eth_flow_async_create, + .async_destroy = eth_flow_async_destroy, +}; + +void dev_fp_flow_init(void) +{ + register_dev_fp_flow_ops(&async_dev_flow_ops); +} diff --git a/drivers/net/ntnic/ntnic_mod_reg.c b/drivers/net/ntnic/ntnic_mod_reg.c index a03c97801b..658fac72c0 100644 --- a/drivers/net/ntnic/ntnic_mod_reg.c +++ b/drivers/net/ntnic/ntnic_mod_reg.c @@ -19,6 +19,42 @@ const struct sg_ops_s *get_sg_ops(void) return sg_ops; } +/* + * + */ +static struct meter_ops_s *meter_ops; + +void register_meter_ops(struct meter_ops_s *ops) +{ + meter_ops = ops; +} + +const struct meter_ops_s *get_meter_ops(void) +{ + if (meter_ops == NULL) + meter_init(); + + return meter_ops; +} + +/* + * + */ +static const struct ntnic_filter_ops *ntnic_filter_ops; + +void register_ntnic_filter_ops(const struct ntnic_filter_ops *ops) +{ + ntnic_filter_ops = ops; +} + +const struct ntnic_filter_ops *get_ntnic_filter_ops(void) +{ + if (ntnic_filter_ops == NULL) + ntnic_filter_init(); + + return ntnic_filter_ops; +} + static struct link_ops_s *link_100g_ops; void register_100g_link_ops(struct link_ops_s *ops) @@ -47,6 +83,21 @@ const struct port_ops *get_port_ops(void) return port_ops; } +static const struct nt4ga_stat_ops *nt4ga_stat_ops; + +void register_nt4ga_stat_ops(const struct nt4ga_stat_ops *ops) +{ + nt4ga_stat_ops = ops; +} + +const struct nt4ga_stat_ops *get_nt4ga_stat_ops(void) +{ + if (nt4ga_stat_ops == NULL) + nt4ga_stat_ops_init(); + + return nt4ga_stat_ops; +} + static const struct adapter_ops *adapter_ops; void register_adapter_ops(const struct adapter_ops *ops) @@ -118,6 +169,21 @@ const struct flow_backend_ops *get_flow_backend_ops(void) return flow_backend_ops; } +static const struct profile_inline_ops *profile_inline_ops; + +void register_profile_inline_ops(const struct profile_inline_ops *ops) +{ + profile_inline_ops = ops; +} + +const struct profile_inline_ops *get_profile_inline_ops(void) +{ + if (profile_inline_ops == NULL) + profile_inline_init(); + + return profile_inline_ops; +} + static const struct flow_filter_ops *flow_filter_ops; void register_flow_filter_ops(const struct flow_filter_ops *ops) @@ -132,3 +198,48 @@ const struct flow_filter_ops *get_flow_filter_ops(void) return flow_filter_ops; } + +static const struct rte_flow_fp_ops *dev_fp_flow_ops; + +void register_dev_fp_flow_ops(const struct rte_flow_fp_ops *ops) +{ + dev_fp_flow_ops = ops; +} + +const struct rte_flow_fp_ops *get_dev_fp_flow_ops(void) +{ + if (dev_fp_flow_ops == NULL) + dev_fp_flow_init(); + + return dev_fp_flow_ops; +} + +static const struct rte_flow_ops *dev_flow_ops; + +void register_dev_flow_ops(const struct rte_flow_ops *ops) +{ + dev_flow_ops = ops; +} + +const struct rte_flow_ops *get_dev_flow_ops(void) +{ + if (dev_flow_ops == NULL) + dev_flow_init(); + + return dev_flow_ops; +} + +static struct ntnic_xstats_ops *ntnic_xstats_ops; + +void register_ntnic_xstats_ops(struct ntnic_xstats_ops *ops) +{ + ntnic_xstats_ops = ops; +} + +struct ntnic_xstats_ops *get_ntnic_xstats_ops(void) +{ + if (ntnic_xstats_ops == NULL) + ntnic_xstats_ops_init(); + + return 
ntnic_xstats_ops; +} diff --git a/drivers/net/ntnic/ntnic_mod_reg.h b/drivers/net/ntnic/ntnic_mod_reg.h index 5b97b3d8ac..71861c6dea 100644 --- a/drivers/net/ntnic/ntnic_mod_reg.h +++ b/drivers/net/ntnic/ntnic_mod_reg.h @@ -7,13 +7,20 @@ #define __NTNIC_MOD_REG_H__ #include +#include + +#include "rte_ethdev.h" +#include "rte_flow_driver.h" + #include "flow_api.h" +#include "stream_binary_flow_api.h" #include "nthw_fpga_model.h" #include "nthw_platform_drv.h" #include "nthw_drv.h" #include "nt4ga_adapter.h" #include "ntnic_nthw_fpga_rst_nt200a0x.h" #include "ntnic_virt_queue.h" +#include "create_elements.h" /* sg ops section */ struct sg_ops_s { @@ -109,6 +116,26 @@ void register_sg_ops(struct sg_ops_s *ops); const struct sg_ops_s *get_sg_ops(void); void sg_init(void); +/* Meter ops section */ +struct meter_ops_s { + int (*eth_mtr_ops_get)(struct rte_eth_dev *eth_dev, void *ops); +}; + +void register_meter_ops(struct meter_ops_s *ops); +const struct meter_ops_s *get_meter_ops(void); +void meter_init(void); + +/* + * + */ +struct ntnic_filter_ops { + int (*poll_statistics)(struct pmd_internals *internals); +}; + +void register_ntnic_filter_ops(const struct ntnic_filter_ops *ops); +const struct ntnic_filter_ops *get_ntnic_filter_ops(void); +void ntnic_filter_init(void); + struct link_ops_s { int (*link_init)(struct adapter_info_s *p_adapter_info, nthw_fpga_t *p_fpga); }; @@ -173,6 +200,17 @@ void register_port_ops(const struct port_ops *ops); const struct port_ops *get_port_ops(void); void port_init(void); +struct nt4ga_stat_ops { + int (*nt4ga_stat_init)(struct adapter_info_s *p_adapter_info); + int (*nt4ga_stat_setup)(struct adapter_info_s *p_adapter_info); + int (*nt4ga_stat_collect)(struct adapter_info_s *p_adapter_info, + nt4ga_stat_t *p_nt4ga_stat); +}; + +void register_nt4ga_stat_ops(const struct nt4ga_stat_ops *ops); +const struct nt4ga_stat_ops *get_nt4ga_stat_ops(void); +void nt4ga_stat_ops_init(void); + struct adapter_ops { int (*init)(struct adapter_info_s *p_adapter_info); int (*deinit)(struct adapter_info_s *p_adapter_info); @@ -223,14 +261,307 @@ void register_flow_backend_ops(const struct flow_backend_ops *ops); const struct flow_backend_ops *get_flow_backend_ops(void); void flow_backend_init(void); +struct profile_inline_ops { + /* + * Management + */ + + int (*done_flow_management_of_ndev_profile_inline)(struct flow_nic_dev *ndev); + + int (*initialize_flow_management_of_ndev_profile_inline)(struct flow_nic_dev *ndev); + + /* + * Flow functionality + */ + int (*flow_destroy_locked_profile_inline)(struct flow_eth_dev *dev, + struct flow_handle *fh, + struct rte_flow_error *error); + + struct flow_handle *(*flow_create_profile_inline)(struct flow_eth_dev *dev, + const struct rte_flow_attr *attr, + uint16_t forced_vlan_vid, + uint16_t caller_id, + const struct rte_flow_item elem[], + const struct rte_flow_action action[], + struct rte_flow_error *error); + + int (*flow_destroy_profile_inline)(struct flow_eth_dev *dev, + struct flow_handle *flow, + struct rte_flow_error *error); + + int (*flow_flush_profile_inline)(struct flow_eth_dev *dev, + uint16_t caller_id, + struct rte_flow_error *error); + + int (*flow_actions_update_profile_inline)(struct flow_eth_dev *dev, + struct flow_handle *flow, + const struct rte_flow_action action[], + struct rte_flow_error *error); + + int (*flow_dev_dump_profile_inline)(struct flow_eth_dev *dev, + struct flow_handle *flow, + uint16_t caller_id, + FILE *file, + struct rte_flow_error *error); + + int (*flow_get_aged_flows_profile_inline)(struct 
flow_eth_dev *dev, + uint16_t caller_id, + void **context, + uint32_t nb_contexts, + struct rte_flow_error *error); + + /* + * RTE flow asynchronous operations functions + */ + + struct flow_pattern_template *(*flow_pattern_template_create_profile_inline) + (struct flow_eth_dev *dev, + const struct rte_flow_pattern_template_attr *template_attr, uint16_t caller_id, + const struct rte_flow_item pattern[], struct rte_flow_error *error); + + int (*flow_pattern_template_destroy_profile_inline)(struct flow_eth_dev *dev, + struct flow_pattern_template *pattern_template, + struct rte_flow_error *error); + + struct flow_actions_template *(*flow_actions_template_create_profile_inline) + (struct flow_eth_dev *dev, + const struct rte_flow_actions_template_attr *template_attr, + uint16_t caller_id, const struct rte_flow_action actions[], + const struct rte_flow_action masks[], struct rte_flow_error *error); + + int (*flow_actions_template_destroy_profile_inline)(struct flow_eth_dev *dev, + struct flow_actions_template *actions_template, + struct rte_flow_error *error); + + struct flow_template_table *(*flow_template_table_create_profile_inline) + (struct flow_eth_dev *dev, const struct rte_flow_template_table_attr *table_attr, + uint16_t forced_vlan_vid, uint16_t caller_id, + struct flow_pattern_template *pattern_templates[], uint8_t nb_pattern_templates, + struct flow_actions_template *actions_templates[], uint8_t nb_actions_templates, + struct rte_flow_error *error); + + int (*flow_template_table_destroy_profile_inline)(struct flow_eth_dev *dev, + struct flow_template_table *template_table, + struct rte_flow_error *error); + + struct flow_handle *(*flow_async_create_profile_inline)(struct flow_eth_dev *dev, + uint32_t queue_id, const struct rte_flow_op_attr *op_attr, + struct flow_template_table *template_table, const struct rte_flow_item pattern[], + uint8_t rte_pattern_template_index, const struct rte_flow_action actions[], + uint8_t rte_actions_template_index, void *user_data, struct rte_flow_error *error); + + int (*flow_async_destroy_profile_inline)(struct flow_eth_dev *dev, uint32_t queue_id, + const struct rte_flow_op_attr *op_attr, + struct flow_handle *flow, void *user_data, + struct rte_flow_error *error); + + int (*flow_nic_set_hasher_fields_inline)(struct flow_nic_dev *ndev, + int hsh_idx, + struct nt_eth_rss_conf rss_conf); + + /* + * Stats + */ + int (*flow_get_flm_stats_profile_inline)(struct flow_nic_dev *ndev, + uint64_t *data, + uint64_t size); + + /* + * NT Flow FLM queue API + */ + void (*flm_setup_queues)(void); + void (*flm_free_queues)(void); + + /* + * NT Flow FLM Meter API + */ + int (*flow_mtr_supported)(struct flow_eth_dev *dev); + uint64_t (*flow_mtr_meter_policy_n_max)(void); + int (*flow_mtr_set_profile)(struct flow_eth_dev *dev, uint32_t profile_id, + uint64_t bucket_rate_a, uint64_t bucket_size_a, + uint64_t bucket_rate_b, uint64_t bucket_size_b); + int (*flow_mtr_set_policy)(struct flow_eth_dev *dev, uint32_t policy_id, int drop); + int (*flow_mtr_create_meter)(struct flow_eth_dev *dev, uint8_t caller_id, uint32_t mtr_id, + uint32_t profile_id, uint32_t policy_id, uint64_t stats_mask); + int (*flow_mtr_probe_meter)(struct flow_eth_dev *dev, uint8_t caller_id, uint32_t mtr_id); + int (*flow_mtr_destroy_meter)(struct flow_eth_dev *dev, uint8_t caller_id, + uint32_t mtr_id); + int (*flm_mtr_adjust_stats)(struct flow_eth_dev *dev, uint8_t caller_id, uint32_t mtr_id, + uint32_t adjust_value); + uint32_t (*flow_mtr_meters_supported)(struct flow_eth_dev *dev, uint8_t 
caller_id); + + void (*flm_mtr_read_stats)(struct flow_eth_dev *dev, + uint8_t caller_id, + uint32_t id, + uint64_t *stats_mask, + uint64_t *green_pkt, + uint64_t *green_bytes, + int clear); + + uint32_t (*flm_update)(struct flow_eth_dev *dev); + + int (*flow_info_get_profile_inline)(struct flow_eth_dev *dev, uint8_t caller_id, + struct rte_flow_port_info *port_info, struct rte_flow_queue_info *queue_info, + struct rte_flow_error *error); + + int (*flow_configure_profile_inline)(struct flow_eth_dev *dev, uint8_t caller_id, + const struct rte_flow_port_attr *port_attr, uint16_t nb_queue, + const struct rte_flow_queue_attr *queue_attr[], + struct rte_flow_error *error); + + /* + * Config API + */ + int (*flow_set_mtu_inline)(struct flow_eth_dev *dev, uint32_t port, uint16_t mtu); +}; + +void register_profile_inline_ops(const struct profile_inline_ops *ops); +const struct profile_inline_ops *get_profile_inline_ops(void); +void profile_inline_init(void); + struct flow_filter_ops { int (*flow_filter_init)(nthw_fpga_t *p_fpga, struct flow_nic_dev **p_flow_device, int adapter_no); int (*flow_filter_done)(struct flow_nic_dev *dev); + /* + * Device Management API + */ + struct flow_eth_dev *(*flow_get_eth_dev)(uint8_t adapter_no, + uint8_t hw_port_no, + uint32_t port_id, + int alloc_rx_queues, + struct flow_queue_id_s queue_ids[], + int *rss_target_id, + enum flow_eth_dev_profile flow_profile, + uint32_t exception_path); + int (*flow_dev_dump)(struct flow_eth_dev *dev, + struct flow_handle *flow, + uint16_t caller_id, + FILE *file, + struct rte_flow_error *error); + /* + * NT Flow API + */ + struct flow_handle *(*flow_create)(struct flow_eth_dev *dev, + const struct rte_flow_attr *attr, + uint16_t forced_vlan_vid, + uint16_t caller_id, + const struct rte_flow_item item[], + const struct rte_flow_action action[], + struct rte_flow_error *error); + + int (*flow_destroy)(struct flow_eth_dev *dev, + struct flow_handle *flow, + struct rte_flow_error *error); + + int (*flow_flush)(struct flow_eth_dev *dev, uint16_t caller_id, + struct rte_flow_error *error); + + int (*flow_actions_update)(struct flow_eth_dev *dev, + struct flow_handle *flow, + const struct rte_flow_action action[], + struct rte_flow_error *error); + + int (*flow_get_flm_stats)(struct flow_nic_dev *ndev, uint64_t *data, uint64_t size); + + /* + * Other + */ + int (*flow_nic_set_hasher_fields)(struct flow_nic_dev *ndev, int hsh_idx, + struct nt_eth_rss_conf rss_conf); + int (*hw_mod_hsh_rcp_flush)(struct flow_api_backend_s *be, int start_idx, int count); + + int (*flow_get_aged_flows)(struct flow_eth_dev *dev, + uint16_t caller_id, + void **context, + uint32_t nb_contexts, + struct rte_flow_error *error); + + /* + * RTE flow asynchronous operations functions + */ + struct flow_pattern_template *(*flow_pattern_template_create)(struct flow_eth_dev *dev, + const struct rte_flow_pattern_template_attr *template_attr, uint16_t caller_id, + const struct rte_flow_item pattern[], struct rte_flow_error *error); + + int (*flow_pattern_template_destroy)(struct flow_eth_dev *dev, + struct flow_pattern_template *pattern_template, + struct rte_flow_error *error); + + struct flow_actions_template *(*flow_actions_template_create)(struct flow_eth_dev *dev, + const struct rte_flow_actions_template_attr *template_attr, uint16_t caller_id, + const struct rte_flow_action actions[], const struct rte_flow_action masks[], + struct rte_flow_error *error); + + int (*flow_actions_template_destroy)(struct flow_eth_dev *dev, + struct flow_actions_template 
*actions_template, + struct rte_flow_error *error); + + struct flow_template_table *(*flow_template_table_create)(struct flow_eth_dev *dev, + const struct rte_flow_template_table_attr *table_attr, uint16_t forced_vlan_vid, + uint16_t caller_id, + struct flow_pattern_template *pattern_templates[], uint8_t nb_pattern_templates, + struct flow_actions_template *actions_templates[], uint8_t nb_actions_templates, + struct rte_flow_error *error); + + int (*flow_template_table_destroy)(struct flow_eth_dev *dev, + struct flow_template_table *template_table, + struct rte_flow_error *error); + + struct flow_handle *(*flow_async_create)(struct flow_eth_dev *dev, uint32_t queue_id, + const struct rte_flow_op_attr *op_attr, + struct flow_template_table *template_table, const struct rte_flow_item pattern[], + uint8_t pattern_template_index, const struct rte_flow_action actions[], + uint8_t actions_template_index, void *user_data, struct rte_flow_error *error); + + int (*flow_async_destroy)(struct flow_eth_dev *dev, uint32_t queue_id, + const struct rte_flow_op_attr *op_attr, struct flow_handle *flow, + void *user_data, struct rte_flow_error *error); + + int (*flow_info_get)(struct flow_eth_dev *dev, uint8_t caller_id, + struct rte_flow_port_info *port_info, struct rte_flow_queue_info *queue_info, + struct rte_flow_error *error); + + int (*flow_configure)(struct flow_eth_dev *dev, uint8_t caller_id, + const struct rte_flow_port_attr *port_attr, uint16_t nb_queue, + const struct rte_flow_queue_attr *queue_attr[], + struct rte_flow_error *error); }; +void register_dev_fp_flow_ops(const struct rte_flow_fp_ops *ops); +const struct rte_flow_fp_ops *get_dev_fp_flow_ops(void); +void dev_fp_flow_init(void); + +void register_dev_flow_ops(const struct rte_flow_ops *ops); +const struct rte_flow_ops *get_dev_flow_ops(void); +void dev_flow_init(void); + void register_flow_filter_ops(const struct flow_filter_ops *ops); const struct flow_filter_ops *get_flow_filter_ops(void); void init_flow_filter(void); +struct ntnic_xstats_ops { + int (*nthw_xstats_get_names)(nt4ga_stat_t *p_nt4ga_stat, + struct rte_eth_xstat_name *xstats_names, + unsigned int size); + int (*nthw_xstats_get)(nt4ga_stat_t *p_nt4ga_stat, + struct rte_eth_xstat *stats, + unsigned int n, + uint8_t port); + void (*nthw_xstats_reset)(nt4ga_stat_t *p_nt4ga_stat, uint8_t port); + int (*nthw_xstats_get_names_by_id)(nt4ga_stat_t *p_nt4ga_stat, + struct rte_eth_xstat_name *xstats_names, + const uint64_t *ids, + unsigned int size); + int (*nthw_xstats_get_by_id)(nt4ga_stat_t *p_nt4ga_stat, + const uint64_t *ids, + uint64_t *values, + unsigned int n, + uint8_t port); +}; + +void register_ntnic_xstats_ops(struct ntnic_xstats_ops *ops); +struct ntnic_xstats_ops *get_ntnic_xstats_ops(void); +void ntnic_xstats_ops_init(void); + #endif /* __NTNIC_MOD_REG_H__ */ diff --git a/drivers/net/ntnic/ntnic_xstats/ntnic_xstats.c b/drivers/net/ntnic/ntnic_xstats/ntnic_xstats.c new file mode 100644 index 0000000000..7604afe6a0 --- /dev/null +++ b/drivers/net/ntnic/ntnic_xstats/ntnic_xstats.c @@ -0,0 +1,829 @@ +/* + * SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2023 Napatech A/S + */ + +#include + +#include "include/ntdrv_4ga.h" +#include "ntlog.h" +#include "nthw_drv.h" +#include "nthw_fpga.h" +#include "stream_binary_flow_api.h" +#include "ntnic_mod_reg.h" + +struct rte_nthw_xstats_names_s { + char name[RTE_ETH_XSTATS_NAME_SIZE]; + uint8_t source; + unsigned int offset; +}; + +/* + * Extended stat for Capture/Inline - implements RMON + * FLM 0.17 + */ +static struct 
rte_nthw_xstats_names_s nthw_cap_xstats_names_v1[] = { + { "rx_drop_events", 1, offsetof(struct port_counters_v2, drop_events) }, + { "rx_octets", 1, offsetof(struct port_counters_v2, octets) }, + { "rx_packets", 1, offsetof(struct port_counters_v2, pkts) }, + { "rx_broadcast_packets", 1, offsetof(struct port_counters_v2, broadcast_pkts) }, + { "rx_multicast_packets", 1, offsetof(struct port_counters_v2, multicast_pkts) }, + { "rx_unicast_packets", 1, offsetof(struct port_counters_v2, unicast_pkts) }, + { "rx_align_errors", 1, offsetof(struct port_counters_v2, pkts_alignment) }, + { "rx_code_violation_errors", 1, offsetof(struct port_counters_v2, pkts_code_violation) }, + { "rx_crc_errors", 1, offsetof(struct port_counters_v2, pkts_crc) }, + { "rx_undersize_packets", 1, offsetof(struct port_counters_v2, undersize_pkts) }, + { "rx_oversize_packets", 1, offsetof(struct port_counters_v2, oversize_pkts) }, + { "rx_fragments", 1, offsetof(struct port_counters_v2, fragments) }, + { + "rx_jabbers_not_truncated", 1, + offsetof(struct port_counters_v2, jabbers_not_truncated) + }, + { "rx_jabbers_truncated", 1, offsetof(struct port_counters_v2, jabbers_truncated) }, + { "rx_size_64_packets", 1, offsetof(struct port_counters_v2, pkts_64_octets) }, + { + "rx_size_65_to_127_packets", 1, + offsetof(struct port_counters_v2, pkts_65_to_127_octets) + }, + { + "rx_size_128_to_255_packets", 1, + offsetof(struct port_counters_v2, pkts_128_to_255_octets) + }, + { + "rx_size_256_to_511_packets", 1, + offsetof(struct port_counters_v2, pkts_256_to_511_octets) + }, + { + "rx_size_512_to_1023_packets", 1, + offsetof(struct port_counters_v2, pkts_512_to_1023_octets) + }, + { + "rx_size_1024_to_1518_packets", 1, + offsetof(struct port_counters_v2, pkts_1024_to_1518_octets) + }, + { + "rx_size_1519_to_2047_packets", 1, + offsetof(struct port_counters_v2, pkts_1519_to_2047_octets) + }, + { + "rx_size_2048_to_4095_packets", 1, + offsetof(struct port_counters_v2, pkts_2048_to_4095_octets) + }, + { + "rx_size_4096_to_8191_packets", 1, + offsetof(struct port_counters_v2, pkts_4096_to_8191_octets) + }, + { + "rx_size_8192_to_max_packets", 1, + offsetof(struct port_counters_v2, pkts_8192_to_max_octets) + }, + { "rx_ip_checksum_error", 1, offsetof(struct port_counters_v2, pkts_ip_chksum_error) }, + { "rx_udp_checksum_error", 1, offsetof(struct port_counters_v2, pkts_udp_chksum_error) }, + { "rx_tcp_checksum_error", 1, offsetof(struct port_counters_v2, pkts_tcp_chksum_error) }, + + { "tx_drop_events", 2, offsetof(struct port_counters_v2, drop_events) }, + { "tx_octets", 2, offsetof(struct port_counters_v2, octets) }, + { "tx_packets", 2, offsetof(struct port_counters_v2, pkts) }, + { "tx_broadcast_packets", 2, offsetof(struct port_counters_v2, broadcast_pkts) }, + { "tx_multicast_packets", 2, offsetof(struct port_counters_v2, multicast_pkts) }, + { "tx_unicast_packets", 2, offsetof(struct port_counters_v2, unicast_pkts) }, + { "tx_align_errors", 2, offsetof(struct port_counters_v2, pkts_alignment) }, + { "tx_code_violation_errors", 2, offsetof(struct port_counters_v2, pkts_code_violation) }, + { "tx_crc_errors", 2, offsetof(struct port_counters_v2, pkts_crc) }, + { "tx_undersize_packets", 2, offsetof(struct port_counters_v2, undersize_pkts) }, + { "tx_oversize_packets", 2, offsetof(struct port_counters_v2, oversize_pkts) }, + { "tx_fragments", 2, offsetof(struct port_counters_v2, fragments) }, + { + "tx_jabbers_not_truncated", 2, + offsetof(struct port_counters_v2, jabbers_not_truncated) + }, + { "tx_jabbers_truncated", 2, 
offsetof(struct port_counters_v2, jabbers_truncated) }, + { "tx_size_64_packets", 2, offsetof(struct port_counters_v2, pkts_64_octets) }, + { + "tx_size_65_to_127_packets", 2, + offsetof(struct port_counters_v2, pkts_65_to_127_octets) + }, + { + "tx_size_128_to_255_packets", 2, + offsetof(struct port_counters_v2, pkts_128_to_255_octets) + }, + { + "tx_size_256_to_511_packets", 2, + offsetof(struct port_counters_v2, pkts_256_to_511_octets) + }, + { + "tx_size_512_to_1023_packets", 2, + offsetof(struct port_counters_v2, pkts_512_to_1023_octets) + }, + { + "tx_size_1024_to_1518_packets", 2, + offsetof(struct port_counters_v2, pkts_1024_to_1518_octets) + }, + { + "tx_size_1519_to_2047_packets", 2, + offsetof(struct port_counters_v2, pkts_1519_to_2047_octets) + }, + { + "tx_size_2048_to_4095_packets", 2, + offsetof(struct port_counters_v2, pkts_2048_to_4095_octets) + }, + { + "tx_size_4096_to_8191_packets", 2, + offsetof(struct port_counters_v2, pkts_4096_to_8191_octets) + }, + { + "tx_size_8192_to_max_packets", 2, + offsetof(struct port_counters_v2, pkts_8192_to_max_octets) + }, + + /* FLM 0.17 */ + { "flm_count_current", 3, offsetof(struct flm_counters_v1, current) }, + { "flm_count_learn_done", 3, offsetof(struct flm_counters_v1, learn_done) }, + { "flm_count_learn_ignore", 3, offsetof(struct flm_counters_v1, learn_ignore) }, + { "flm_count_learn_fail", 3, offsetof(struct flm_counters_v1, learn_fail) }, + { "flm_count_unlearn_done", 3, offsetof(struct flm_counters_v1, unlearn_done) }, + { "flm_count_unlearn_ignore", 3, offsetof(struct flm_counters_v1, unlearn_ignore) }, + { "flm_count_auto_unlearn_done", 3, offsetof(struct flm_counters_v1, auto_unlearn_done) }, + { + "flm_count_auto_unlearn_ignore", 3, + offsetof(struct flm_counters_v1, auto_unlearn_ignore) + }, + { "flm_count_auto_unlearn_fail", 3, offsetof(struct flm_counters_v1, auto_unlearn_fail) }, + { + "flm_count_timeout_unlearn_done", 3, + offsetof(struct flm_counters_v1, timeout_unlearn_done) + }, + { "flm_count_rel_done", 3, offsetof(struct flm_counters_v1, rel_done) }, + { "flm_count_rel_ignore", 3, offsetof(struct flm_counters_v1, rel_ignore) }, + { "flm_count_prb_done", 3, offsetof(struct flm_counters_v1, prb_done) }, + { "flm_count_prb_ignore", 3, offsetof(struct flm_counters_v1, prb_ignore) } +}; + +/* + * Extended stat for Capture/Inline - implements RMON + * FLM 0.18 + */ +static struct rte_nthw_xstats_names_s nthw_cap_xstats_names_v2[] = { + { "rx_drop_events", 1, offsetof(struct port_counters_v2, drop_events) }, + { "rx_octets", 1, offsetof(struct port_counters_v2, octets) }, + { "rx_packets", 1, offsetof(struct port_counters_v2, pkts) }, + { "rx_broadcast_packets", 1, offsetof(struct port_counters_v2, broadcast_pkts) }, + { "rx_multicast_packets", 1, offsetof(struct port_counters_v2, multicast_pkts) }, + { "rx_unicast_packets", 1, offsetof(struct port_counters_v2, unicast_pkts) }, + { "rx_align_errors", 1, offsetof(struct port_counters_v2, pkts_alignment) }, + { "rx_code_violation_errors", 1, offsetof(struct port_counters_v2, pkts_code_violation) }, + { "rx_crc_errors", 1, offsetof(struct port_counters_v2, pkts_crc) }, + { "rx_undersize_packets", 1, offsetof(struct port_counters_v2, undersize_pkts) }, + { "rx_oversize_packets", 1, offsetof(struct port_counters_v2, oversize_pkts) }, + { "rx_fragments", 1, offsetof(struct port_counters_v2, fragments) }, + { + "rx_jabbers_not_truncated", 1, + offsetof(struct port_counters_v2, jabbers_not_truncated) + }, + { "rx_jabbers_truncated", 1, offsetof(struct port_counters_v2, 
jabbers_truncated) }, + { "rx_size_64_packets", 1, offsetof(struct port_counters_v2, pkts_64_octets) }, + { + "rx_size_65_to_127_packets", 1, + offsetof(struct port_counters_v2, pkts_65_to_127_octets) + }, + { + "rx_size_128_to_255_packets", 1, + offsetof(struct port_counters_v2, pkts_128_to_255_octets) + }, + { + "rx_size_256_to_511_packets", 1, + offsetof(struct port_counters_v2, pkts_256_to_511_octets) + }, + { + "rx_size_512_to_1023_packets", 1, + offsetof(struct port_counters_v2, pkts_512_to_1023_octets) + }, + { + "rx_size_1024_to_1518_packets", 1, + offsetof(struct port_counters_v2, pkts_1024_to_1518_octets) + }, + { + "rx_size_1519_to_2047_packets", 1, + offsetof(struct port_counters_v2, pkts_1519_to_2047_octets) + }, + { + "rx_size_2048_to_4095_packets", 1, + offsetof(struct port_counters_v2, pkts_2048_to_4095_octets) + }, + { + "rx_size_4096_to_8191_packets", 1, + offsetof(struct port_counters_v2, pkts_4096_to_8191_octets) + }, + { + "rx_size_8192_to_max_packets", 1, + offsetof(struct port_counters_v2, pkts_8192_to_max_octets) + }, + { "rx_ip_checksum_error", 1, offsetof(struct port_counters_v2, pkts_ip_chksum_error) }, + { "rx_udp_checksum_error", 1, offsetof(struct port_counters_v2, pkts_udp_chksum_error) }, + { "rx_tcp_checksum_error", 1, offsetof(struct port_counters_v2, pkts_tcp_chksum_error) }, + + { "tx_drop_events", 2, offsetof(struct port_counters_v2, drop_events) }, + { "tx_octets", 2, offsetof(struct port_counters_v2, octets) }, + { "tx_packets", 2, offsetof(struct port_counters_v2, pkts) }, + { "tx_broadcast_packets", 2, offsetof(struct port_counters_v2, broadcast_pkts) }, + { "tx_multicast_packets", 2, offsetof(struct port_counters_v2, multicast_pkts) }, + { "tx_unicast_packets", 2, offsetof(struct port_counters_v2, unicast_pkts) }, + { "tx_align_errors", 2, offsetof(struct port_counters_v2, pkts_alignment) }, + { "tx_code_violation_errors", 2, offsetof(struct port_counters_v2, pkts_code_violation) }, + { "tx_crc_errors", 2, offsetof(struct port_counters_v2, pkts_crc) }, + { "tx_undersize_packets", 2, offsetof(struct port_counters_v2, undersize_pkts) }, + { "tx_oversize_packets", 2, offsetof(struct port_counters_v2, oversize_pkts) }, + { "tx_fragments", 2, offsetof(struct port_counters_v2, fragments) }, + { + "tx_jabbers_not_truncated", 2, + offsetof(struct port_counters_v2, jabbers_not_truncated) + }, + { "tx_jabbers_truncated", 2, offsetof(struct port_counters_v2, jabbers_truncated) }, + { "tx_size_64_packets", 2, offsetof(struct port_counters_v2, pkts_64_octets) }, + { + "tx_size_65_to_127_packets", 2, + offsetof(struct port_counters_v2, pkts_65_to_127_octets) + }, + { + "tx_size_128_to_255_packets", 2, + offsetof(struct port_counters_v2, pkts_128_to_255_octets) + }, + { + "tx_size_256_to_511_packets", 2, + offsetof(struct port_counters_v2, pkts_256_to_511_octets) + }, + { + "tx_size_512_to_1023_packets", 2, + offsetof(struct port_counters_v2, pkts_512_to_1023_octets) + }, + { + "tx_size_1024_to_1518_packets", 2, + offsetof(struct port_counters_v2, pkts_1024_to_1518_octets) + }, + { + "tx_size_1519_to_2047_packets", 2, + offsetof(struct port_counters_v2, pkts_1519_to_2047_octets) + }, + { + "tx_size_2048_to_4095_packets", 2, + offsetof(struct port_counters_v2, pkts_2048_to_4095_octets) + }, + { + "tx_size_4096_to_8191_packets", 2, + offsetof(struct port_counters_v2, pkts_4096_to_8191_octets) + }, + { + "tx_size_8192_to_max_packets", 2, + offsetof(struct port_counters_v2, pkts_8192_to_max_octets) + }, + + /* FLM 0.17 */ + { "flm_count_current", 3, offsetof(struct 
flm_counters_v1, current) }, + { "flm_count_learn_done", 3, offsetof(struct flm_counters_v1, learn_done) }, + { "flm_count_learn_ignore", 3, offsetof(struct flm_counters_v1, learn_ignore) }, + { "flm_count_learn_fail", 3, offsetof(struct flm_counters_v1, learn_fail) }, + { "flm_count_unlearn_done", 3, offsetof(struct flm_counters_v1, unlearn_done) }, + { "flm_count_unlearn_ignore", 3, offsetof(struct flm_counters_v1, unlearn_ignore) }, + { "flm_count_auto_unlearn_done", 3, offsetof(struct flm_counters_v1, auto_unlearn_done) }, + { + "flm_count_auto_unlearn_ignore", 3, + offsetof(struct flm_counters_v1, auto_unlearn_ignore) + }, + { "flm_count_auto_unlearn_fail", 3, offsetof(struct flm_counters_v1, auto_unlearn_fail) }, + { + "flm_count_timeout_unlearn_done", 3, + offsetof(struct flm_counters_v1, timeout_unlearn_done) + }, + { "flm_count_rel_done", 3, offsetof(struct flm_counters_v1, rel_done) }, + { "flm_count_rel_ignore", 3, offsetof(struct flm_counters_v1, rel_ignore) }, + { "flm_count_prb_done", 3, offsetof(struct flm_counters_v1, prb_done) }, + { "flm_count_prb_ignore", 3, offsetof(struct flm_counters_v1, prb_ignore) }, + + /* FLM 0.20 */ + { "flm_count_sta_done", 3, offsetof(struct flm_counters_v1, sta_done) }, + { "flm_count_inf_done", 3, offsetof(struct flm_counters_v1, inf_done) }, + { "flm_count_inf_skip", 3, offsetof(struct flm_counters_v1, inf_skip) }, + { "flm_count_pck_hit", 3, offsetof(struct flm_counters_v1, pck_hit) }, + { "flm_count_pck_miss", 3, offsetof(struct flm_counters_v1, pck_miss) }, + { "flm_count_pck_unh", 3, offsetof(struct flm_counters_v1, pck_unh) }, + { "flm_count_pck_dis", 3, offsetof(struct flm_counters_v1, pck_dis) }, + { "flm_count_csh_hit", 3, offsetof(struct flm_counters_v1, csh_hit) }, + { "flm_count_csh_miss", 3, offsetof(struct flm_counters_v1, csh_miss) }, + { "flm_count_csh_unh", 3, offsetof(struct flm_counters_v1, csh_unh) }, + { "flm_count_cuc_start", 3, offsetof(struct flm_counters_v1, cuc_start) }, + { "flm_count_cuc_move", 3, offsetof(struct flm_counters_v1, cuc_move) } +}; + +/* + * Extended stat for Capture/Inline - implements RMON + * STA 0.9 + */ + +static struct rte_nthw_xstats_names_s nthw_cap_xstats_names_v3[] = { + { "rx_drop_events", 1, offsetof(struct port_counters_v2, drop_events) }, + { "rx_octets", 1, offsetof(struct port_counters_v2, octets) }, + { "rx_packets", 1, offsetof(struct port_counters_v2, pkts) }, + { "rx_broadcast_packets", 1, offsetof(struct port_counters_v2, broadcast_pkts) }, + { "rx_multicast_packets", 1, offsetof(struct port_counters_v2, multicast_pkts) }, + { "rx_unicast_packets", 1, offsetof(struct port_counters_v2, unicast_pkts) }, + { "rx_align_errors", 1, offsetof(struct port_counters_v2, pkts_alignment) }, + { "rx_code_violation_errors", 1, offsetof(struct port_counters_v2, pkts_code_violation) }, + { "rx_crc_errors", 1, offsetof(struct port_counters_v2, pkts_crc) }, + { "rx_undersize_packets", 1, offsetof(struct port_counters_v2, undersize_pkts) }, + { "rx_oversize_packets", 1, offsetof(struct port_counters_v2, oversize_pkts) }, + { "rx_fragments", 1, offsetof(struct port_counters_v2, fragments) }, + { + "rx_jabbers_not_truncated", 1, + offsetof(struct port_counters_v2, jabbers_not_truncated) + }, + { "rx_jabbers_truncated", 1, offsetof(struct port_counters_v2, jabbers_truncated) }, + { "rx_size_64_packets", 1, offsetof(struct port_counters_v2, pkts_64_octets) }, + { + "rx_size_65_to_127_packets", 1, + offsetof(struct port_counters_v2, pkts_65_to_127_octets) + }, + { + "rx_size_128_to_255_packets", 1, + 
offsetof(struct port_counters_v2, pkts_128_to_255_octets) + }, + { + "rx_size_256_to_511_packets", 1, + offsetof(struct port_counters_v2, pkts_256_to_511_octets) + }, + { + "rx_size_512_to_1023_packets", 1, + offsetof(struct port_counters_v2, pkts_512_to_1023_octets) + }, + { + "rx_size_1024_to_1518_packets", 1, + offsetof(struct port_counters_v2, pkts_1024_to_1518_octets) + }, + { + "rx_size_1519_to_2047_packets", 1, + offsetof(struct port_counters_v2, pkts_1519_to_2047_octets) + }, + { + "rx_size_2048_to_4095_packets", 1, + offsetof(struct port_counters_v2, pkts_2048_to_4095_octets) + }, + { + "rx_size_4096_to_8191_packets", 1, + offsetof(struct port_counters_v2, pkts_4096_to_8191_octets) + }, + { + "rx_size_8192_to_max_packets", 1, + offsetof(struct port_counters_v2, pkts_8192_to_max_octets) + }, + { "rx_ip_checksum_error", 1, offsetof(struct port_counters_v2, pkts_ip_chksum_error) }, + { "rx_udp_checksum_error", 1, offsetof(struct port_counters_v2, pkts_udp_chksum_error) }, + { "rx_tcp_checksum_error", 1, offsetof(struct port_counters_v2, pkts_tcp_chksum_error) }, + + { "tx_drop_events", 2, offsetof(struct port_counters_v2, drop_events) }, + { "tx_octets", 2, offsetof(struct port_counters_v2, octets) }, + { "tx_packets", 2, offsetof(struct port_counters_v2, pkts) }, + { "tx_broadcast_packets", 2, offsetof(struct port_counters_v2, broadcast_pkts) }, + { "tx_multicast_packets", 2, offsetof(struct port_counters_v2, multicast_pkts) }, + { "tx_unicast_packets", 2, offsetof(struct port_counters_v2, unicast_pkts) }, + { "tx_align_errors", 2, offsetof(struct port_counters_v2, pkts_alignment) }, + { "tx_code_violation_errors", 2, offsetof(struct port_counters_v2, pkts_code_violation) }, + { "tx_crc_errors", 2, offsetof(struct port_counters_v2, pkts_crc) }, + { "tx_undersize_packets", 2, offsetof(struct port_counters_v2, undersize_pkts) }, + { "tx_oversize_packets", 2, offsetof(struct port_counters_v2, oversize_pkts) }, + { "tx_fragments", 2, offsetof(struct port_counters_v2, fragments) }, + { + "tx_jabbers_not_truncated", 2, + offsetof(struct port_counters_v2, jabbers_not_truncated) + }, + { "tx_jabbers_truncated", 2, offsetof(struct port_counters_v2, jabbers_truncated) }, + { "tx_size_64_packets", 2, offsetof(struct port_counters_v2, pkts_64_octets) }, + { + "tx_size_65_to_127_packets", 2, + offsetof(struct port_counters_v2, pkts_65_to_127_octets) + }, + { + "tx_size_128_to_255_packets", 2, + offsetof(struct port_counters_v2, pkts_128_to_255_octets) + }, + { + "tx_size_256_to_511_packets", 2, + offsetof(struct port_counters_v2, pkts_256_to_511_octets) + }, + { + "tx_size_512_to_1023_packets", 2, + offsetof(struct port_counters_v2, pkts_512_to_1023_octets) + }, + { + "tx_size_1024_to_1518_packets", 2, + offsetof(struct port_counters_v2, pkts_1024_to_1518_octets) + }, + { + "tx_size_1519_to_2047_packets", 2, + offsetof(struct port_counters_v2, pkts_1519_to_2047_octets) + }, + { + "tx_size_2048_to_4095_packets", 2, + offsetof(struct port_counters_v2, pkts_2048_to_4095_octets) + }, + { + "tx_size_4096_to_8191_packets", 2, + offsetof(struct port_counters_v2, pkts_4096_to_8191_octets) + }, + { + "tx_size_8192_to_max_packets", 2, + offsetof(struct port_counters_v2, pkts_8192_to_max_octets) + }, + + /* FLM 0.17 */ + { "flm_count_current", 3, offsetof(struct flm_counters_v1, current) }, + { "flm_count_learn_done", 3, offsetof(struct flm_counters_v1, learn_done) }, + { "flm_count_learn_ignore", 3, offsetof(struct flm_counters_v1, learn_ignore) }, + { "flm_count_learn_fail", 3, offsetof(struct 
flm_counters_v1, learn_fail) }, + { "flm_count_unlearn_done", 3, offsetof(struct flm_counters_v1, unlearn_done) }, + { "flm_count_unlearn_ignore", 3, offsetof(struct flm_counters_v1, unlearn_ignore) }, + { "flm_count_auto_unlearn_done", 3, offsetof(struct flm_counters_v1, auto_unlearn_done) }, + { + "flm_count_auto_unlearn_ignore", 3, + offsetof(struct flm_counters_v1, auto_unlearn_ignore) + }, + { "flm_count_auto_unlearn_fail", 3, offsetof(struct flm_counters_v1, auto_unlearn_fail) }, + { + "flm_count_timeout_unlearn_done", 3, + offsetof(struct flm_counters_v1, timeout_unlearn_done) + }, + { "flm_count_rel_done", 3, offsetof(struct flm_counters_v1, rel_done) }, + { "flm_count_rel_ignore", 3, offsetof(struct flm_counters_v1, rel_ignore) }, + { "flm_count_prb_done", 3, offsetof(struct flm_counters_v1, prb_done) }, + { "flm_count_prb_ignore", 3, offsetof(struct flm_counters_v1, prb_ignore) }, + + /* FLM 0.20 */ + { "flm_count_sta_done", 3, offsetof(struct flm_counters_v1, sta_done) }, + { "flm_count_inf_done", 3, offsetof(struct flm_counters_v1, inf_done) }, + { "flm_count_inf_skip", 3, offsetof(struct flm_counters_v1, inf_skip) }, + { "flm_count_pck_hit", 3, offsetof(struct flm_counters_v1, pck_hit) }, + { "flm_count_pck_miss", 3, offsetof(struct flm_counters_v1, pck_miss) }, + { "flm_count_pck_unh", 3, offsetof(struct flm_counters_v1, pck_unh) }, + { "flm_count_pck_dis", 3, offsetof(struct flm_counters_v1, pck_dis) }, + { "flm_count_csh_hit", 3, offsetof(struct flm_counters_v1, csh_hit) }, + { "flm_count_csh_miss", 3, offsetof(struct flm_counters_v1, csh_miss) }, + { "flm_count_csh_unh", 3, offsetof(struct flm_counters_v1, csh_unh) }, + { "flm_count_cuc_start", 3, offsetof(struct flm_counters_v1, cuc_start) }, + { "flm_count_cuc_move", 3, offsetof(struct flm_counters_v1, cuc_move) }, + + /* FLM 0.17 */ + { "flm_count_load_lps", 3, offsetof(struct flm_counters_v1, load_lps) }, + { "flm_count_load_aps", 3, offsetof(struct flm_counters_v1, load_aps) }, + { "flm_count_max_lps", 3, offsetof(struct flm_counters_v1, max_lps) }, + { "flm_count_max_aps", 3, offsetof(struct flm_counters_v1, max_aps) }, + + { "rx_packet_per_second", 4, offsetof(struct port_load_counters, rx_pps) }, + { "rx_max_packet_per_second", 4, offsetof(struct port_load_counters, rx_pps_max) }, + { "rx_bits_per_second", 4, offsetof(struct port_load_counters, rx_bps) }, + { "rx_max_bits_per_second", 4, offsetof(struct port_load_counters, rx_bps_max) }, + { "tx_packet_per_second", 4, offsetof(struct port_load_counters, tx_pps) }, + { "tx_max_packet_per_second", 4, offsetof(struct port_load_counters, tx_pps_max) }, + { "tx_bits_per_second", 4, offsetof(struct port_load_counters, tx_bps) }, + { "tx_max_bits_per_second", 4, offsetof(struct port_load_counters, tx_bps_max) } +}; + +#define NTHW_CAP_XSTATS_NAMES_V1 RTE_DIM(nthw_cap_xstats_names_v1) +#define NTHW_CAP_XSTATS_NAMES_V2 RTE_DIM(nthw_cap_xstats_names_v2) +#define NTHW_CAP_XSTATS_NAMES_V3 RTE_DIM(nthw_cap_xstats_names_v3) + +/* + * Container for the reset values + */ +#define NTHW_XSTATS_SIZE NTHW_CAP_XSTATS_NAMES_V3 + +static uint64_t nthw_xstats_reset_val[NUM_ADAPTER_PORTS_MAX][NTHW_XSTATS_SIZE] = { 0 }; + +/* + * These functions must only be called with stat mutex locked + */ +static int nthw_xstats_get(nt4ga_stat_t *p_nt4ga_stat, + struct rte_eth_xstat *stats, + unsigned int n, + uint8_t port) +{ + unsigned int i; + uint8_t *pld_ptr; + uint8_t *flm_ptr; + uint8_t *rx_ptr; + uint8_t *tx_ptr; + uint32_t nb_names; + struct rte_nthw_xstats_names_s *names; + + pld_ptr = 
(uint8_t *)&p_nt4ga_stat->mp_port_load[port]; + flm_ptr = (uint8_t *)p_nt4ga_stat->mp_stat_structs_flm; + rx_ptr = (uint8_t *)&p_nt4ga_stat->cap.mp_stat_structs_port_rx[port]; + tx_ptr = (uint8_t *)&p_nt4ga_stat->cap.mp_stat_structs_port_tx[port]; + + if (p_nt4ga_stat->flm_stat_ver < 18) { + names = nthw_cap_xstats_names_v1; + nb_names = NTHW_CAP_XSTATS_NAMES_V1; + + } else if (p_nt4ga_stat->mp_nthw_stat->mn_stat_layout_version < 7 || + p_nt4ga_stat->flm_stat_ver < 23) { + names = nthw_cap_xstats_names_v2; + nb_names = NTHW_CAP_XSTATS_NAMES_V2; + + } else { + names = nthw_cap_xstats_names_v3; + nb_names = NTHW_CAP_XSTATS_NAMES_V3; + } + + for (i = 0; i < n && i < nb_names; i++) { + stats[i].id = i; + + switch (names[i].source) { + case 1: + /* RX stat */ + stats[i].value = *((uint64_t *)&rx_ptr[names[i].offset]) - + nthw_xstats_reset_val[port][i]; + break; + + case 2: + /* TX stat */ + stats[i].value = *((uint64_t *)&tx_ptr[names[i].offset]) - + nthw_xstats_reset_val[port][i]; + break; + + case 3: + + /* FLM stat */ + if (flm_ptr) { + stats[i].value = *((uint64_t *)&flm_ptr[names[i].offset]) - + nthw_xstats_reset_val[0][i]; + + } else { + stats[i].value = 0; + } + + break; + + case 4: + + /* Port Load stat */ + if (pld_ptr) { + /* No reset */ + stats[i].value = *((uint64_t *)&pld_ptr[names[i].offset]); + + } else { + stats[i].value = 0; + } + + break; + + default: + stats[i].value = 0; + break; + } + } + + return i; +} + +static int nthw_xstats_get_by_id(nt4ga_stat_t *p_nt4ga_stat, + const uint64_t *ids, + uint64_t *values, + unsigned int n, + uint8_t port) +{ + unsigned int i; + uint8_t *pld_ptr; + uint8_t *flm_ptr; + uint8_t *rx_ptr; + uint8_t *tx_ptr; + uint32_t nb_names; + struct rte_nthw_xstats_names_s *names; + int count = 0; + + pld_ptr = (uint8_t *)&p_nt4ga_stat->mp_port_load[port]; + flm_ptr = (uint8_t *)p_nt4ga_stat->mp_stat_structs_flm; + rx_ptr = (uint8_t *)&p_nt4ga_stat->cap.mp_stat_structs_port_rx[port]; + tx_ptr = (uint8_t *)&p_nt4ga_stat->cap.mp_stat_structs_port_tx[port]; + + if (p_nt4ga_stat->flm_stat_ver < 18) { + names = nthw_cap_xstats_names_v1; + nb_names = NTHW_CAP_XSTATS_NAMES_V1; + + } else if (p_nt4ga_stat->mp_nthw_stat->mn_stat_layout_version < 7 || + p_nt4ga_stat->flm_stat_ver < 23) { + names = nthw_cap_xstats_names_v2; + nb_names = NTHW_CAP_XSTATS_NAMES_V2; + + } else { + names = nthw_cap_xstats_names_v3; + nb_names = NTHW_CAP_XSTATS_NAMES_V3; + } + + for (i = 0; i < n; i++) { + if (ids[i] < nb_names) { + switch (names[ids[i]].source) { + case 1: + /* RX stat */ + values[i] = *((uint64_t *)&rx_ptr[names[ids[i]].offset]) - + nthw_xstats_reset_val[port][ids[i]]; + break; + + case 2: + /* TX stat */ + values[i] = *((uint64_t *)&tx_ptr[names[ids[i]].offset]) - + nthw_xstats_reset_val[port][ids[i]]; + break; + + case 3: + + /* FLM stat */ + if (flm_ptr) { + values[i] = *((uint64_t *)&flm_ptr[names[ids[i]].offset]) - + nthw_xstats_reset_val[0][ids[i]]; + + } else { + values[i] = 0; + } + + break; + + case 4: + + /* Port Load stat */ + if (pld_ptr) { + /* No reset */ + values[i] = *((uint64_t *)&pld_ptr[names[i].offset]); + + } else { + values[i] = 0; + } + + break; + + default: + values[i] = 0; + break; + } + + count++; + } + } + + return count; +} + +static void nthw_xstats_reset(nt4ga_stat_t *p_nt4ga_stat, uint8_t port) +{ + unsigned int i; + uint8_t *flm_ptr; + uint8_t *rx_ptr; + uint8_t *tx_ptr; + uint32_t nb_names; + struct rte_nthw_xstats_names_s *names; + + flm_ptr = (uint8_t *)p_nt4ga_stat->mp_stat_structs_flm; + rx_ptr = (uint8_t 
*)&p_nt4ga_stat->cap.mp_stat_structs_port_rx[port]; + tx_ptr = (uint8_t *)&p_nt4ga_stat->cap.mp_stat_structs_port_tx[port]; + + if (p_nt4ga_stat->flm_stat_ver < 18) { + names = nthw_cap_xstats_names_v1; + nb_names = NTHW_CAP_XSTATS_NAMES_V1; + + } else if (p_nt4ga_stat->mp_nthw_stat->mn_stat_layout_version < 7 || + p_nt4ga_stat->flm_stat_ver < 23) { + names = nthw_cap_xstats_names_v2; + nb_names = NTHW_CAP_XSTATS_NAMES_V2; + + } else { + names = nthw_cap_xstats_names_v3; + nb_names = NTHW_CAP_XSTATS_NAMES_V3; + } + + for (i = 0; i < nb_names; i++) { + switch (names[i].source) { + case 1: + /* RX stat */ + nthw_xstats_reset_val[port][i] = *((uint64_t *)&rx_ptr[names[i].offset]); + break; + + case 2: + /* TX stat */ + nthw_xstats_reset_val[port][i] = *((uint64_t *)&tx_ptr[names[i].offset]); + break; + + case 3: + + /* FLM stat */ + /* Reset makes no sense for flm_count_current */ + /* Reset can't be used for load_lps, load_aps, max_lps and max_aps */ + if (flm_ptr && + (strcmp(names[i].name, "flm_count_current") != 0 && + strcmp(names[i].name, "flm_count_load_lps") != 0 && + strcmp(names[i].name, "flm_count_load_aps") != 0 && + strcmp(names[i].name, "flm_count_max_lps") != 0 && + strcmp(names[i].name, "flm_count_max_aps") != 0)) { + nthw_xstats_reset_val[0][i] = + *((uint64_t *)&flm_ptr[names[i].offset]); + } + + break; + + case 4: + /* Port load stat*/ + /* No reset */ + break; + + default: + break; + } + } +} + +/* + * These functions does not require stat mutex locked + */ +static int nthw_xstats_get_names(nt4ga_stat_t *p_nt4ga_stat, + struct rte_eth_xstat_name *xstats_names, + unsigned int size) +{ + int count = 0; + unsigned int i; + uint32_t nb_names; + struct rte_nthw_xstats_names_s *names; + + if (p_nt4ga_stat->flm_stat_ver < 18) { + names = nthw_cap_xstats_names_v1; + nb_names = NTHW_CAP_XSTATS_NAMES_V1; + + } else if (p_nt4ga_stat->mp_nthw_stat->mn_stat_layout_version < 7 || + p_nt4ga_stat->flm_stat_ver < 23) { + names = nthw_cap_xstats_names_v2; + nb_names = NTHW_CAP_XSTATS_NAMES_V2; + + } else { + names = nthw_cap_xstats_names_v3; + nb_names = NTHW_CAP_XSTATS_NAMES_V3; + } + + if (!xstats_names) + return nb_names; + + for (i = 0; i < size && i < nb_names; i++) { + strlcpy(xstats_names[i].name, names[i].name, sizeof(xstats_names[i].name)); + count++; + } + + return count; +} + +static int nthw_xstats_get_names_by_id(nt4ga_stat_t *p_nt4ga_stat, + struct rte_eth_xstat_name *xstats_names, + const uint64_t *ids, + unsigned int size) +{ + int count = 0; + unsigned int i; + + uint32_t nb_names; + struct rte_nthw_xstats_names_s *names; + + if (p_nt4ga_stat->flm_stat_ver < 18) { + names = nthw_cap_xstats_names_v1; + nb_names = NTHW_CAP_XSTATS_NAMES_V1; + + } else if (p_nt4ga_stat->mp_nthw_stat->mn_stat_layout_version < 7 || + p_nt4ga_stat->flm_stat_ver < 23) { + names = nthw_cap_xstats_names_v2; + nb_names = NTHW_CAP_XSTATS_NAMES_V2; + + } else { + names = nthw_cap_xstats_names_v3; + nb_names = NTHW_CAP_XSTATS_NAMES_V3; + } + + if (!xstats_names) + return nb_names; + + for (i = 0; i < size; i++) { + if (ids[i] < nb_names) { + strlcpy(xstats_names[i].name, + names[ids[i]].name, + RTE_ETH_XSTATS_NAME_SIZE); + } + + count++; + } + + return count; +} + +static struct ntnic_xstats_ops ops = { + .nthw_xstats_get_names = nthw_xstats_get_names, + .nthw_xstats_get = nthw_xstats_get, + .nthw_xstats_reset = nthw_xstats_reset, + .nthw_xstats_get_names_by_id = nthw_xstats_get_names_by_id, + .nthw_xstats_get_by_id = nthw_xstats_get_by_id +}; + +void ntnic_xstats_ops_init(void) +{ + NT_LOG_DBGX(DBG, 
NTNIC, "xstats module was initialized"); + register_ntnic_xstats_ops(&ops); +} diff --git a/drivers/net/ntnic/ntutil/nt_util.h b/drivers/net/ntnic/ntutil/nt_util.h index 64947f5fbf..f2eccf3501 100644 --- a/drivers/net/ntnic/ntutil/nt_util.h +++ b/drivers/net/ntnic/ntutil/nt_util.h @@ -9,10 +9,22 @@ #include #include "nt4ga_link.h" +/* Total max VDPA ports */ +#define MAX_VDPA_PORTS 128UL + #ifndef ARRAY_SIZE #define ARRAY_SIZE(arr) RTE_DIM(arr) #endif +/* + * Windows size in seconds for measuring FLM load + * and Port load. + * The windows size must max be 3 min in order to + * prevent overflow. + */ +#define PORT_LOAD_WINDOWS_SIZE 2ULL +#define FLM_LOAD_WINDOWS_SIZE 2ULL + #define PCIIDENT_TO_DOMAIN(pci_ident) ((uint16_t)(((unsigned int)(pci_ident) >> 16) & 0xFFFFU)) #define PCIIDENT_TO_BUSNR(pci_ident) ((uint8_t)(((unsigned int)(pci_ident) >> 8) & 0xFFU)) #define PCIIDENT_TO_DEVNR(pci_ident) ((uint8_t)(((unsigned int)(pci_ident) >> 3) & 0x1FU)) diff --git a/drivers/net/octeon_ep/otx_ep_ethdev.c b/drivers/net/octeon_ep/otx_ep_ethdev.c index 42cfe4250c..b4f8baf3b3 100644 --- a/drivers/net/octeon_ep/otx_ep_ethdev.c +++ b/drivers/net/octeon_ep/otx_ep_ethdev.c @@ -849,7 +849,7 @@ otx_ep_eth_dev_init(struct rte_eth_dev *eth_dev) otx_ep_info("Using pkind %d.", otx_epvf->pkind); } else { otx_ep_err("Invalid chip id"); - return -EINVAL; + ret = -EINVAL; goto exit; } diff --git a/drivers/net/qede/base/bcm_osal.h b/drivers/net/qede/base/bcm_osal.h index 7869103c63..357981f63d 100644 --- a/drivers/net/qede/base/bcm_osal.h +++ b/drivers/net/qede/base/bcm_osal.h @@ -10,7 +10,9 @@ #include #include #include + #include +#include #include #include #include @@ -442,7 +444,7 @@ u32 qede_osal_log2(u32); #define OSAL_CACHE_LINE_SIZE RTE_CACHE_LINE_SIZE #define OSAL_IOMEM volatile #define OSAL_UNUSED __rte_unused -#define OSAL_UNLIKELY(x) __builtin_expect(!!(x), 0) +#define OSAL_UNLIKELY(x) unlikely(x) #define OSAL_MIN_T(type, __min1, __min2) RTE_MIN_T(__min1, __min2, type) #define OSAL_MAX_T(type, __max1, __max2) RTE_MAX_T(__max1, __max2, type) diff --git a/drivers/net/tap/rte_eth_tap.c b/drivers/net/tap/rte_eth_tap.c index 36b06b3ac5..650ddbd706 100644 --- a/drivers/net/tap/rte_eth_tap.c +++ b/drivers/net/tap/rte_eth_tap.c @@ -15,7 +15,6 @@ #include #include #include -#include #include #include #include @@ -1110,7 +1109,8 @@ tap_dev_close(struct rte_eth_dev *dev) struct pmd_process_private *process_private = dev->process_private; if (rte_eal_process_type() != RTE_PROC_PRIMARY) { - rte_free(dev->process_private); + free(dev->process_private); + dev->process_private = NULL; if (tap_devices_count == 1) rte_mp_action_unregister(TAP_MP_REQ_START_RXTX); tap_devices_count--; @@ -1171,7 +1171,9 @@ tap_dev_close(struct rte_eth_dev *dev) close(internals->ioctl_sock); internals->ioctl_sock = -1; } - rte_free(dev->process_private); + free(dev->process_private); + dev->process_private = NULL; + if (tap_devices_count == 1) rte_mp_action_unregister(TAP_MP_KEY); tap_devices_count--; @@ -1395,11 +1397,13 @@ tap_mac_set(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr) mac_addr)) mode = LOCAL_AND_REMOTE; ifr.ifr_hwaddr.sa_family = AF_LOCAL; - rte_memcpy(ifr.ifr_hwaddr.sa_data, mac_addr, RTE_ETHER_ADDR_LEN); + + rte_ether_addr_copy(mac_addr, (struct rte_ether_addr *)&ifr.ifr_hwaddr.sa_data); ret = tap_ioctl(pmd, SIOCSIFHWADDR, &ifr, 1, mode); if (ret < 0) return ret; - rte_memcpy(&pmd->eth_addr, mac_addr, RTE_ETHER_ADDR_LEN); + + rte_ether_addr_copy(mac_addr, &pmd->eth_addr); #ifdef HAVE_TCA_FLOWER if 
(pmd->remote_if_index && !pmd->flow_isolate) { @@ -1922,14 +1926,13 @@ eth_dev_tap_create(struct rte_vdev_device *vdev, const char *tap_name, goto error_exit_nodev; } - process_private = (struct pmd_process_private *) - rte_zmalloc_socket(tap_name, sizeof(struct pmd_process_private), - RTE_CACHE_LINE_SIZE, dev->device->numa_node); - + process_private = malloc(sizeof(struct pmd_process_private)); if (process_private == NULL) { TAP_LOG(ERR, "Failed to alloc memory for process private"); return -1; } + memset(process_private, 0, sizeof(struct pmd_process_private)); + pmd = dev->data->dev_private; dev->process_private = process_private; pmd->dev = dev; @@ -1987,7 +1990,7 @@ eth_dev_tap_create(struct rte_vdev_device *vdev, const char *tap_name, if (rte_is_zero_ether_addr(mac_addr)) rte_eth_random_addr((uint8_t *)&pmd->eth_addr); else - rte_memcpy(&pmd->eth_addr, mac_addr, sizeof(*mac_addr)); + rte_ether_addr_copy(mac_addr, &pmd->eth_addr); } /* @@ -2010,8 +2013,7 @@ eth_dev_tap_create(struct rte_vdev_device *vdev, const char *tap_name, if (pmd->type == ETH_TUNTAP_TYPE_TAP) { memset(&ifr, 0, sizeof(struct ifreq)); ifr.ifr_hwaddr.sa_family = AF_LOCAL; - rte_memcpy(ifr.ifr_hwaddr.sa_data, &pmd->eth_addr, - RTE_ETHER_ADDR_LEN); + rte_ether_addr_copy(&pmd->eth_addr, (struct rte_ether_addr *)&ifr.ifr_hwaddr.sa_data); if (tap_ioctl(pmd, SIOCSIFHWADDR, &ifr, 0, LOCAL_ONLY) < 0) goto error_exit; } @@ -2070,8 +2072,8 @@ eth_dev_tap_create(struct rte_vdev_device *vdev, const char *tap_name, pmd->name, pmd->remote_iface); goto error_remote; } - rte_memcpy(&pmd->eth_addr, ifr.ifr_hwaddr.sa_data, - RTE_ETHER_ADDR_LEN); + + rte_ether_addr_copy((struct rte_ether_addr *)&ifr.ifr_hwaddr.sa_data, &pmd->eth_addr); /* The desired MAC is already in ifreq after SIOCGIFHWADDR. 
*/ if (tap_ioctl(pmd, SIOCSIFHWADDR, &ifr, 0, LOCAL_ONLY) < 0) { TAP_LOG(ERR, "%s: failed to get %s MAC address.", @@ -2435,16 +2437,13 @@ rte_pmd_tap_probe(struct rte_vdev_device *dev) TAP_LOG(ERR, "Primary process is missing"); return -1; } - eth_dev->process_private = (struct pmd_process_private *) - rte_zmalloc_socket(name, - sizeof(struct pmd_process_private), - RTE_CACHE_LINE_SIZE, - eth_dev->device->numa_node); + eth_dev->process_private = malloc(sizeof(struct pmd_process_private)); if (eth_dev->process_private == NULL) { TAP_LOG(ERR, "Failed to alloc memory for process private"); return -1; } + memset(eth_dev->process_private, 0, sizeof(struct pmd_process_private)); ret = tap_mp_attach_queues(name, eth_dev); if (ret != 0) diff --git a/drivers/net/tap/tap_flow.c b/drivers/net/tap/tap_flow.c index 51ec07eb5a..c0e44bb1a7 100644 --- a/drivers/net/tap/tap_flow.c +++ b/drivers/net/tap/tap_flow.c @@ -35,7 +35,7 @@ struct rte_flow { LIST_ENTRY(rte_flow) next; /* Pointer to the next rte_flow structure */ struct rte_flow *remote_flow; /* associated remote flow */ - struct nlmsg msg; + struct tap_nlmsg msg; }; struct convert_data { @@ -74,12 +74,12 @@ struct action_data { }; }; -static int tap_flow_create_eth(const struct rte_flow_item *item, void *data); -static int tap_flow_create_vlan(const struct rte_flow_item *item, void *data); -static int tap_flow_create_ipv4(const struct rte_flow_item *item, void *data); -static int tap_flow_create_ipv6(const struct rte_flow_item *item, void *data); -static int tap_flow_create_udp(const struct rte_flow_item *item, void *data); -static int tap_flow_create_tcp(const struct rte_flow_item *item, void *data); +static int tap_flow_create_eth(const struct rte_flow_item *item, struct convert_data *info); +static int tap_flow_create_vlan(const struct rte_flow_item *item, struct convert_data *info); +static int tap_flow_create_ipv4(const struct rte_flow_item *item, struct convert_data *info); +static int tap_flow_create_ipv6(const struct rte_flow_item *item, struct convert_data *info); +static int tap_flow_create_udp(const struct rte_flow_item *item, struct convert_data *info); +static int tap_flow_create_tcp(const struct rte_flow_item *item, struct convert_data *info); static int tap_flow_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, @@ -139,19 +139,10 @@ struct tap_flow_items { * along with the item. */ const void *default_mask; - /** - * Conversion function from rte_flow to netlink attributes. - * - * @param item - * rte_flow item to convert. - * @param data - * Internal structure to store the conversion. - * - * @return - * 0 on success, negative value otherwise. - */ - int (*convert)(const struct rte_flow_item *item, void *data); - /** List of possible following items. */ + /* Conversion function from rte_flow to netlink attributes. */ + int (*convert)(const struct rte_flow_item *item, struct convert_data *info); + + /* List of possible following items. */ const enum rte_flow_item_type *const items; }; @@ -417,13 +408,12 @@ static struct remote_rule implicit_rte_flows[TAP_REMOTE_MAX_IDX] = { * 0 if checks are alright, -1 otherwise. 
*/ static int -tap_flow_create_eth(const struct rte_flow_item *item, void *data) +tap_flow_create_eth(const struct rte_flow_item *item, struct convert_data *info) { - struct convert_data *info = (struct convert_data *)data; const struct rte_flow_item_eth *spec = item->spec; const struct rte_flow_item_eth *mask = item->mask; struct rte_flow *flow = info->flow; - struct nlmsg *msg; + struct tap_nlmsg *msg; /* use default mask if none provided */ if (!mask) @@ -471,13 +461,12 @@ tap_flow_create_eth(const struct rte_flow_item *item, void *data) * 0 if checks are alright, -1 otherwise. */ static int -tap_flow_create_vlan(const struct rte_flow_item *item, void *data) +tap_flow_create_vlan(const struct rte_flow_item *item, struct convert_data *info) { - struct convert_data *info = (struct convert_data *)data; const struct rte_flow_item_vlan *spec = item->spec; const struct rte_flow_item_vlan *mask = item->mask; struct rte_flow *flow = info->flow; - struct nlmsg *msg; + struct tap_nlmsg *msg; /* use default mask if none provided */ if (!mask) @@ -531,13 +520,12 @@ tap_flow_create_vlan(const struct rte_flow_item *item, void *data) * 0 if checks are alright, -1 otherwise. */ static int -tap_flow_create_ipv4(const struct rte_flow_item *item, void *data) +tap_flow_create_ipv4(const struct rte_flow_item *item, struct convert_data *info) { - struct convert_data *info = (struct convert_data *)data; const struct rte_flow_item_ipv4 *spec = item->spec; const struct rte_flow_item_ipv4 *mask = item->mask; struct rte_flow *flow = info->flow; - struct nlmsg *msg; + struct tap_nlmsg *msg; /* use default mask if none provided */ if (!mask) @@ -586,14 +574,13 @@ tap_flow_create_ipv4(const struct rte_flow_item *item, void *data) * 0 if checks are alright, -1 otherwise. */ static int -tap_flow_create_ipv6(const struct rte_flow_item *item, void *data) +tap_flow_create_ipv6(const struct rte_flow_item *item, struct convert_data *info) { - struct convert_data *info = (struct convert_data *)data; const struct rte_flow_item_ipv6 *spec = item->spec; const struct rte_flow_item_ipv6 *mask = item->mask; struct rte_flow *flow = info->flow; uint8_t empty_addr[16] = { 0 }; - struct nlmsg *msg; + struct tap_nlmsg *msg; /* use default mask if none provided */ if (!mask) @@ -642,13 +629,12 @@ tap_flow_create_ipv6(const struct rte_flow_item *item, void *data) * 0 if checks are alright, -1 otherwise. */ static int -tap_flow_create_udp(const struct rte_flow_item *item, void *data) +tap_flow_create_udp(const struct rte_flow_item *item, struct convert_data *info) { - struct convert_data *info = (struct convert_data *)data; const struct rte_flow_item_udp *spec = item->spec; const struct rte_flow_item_udp *mask = item->mask; struct rte_flow *flow = info->flow; - struct nlmsg *msg; + struct tap_nlmsg *msg; /* use default mask if none provided */ if (!mask) @@ -688,13 +674,12 @@ tap_flow_create_udp(const struct rte_flow_item *item, void *data) * 0 if checks are alright, -1 otherwise. 
*/ static int -tap_flow_create_tcp(const struct rte_flow_item *item, void *data) +tap_flow_create_tcp(const struct rte_flow_item *item, struct convert_data *info) { - struct convert_data *info = (struct convert_data *)data; const struct rte_flow_item_tcp *spec = item->spec; const struct rte_flow_item_tcp *mask = item->mask; struct rte_flow *flow = info->flow; - struct nlmsg *msg; + struct tap_nlmsg *msg; /* use default mask if none provided */ if (!mask) @@ -820,7 +805,7 @@ tap_flow_item_validate(const struct rte_flow_item *item, static int add_action(struct rte_flow *flow, size_t *act_index, struct action_data *adata) { - struct nlmsg *msg = &flow->msg; + struct tap_nlmsg *msg = &flow->msg; if (tap_nlattr_nested_start(msg, (*act_index)++) < 0) return -1; @@ -891,7 +876,7 @@ static int add_actions(struct rte_flow *flow, int nb_actions, struct action_data *data, int classifier_action) { - struct nlmsg *msg = &flow->msg; + struct tap_nlmsg *msg = &flow->msg; size_t act_index = 1; int i; @@ -1274,7 +1259,7 @@ tap_flow_create(struct rte_eth_dev *dev, struct pmd_internals *pmd = dev->data->dev_private; struct rte_flow *remote_flow = NULL; struct rte_flow *flow = NULL; - struct nlmsg *msg = NULL; + struct tap_nlmsg *msg = NULL; int err; if (!pmd->if_index) { @@ -1593,7 +1578,7 @@ int tap_flow_implicit_create(struct pmd_internals *pmd, struct rte_flow_item_eth eth_local = { .hdr.ether_type = 0 }; unsigned int if_index = pmd->remote_if_index; struct rte_flow *remote_flow = NULL; - struct nlmsg *msg = NULL; + struct tap_nlmsg *msg = NULL; int err = 0; struct rte_flow_item items_local[2] = { [0] = { diff --git a/drivers/net/tap/tap_netlink.c b/drivers/net/tap/tap_netlink.c index 35c491ac37..8a57c9242c 100644 --- a/drivers/net/tap/tap_netlink.c +++ b/drivers/net/tap/tap_netlink.c @@ -368,7 +368,7 @@ tap_nlattr_add32(struct nlmsghdr *nh, unsigned short type, uint32_t data) * -1 if adding a nested netlink attribute failed, 0 otherwise. */ int -tap_nlattr_nested_start(struct nlmsg *msg, uint16_t type) +tap_nlattr_nested_start(struct tap_nlmsg *msg, uint16_t type) { struct nested_tail *tail; @@ -400,7 +400,7 @@ tap_nlattr_nested_start(struct nlmsg *msg, uint16_t type) * The netlink message where to edit the nested_tails metadata. 
*/ void -tap_nlattr_nested_finish(struct nlmsg *msg) +tap_nlattr_nested_finish(struct tap_nlmsg *msg) { struct nested_tail *tail = msg->nested_tails; diff --git a/drivers/net/tap/tap_netlink.h b/drivers/net/tap/tap_netlink.h index faa73ba163..466c47a6d7 100644 --- a/drivers/net/tap/tap_netlink.h +++ b/drivers/net/tap/tap_netlink.h @@ -16,7 +16,7 @@ #define NLMSG_BUF 512 -struct nlmsg { +struct tap_nlmsg { struct nlmsghdr nh; struct tcmsg t; char buf[NLMSG_BUF]; @@ -36,7 +36,7 @@ void tap_nlattr_add(struct nlmsghdr *nh, unsigned short type, void tap_nlattr_add8(struct nlmsghdr *nh, unsigned short type, uint8_t data); void tap_nlattr_add16(struct nlmsghdr *nh, unsigned short type, uint16_t data); void tap_nlattr_add32(struct nlmsghdr *nh, unsigned short type, uint32_t data); -int tap_nlattr_nested_start(struct nlmsg *msg, uint16_t type); -void tap_nlattr_nested_finish(struct nlmsg *msg); +int tap_nlattr_nested_start(struct tap_nlmsg *msg, uint16_t type); +void tap_nlattr_nested_finish(struct tap_nlmsg *msg); #endif /* _TAP_NETLINK_H_ */ diff --git a/drivers/net/tap/tap_tcmsgs.c b/drivers/net/tap/tap_tcmsgs.c index a3aae3c814..1755b57519 100644 --- a/drivers/net/tap/tap_tcmsgs.c +++ b/drivers/net/tap/tap_tcmsgs.c @@ -42,7 +42,7 @@ struct qdisc_custom_arg { * Overrides the default netlink flags for this msg with those specified. */ void -tc_init_msg(struct nlmsg *msg, unsigned int ifindex, uint16_t type, uint16_t flags) +tc_init_msg(struct tap_nlmsg *msg, unsigned int ifindex, uint16_t type, uint16_t flags) { struct nlmsghdr *n = &msg->nh; @@ -72,7 +72,7 @@ tc_init_msg(struct nlmsg *msg, unsigned int ifindex, uint16_t type, uint16_t fla static int qdisc_del(int nlsk_fd, unsigned int ifindex, struct qdisc *qinfo) { - struct nlmsg msg; + struct tap_nlmsg msg; int fd = 0; tc_init_msg(&msg, ifindex, RTM_DELQDISC, 0); @@ -117,7 +117,7 @@ int qdisc_add_multiq(int nlsk_fd, unsigned int ifindex) { struct tc_multiq_qopt opt = {0}; - struct nlmsg msg; + struct tap_nlmsg msg; tc_init_msg(&msg, ifindex, RTM_NEWQDISC, NLM_F_REQUEST | NLM_F_ACK | NLM_F_EXCL | NLM_F_CREATE); @@ -146,7 +146,7 @@ qdisc_add_multiq(int nlsk_fd, unsigned int ifindex) int qdisc_add_ingress(int nlsk_fd, unsigned int ifindex) { - struct nlmsg msg; + struct tap_nlmsg msg; tc_init_msg(&msg, ifindex, RTM_NEWQDISC, NLM_F_REQUEST | NLM_F_ACK | NLM_F_EXCL | NLM_F_CREATE); @@ -211,7 +211,7 @@ static int qdisc_iterate(int nlsk_fd, unsigned int ifindex, int (*callback)(struct nlmsghdr *, void *), void *arg) { - struct nlmsg msg; + struct tap_nlmsg msg; struct list_args args = { .nlsk_fd = nlsk_fd, .ifindex = ifindex, diff --git a/drivers/net/tap/tap_tcmsgs.h b/drivers/net/tap/tap_tcmsgs.h index 9411626661..70e97e2b62 100644 --- a/drivers/net/tap/tap_tcmsgs.h +++ b/drivers/net/tap/tap_tcmsgs.h @@ -24,7 +24,7 @@ #define MULTIQ_MAJOR_HANDLE (1 << 16) -void tc_init_msg(struct nlmsg *msg, unsigned int ifindex, uint16_t type, +void tc_init_msg(struct tap_nlmsg *msg, unsigned int ifindex, uint16_t type, uint16_t flags); int qdisc_list(int nlsk_fd, unsigned int ifindex); int qdisc_flush(int nlsk_fd, unsigned int ifindex); diff --git a/drivers/net/txgbe/base/txgbe_mng.c b/drivers/net/txgbe/base/txgbe_mng.c index 20db982891..7dc8f21183 100644 --- a/drivers/net/txgbe/base/txgbe_mng.c +++ b/drivers/net/txgbe/base/txgbe_mng.c @@ -58,6 +58,7 @@ txgbe_hic_unlocked(struct txgbe_hw *hw, u32 *buffer, u32 length, u32 timeout) dword_len = length >> 2; + txgbe_flush(hw); /* The device driver writes the relevant command block * into the ram area. 
*/ diff --git a/drivers/net/txgbe/base/txgbe_regs.h b/drivers/net/txgbe/base/txgbe_regs.h index 4ea4a2e3d8..7a9ba6976f 100644 --- a/drivers/net/txgbe/base/txgbe_regs.h +++ b/drivers/net/txgbe/base/txgbe_regs.h @@ -1197,7 +1197,8 @@ enum txgbe_5tuple_protocol { #define TXGBE_ICRMISC_ANDONE MS(19, 0x1) /* link auto-nego done */ #define TXGBE_ICRMISC_ERRIG MS(20, 0x1) /* integrity error */ #define TXGBE_ICRMISC_SPI MS(21, 0x1) /* SPI interface */ -#define TXGBE_ICRMISC_VFMBX MS(22, 0x1) /* VF-PF message box */ +#define TXGBE_ICRMISC_TXDESC MS(22, 0x1) /* TDM desc error */ +#define TXGBE_ICRMISC_VFMBX MS(23, 0x1) /* VF-PF message box */ #define TXGBE_ICRMISC_GPIO MS(26, 0x1) /* GPIO interrupt */ #define TXGBE_ICRMISC_ERRPCI MS(27, 0x1) /* pcie request error */ #define TXGBE_ICRMISC_HEAT MS(28, 0x1) /* overheat detection */ @@ -1382,6 +1383,11 @@ enum txgbe_5tuple_protocol { #define TXGBE_TXCFG_WTHRESH(v) LS(v, 16, 0x7F) #define TXGBE_TXCFG_FLUSH MS(26, 0x1) +#define TXGBE_TDM_DESC_CHK(i) (0x0180B0 + (i) * 4) /*0-3*/ +#define TXGBE_TDM_DESC_NONFATAL(i) (0x0180C0 + (i) * 4) /*0-3*/ +#define TXGBE_TDM_DESC_FATAL(i) (0x0180D0 + (i) * 4) /*0-3*/ +#define TXGBE_TDM_DESC_MASK(v) MS(v, 0x1) + /* interrupt registers */ #define TXGBE_ITRI 0x000180 #define TXGBE_ITR(i) (0x000200 + 4 * (i)) diff --git a/drivers/net/txgbe/txgbe_ethdev.c b/drivers/net/txgbe/txgbe_ethdev.c index 2834468764..a956216abb 100644 --- a/drivers/net/txgbe/txgbe_ethdev.c +++ b/drivers/net/txgbe/txgbe_ethdev.c @@ -331,6 +331,8 @@ txgbe_pf_reset_hw(struct txgbe_hw *hw) status = hw->mac.reset_hw(hw); ctrl_ext = rd32(hw, TXGBE_PORTCTL); + /* let hardware know driver is loaded */ + ctrl_ext |= TXGBE_PORTCTL_DRVLOAD; /* Set PF Reset Done bit so PF/VF Mail Ops can work */ ctrl_ext |= TXGBE_PORTCTL_RSTDONE; wr32(hw, TXGBE_PORTCTL, ctrl_ext); @@ -1934,6 +1936,7 @@ txgbe_dev_stop(struct rte_eth_dev *dev) PMD_INIT_FUNC_TRACE(); rte_eal_alarm_cancel(txgbe_dev_detect_sfp, dev); + rte_eal_alarm_cancel(txgbe_tx_queue_clear_error, dev); txgbe_dev_wait_setup_link_complete(dev, 0); /* disable interrupts */ @@ -2061,6 +2064,9 @@ txgbe_dev_close(struct rte_eth_dev *dev) ret = txgbe_dev_stop(dev); + /* Let firmware take over control of hardware */ + wr32m(hw, TXGBE_PORTCTL, TXGBE_PORTCTL_DRVLOAD, 0); + txgbe_dev_free_queues(dev); txgbe_set_pcie_master(hw, false); @@ -2338,6 +2344,7 @@ txgbe_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) struct txgbe_hw_stats *hw_stats = TXGBE_DEV_STATS(dev); struct txgbe_stat_mappings *stat_mappings = TXGBE_DEV_STAT_MAPPINGS(dev); + struct txgbe_tx_queue *txq; uint32_t i, j; txgbe_read_stats_registers(hw, hw_stats); @@ -2392,6 +2399,11 @@ txgbe_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) /* Tx Errors */ stats->oerrors = 0; + for (i = 0; i < dev->data->nb_tx_queues; i++) { + txq = dev->data->tx_queues[i]; + stats->oerrors += txq->desc_error; + } + return 0; } @@ -2400,6 +2412,13 @@ txgbe_dev_stats_reset(struct rte_eth_dev *dev) { struct txgbe_hw *hw = TXGBE_DEV_HW(dev); struct txgbe_hw_stats *hw_stats = TXGBE_DEV_STATS(dev); + struct txgbe_tx_queue *txq; + uint32_t i; + + for (i = 0; i < dev->data->nb_tx_queues; i++) { + txq = dev->data->tx_queues[i]; + txq->desc_error = 0; + } /* HW registers are cleared on read */ hw->offset_loaded = 0; @@ -2833,6 +2852,60 @@ txgbe_dev_setup_link_alarm_handler(void *param) intr->flags &= ~TXGBE_FLAG_NEED_LINK_CONFIG; } +static void +txgbe_do_reset(struct rte_eth_dev *dev) +{ + struct txgbe_hw *hw = TXGBE_DEV_HW(dev); + struct txgbe_tx_queue 
*txq; + u32 i; + + for (i = 0; i < dev->data->nb_tx_queues; i++) { + txq = dev->data->tx_queues[i]; + txq->resetting = true; + } + + rte_delay_ms(1); + wr32(hw, TXGBE_RST, TXGBE_RST_LAN(hw->bus.lan_id)); + txgbe_flush(hw); + + PMD_DRV_LOG(ERR, "Please manually restart the port %d", + dev->data->port_id); +} + +static void +txgbe_tx_ring_recovery(struct rte_eth_dev *dev) +{ + struct txgbe_hw *hw = TXGBE_DEV_HW(dev); + u32 desc_error[4] = {0, 0, 0, 0}; + struct txgbe_tx_queue *txq; + u32 i; + + /* check tdm fatal error */ + for (i = 0; i < 4; i++) { + desc_error[i] = rd32(hw, TXGBE_TDM_DESC_FATAL(i)); + if (desc_error[i] != 0) { + PMD_DRV_LOG(ERR, "TDM fatal error reg[%d]: 0x%x", i, desc_error[i]); + txgbe_do_reset(dev); + return; + } + } + + /* check tdm non-fatal error */ + for (i = 0; i < 4; i++) + desc_error[i] = rd32(hw, TXGBE_TDM_DESC_NONFATAL(i)); + + for (i = 0; i < dev->data->nb_tx_queues; i++) { + if (desc_error[i / 32] & (1 << i % 32)) { + PMD_DRV_LOG(ERR, "TDM non-fatal error, reset port[%d] queue[%d]", + dev->data->port_id, i); + dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; + txq = dev->data->tx_queues[i]; + txq->resetting = true; + rte_eal_alarm_set(1000, txgbe_tx_queue_clear_error, (void *)dev); + } + } +} + /* * If @timeout_ms was 0, it means that it will not return until link complete. * It returns 1 on complete, return 0 on timeout. @@ -3091,6 +3164,7 @@ txgbe_dev_misc_interrupt_setup(struct rte_eth_dev *dev) intr->mask |= mask; intr->mask_misc |= TXGBE_ICRMISC_GPIO; intr->mask_misc |= TXGBE_ICRMISC_ANDONE; + intr->mask_misc |= TXGBE_ICRMISC_TXDESC; return 0; } @@ -3186,6 +3260,9 @@ txgbe_dev_interrupt_get_status(struct rte_eth_dev *dev, if (eicr & TXGBE_ICRMISC_HEAT) intr->flags |= TXGBE_FLAG_OVERHEAT; + if (eicr & TXGBE_ICRMISC_TXDESC) + intr->flags |= TXGBE_FLAG_TX_DESC_ERR; + ((u32 *)hw->isb_mem)[TXGBE_ISB_MISC] = 0; return 0; @@ -3305,6 +3382,11 @@ txgbe_dev_interrupt_action(struct rte_eth_dev *dev, intr->flags &= ~TXGBE_FLAG_OVERHEAT; } + if (intr->flags & TXGBE_FLAG_TX_DESC_ERR) { + txgbe_tx_ring_recovery(dev); + intr->flags &= ~TXGBE_FLAG_TX_DESC_ERR; + } + PMD_DRV_LOG(DEBUG, "enable intr immediately"); txgbe_enable_intr(dev); rte_intr_enable(intr_handle); diff --git a/drivers/net/txgbe/txgbe_ethdev.h b/drivers/net/txgbe/txgbe_ethdev.h index f0f4ced5b0..302ea9f037 100644 --- a/drivers/net/txgbe/txgbe_ethdev.h +++ b/drivers/net/txgbe/txgbe_ethdev.h @@ -31,6 +31,7 @@ #define TXGBE_FLAG_NEED_LINK_CONFIG (uint32_t)(1 << 4) #define TXGBE_FLAG_NEED_AN_CONFIG (uint32_t)(1 << 5) #define TXGBE_FLAG_OVERHEAT (uint32_t)(1 << 6) +#define TXGBE_FLAG_TX_DESC_ERR (uint32_t)(1 << 7) /* * Defines that were not part of txgbe_type.h as they are not used by the @@ -474,6 +475,8 @@ int txgbe_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id); int txgbe_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id); +void txgbe_tx_queue_clear_error(void *param); + void txgbe_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id, struct rte_eth_rxq_info *qinfo); diff --git a/drivers/net/txgbe/txgbe_rxtx.c b/drivers/net/txgbe/txgbe_rxtx.c index 5bc0f8772f..4e4b78fb43 100644 --- a/drivers/net/txgbe/txgbe_rxtx.c +++ b/drivers/net/txgbe/txgbe_rxtx.c @@ -160,6 +160,8 @@ tx4(volatile struct txgbe_tx_desc *txdp, struct rte_mbuf **pkts) for (i = 0; i < 4; ++i, ++txdp, ++pkts) { buf_dma_addr = rte_mbuf_data_iova(*pkts); pkt_len = (*pkts)->data_len; + if (pkt_len < RTE_ETHER_HDR_LEN) + pkt_len = TXGBE_FRAME_SIZE_DFT; /* write data to descriptor */ txdp->qw0 = 
rte_cpu_to_le_64(buf_dma_addr); @@ -180,6 +182,8 @@ tx1(volatile struct txgbe_tx_desc *txdp, struct rte_mbuf **pkts) buf_dma_addr = rte_mbuf_data_iova(*pkts); pkt_len = (*pkts)->data_len; + if (pkt_len < RTE_ETHER_HDR_LEN) + pkt_len = TXGBE_FRAME_SIZE_DFT; /* write data to descriptor */ txdp->qw0 = cpu_to_le64(buf_dma_addr); @@ -728,6 +732,66 @@ txgbe_get_tun_len(struct rte_mbuf *mbuf) return tun_len; } +static inline void +txgbe_fix_offload_len(union txgbe_tx_offload *ol) +{ + uint8_t ptid = ol->ptid; + + if (ptid & TXGBE_PTID_PKT_TUN) { + if (ol->outer_l2_len == 0) + ol->outer_l2_len = sizeof(struct rte_ether_hdr); + if (ol->outer_l3_len == 0) { + if (ptid & TXGBE_PTID_TUN_IPV6) + ol->outer_l3_len = sizeof(struct rte_ipv6_hdr); + else + ol->outer_l3_len = sizeof(struct rte_ipv4_hdr); + } + if ((ptid & 0xF) == 0) { + ol->l3_len = 0; + ol->l4_len = 0; + } else { + goto inner; + } + } + + if ((ptid & 0xF0) == TXGBE_PTID_PKT_MAC) { + if (ol->l2_len == 0) + ol->l2_len = sizeof(struct rte_ether_hdr); + ol->l3_len = 0; + ol->l4_len = 0; + } else if ((ptid & 0xF0) == TXGBE_PTID_PKT_IP) { + if (ol->l2_len == 0) + ol->l2_len = sizeof(struct rte_ether_hdr); +inner: + if (ol->l3_len == 0) { + if (ptid & TXGBE_PTID_PKT_IPV6) + ol->l3_len = sizeof(struct rte_ipv6_hdr); + else + ol->l3_len = sizeof(struct rte_ipv4_hdr); + } + switch (ptid & 0x7) { + case 0x1: + case 0x2: + ol->l4_len = 0; + break; + case 0x3: + if (ol->l4_len == 0) + ol->l4_len = sizeof(struct rte_udp_hdr); + break; + case 0x4: + if (ol->l4_len == 0) + ol->l4_len = sizeof(struct rte_tcp_hdr); + break; + case 0x5: + if (ol->l4_len == 0) + ol->l4_len = sizeof(struct rte_sctp_hdr); + break; + default: + break; + } + } +} + static inline uint8_t txgbe_parse_tun_ptid(struct rte_mbuf *tx_pkt, uint8_t tun_len) { @@ -753,6 +817,30 @@ txgbe_parse_tun_ptid(struct rte_mbuf *tx_pkt, uint8_t tun_len) return ptid; } +static inline bool +txgbe_check_pkt_err(struct rte_mbuf *tx_pkt) +{ + uint32_t total_len = 0, nb_seg = 0; + struct rte_mbuf *mseg; + + mseg = tx_pkt; + do { + if (mseg->data_len == 0) + return true; + total_len += mseg->data_len; + nb_seg++; + mseg = mseg->next; + } while (mseg != NULL); + + if (tx_pkt->pkt_len != total_len || tx_pkt->pkt_len == 0) + return true; + + if (tx_pkt->nb_segs != nb_seg || tx_pkt->nb_segs > 64) + return true; + + return false; +} + uint16_t txgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) @@ -782,6 +870,10 @@ txgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint8_t use_ipsec; #endif + txq = tx_queue; + if (txq->resetting) + return 0; + tx_offload.data[0] = 0; tx_offload.data[1] = 0; txq = tx_queue; @@ -800,6 +892,12 @@ txgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) { new_ctx = 0; tx_pkt = *tx_pkts++; + if (txgbe_check_pkt_err(tx_pkt)) { + rte_pktmbuf_free(tx_pkt); + txq->desc_error++; + continue; + } + pkt_len = tx_pkt->pkt_len; /* @@ -826,6 +924,7 @@ txgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, if (tx_offload.ptid & TXGBE_PTID_PKT_TUN) tx_offload.ptid |= txgbe_parse_tun_ptid(tx_pkt, tx_offload.outer_tun_len); + txgbe_fix_offload_len(&tx_offload); #ifdef RTE_LIB_SECURITY if (use_ipsec) { @@ -2284,8 +2383,7 @@ txgbe_get_tx_port_offloads(struct rte_eth_dev *dev) tx_offload_capa |= RTE_ETH_TX_OFFLOAD_MACSEC_INSERT; - tx_offload_capa |= RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM | - RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM; + tx_offload_capa |= RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM; #ifdef RTE_LIB_SECURITY if 
(dev->security_ctx) @@ -2426,6 +2524,7 @@ txgbe_dev_tx_queue_setup(struct rte_eth_dev *dev, txgbe_set_tx_function(dev, txq); txq->ops->reset(txq); + txq->desc_error = 0; dev->data->tx_queues[queue_idx] = txq; @@ -4571,6 +4670,11 @@ txgbe_dev_tx_init(struct rte_eth_dev *dev) wr32(hw, TXGBE_TXWP(txq->reg_idx), 0); } +#ifndef RTE_LIB_SECURITY + for (i = 0; i < 4; i++) + wr32(hw, TXGBE_TDM_DESC_CHK(i), 0xFFFFFFFF); +#endif + /* Device configured with multiple TX queues. */ txgbe_dev_mq_tx_configure(dev); } @@ -4807,6 +4911,7 @@ txgbe_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id) rte_wmb(); wr32(hw, TXGBE_TXWP(txq->reg_idx), txq->tx_tail); dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED; + txq->resetting = false; return 0; } @@ -4864,6 +4969,39 @@ txgbe_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id) return 0; } +void +txgbe_tx_queue_clear_error(void *param) +{ + struct rte_eth_dev *dev = (struct rte_eth_dev *)param; + struct txgbe_hw *hw = TXGBE_DEV_HW(dev); + struct txgbe_tx_queue *txq; + u32 i; + + for (i = 0; i < dev->data->nb_tx_queues; i++) { + txq = dev->data->tx_queues[i]; + if (!txq->resetting) + continue; + + /* Increase the count of Tx desc error since + * it causes the queue reset. + */ + txq->desc_error++; + txgbe_dev_save_tx_queue(hw, i); + + /* tx ring reset */ + wr32(hw, TXGBE_TDM_DESC_NONFATAL(i / 32), + TXGBE_TDM_DESC_MASK(i % 32)); + + if (txq->ops != NULL) { + txq->ops->release_mbufs(txq); + txq->ops->reset(txq); + } + + txgbe_dev_store_tx_queue(hw, i); + txgbe_dev_tx_queue_start(dev, i); + } +} + void txgbe_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id, struct rte_eth_rxq_info *qinfo) diff --git a/drivers/net/txgbe/txgbe_rxtx.h b/drivers/net/txgbe/txgbe_rxtx.h index 9155eb1f70..622a0d3981 100644 --- a/drivers/net/txgbe/txgbe_rxtx.h +++ b/drivers/net/txgbe/txgbe_rxtx.h @@ -412,6 +412,8 @@ struct txgbe_tx_queue { /**< indicates that IPsec TX feature is in use */ #endif const struct rte_memzone *mz; + uint64_t desc_error; + bool resetting; }; struct txgbe_txq_ops { diff --git a/drivers/net/txgbe/txgbe_rxtx_vec_neon.c b/drivers/net/txgbe/txgbe_rxtx_vec_neon.c index a96baf9b1d..d4d647fab5 100644 --- a/drivers/net/txgbe/txgbe_rxtx_vec_neon.c +++ b/drivers/net/txgbe/txgbe_rxtx_vec_neon.c @@ -476,9 +476,13 @@ static inline void vtx1(volatile struct txgbe_tx_desc *txdp, struct rte_mbuf *pkt, uint64_t flags) { - uint64x2_t descriptor = { - pkt->buf_iova + pkt->data_off, - (uint64_t)pkt->pkt_len << 45 | flags | pkt->data_len}; + uint16_t pkt_len = pkt->data_len; + + if (pkt_len < RTE_ETHER_HDR_LEN) + pkt_len = TXGBE_FRAME_SIZE_DFT; + + uint64x2_t descriptor = {pkt->buf_iova + pkt->data_off, + (uint64_t)pkt_len << 45 | flags | pkt_len}; vst1q_u64((uint64_t *)(uintptr_t)txdp, descriptor); } diff --git a/drivers/net/txgbe/txgbe_rxtx_vec_sse.c b/drivers/net/txgbe/txgbe_rxtx_vec_sse.c index 1a3f2ce3cd..8ecce33471 100644 --- a/drivers/net/txgbe/txgbe_rxtx_vec_sse.c +++ b/drivers/net/txgbe/txgbe_rxtx_vec_sse.c @@ -607,9 +607,14 @@ static inline void vtx1(volatile struct txgbe_tx_desc *txdp, struct rte_mbuf *pkt, uint64_t flags) { - __m128i descriptor = _mm_set_epi64x((uint64_t)pkt->pkt_len << 45 | - flags | pkt->data_len, - pkt->buf_iova + pkt->data_off); + uint16_t pkt_len = pkt->data_len; + __m128i descriptor; + + if (pkt_len < RTE_ETHER_HDR_LEN) + pkt_len = TXGBE_FRAME_SIZE_DFT; + + descriptor = _mm_set_epi64x((uint64_t)pkt_len << 45 | flags | pkt_len, + pkt->buf_iova + pkt->data_off); _mm_store_si128((__m128i 
*)(uintptr_t)txdp, descriptor); } diff --git a/drivers/net/vmxnet3/base/vmxnet3_osdep.h b/drivers/net/vmxnet3/base/vmxnet3_osdep.h index 381a68db69..b1cd9ed056 100644 --- a/drivers/net/vmxnet3/base/vmxnet3_osdep.h +++ b/drivers/net/vmxnet3/base/vmxnet3_osdep.h @@ -7,13 +7,15 @@ #include +#include + typedef uint64_t uint64; typedef uint32_t uint32; typedef uint16_t uint16; typedef uint8_t uint8; #ifndef UNLIKELY -#define UNLIKELY(x) __builtin_expect((x),0) +#define UNLIKELY(x) unlikely(x) #endif /* unlikely */ #endif /* _VMXNET3_OSDEP_H */ diff --git a/drivers/net/vmxnet3/vmxnet3_ethdev.c b/drivers/net/vmxnet3/vmxnet3_ethdev.c index 78fac63ab6..15ca25b187 100644 --- a/drivers/net/vmxnet3/vmxnet3_ethdev.c +++ b/drivers/net/vmxnet3/vmxnet3_ethdev.c @@ -403,6 +403,7 @@ eth_vmxnet3_dev_init(struct rte_eth_dev *eth_dev) /* Vendor and Device ID need to be set before init of shared code */ hw->device_id = pci_dev->id.device_id; hw->vendor_id = pci_dev->id.vendor_id; + hw->adapter_stopped = TRUE; hw->hw_addr0 = (void *)pci_dev->mem_resource[0].addr; hw->hw_addr1 = (void *)pci_dev->mem_resource[1].addr; @@ -1470,42 +1471,52 @@ vmxnet3_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) struct vmxnet3_hw *hw = dev->data->dev_private; struct UPT1_TxStats txStats; struct UPT1_RxStats rxStats; + uint64_t packets, bytes; VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_GET_STATS); for (i = 0; i < hw->num_tx_queues; i++) { vmxnet3_tx_stats_get(hw, i, &txStats); - stats->q_opackets[i] = txStats.ucastPktsTxOK + + packets = txStats.ucastPktsTxOK + txStats.mcastPktsTxOK + txStats.bcastPktsTxOK; - stats->q_obytes[i] = txStats.ucastBytesTxOK + + bytes = txStats.ucastBytesTxOK + txStats.mcastBytesTxOK + txStats.bcastBytesTxOK; - stats->opackets += stats->q_opackets[i]; - stats->obytes += stats->q_obytes[i]; + stats->opackets += packets; + stats->obytes += bytes; stats->oerrors += txStats.pktsTxError + txStats.pktsTxDiscard; + + if (i < RTE_ETHDEV_QUEUE_STAT_CNTRS) { + stats->q_opackets[i] = packets; + stats->q_obytes[i] = bytes; + } } for (i = 0; i < hw->num_rx_queues; i++) { vmxnet3_rx_stats_get(hw, i, &rxStats); - stats->q_ipackets[i] = rxStats.ucastPktsRxOK + + packets = rxStats.ucastPktsRxOK + rxStats.mcastPktsRxOK + rxStats.bcastPktsRxOK; - stats->q_ibytes[i] = rxStats.ucastBytesRxOK + + bytes = rxStats.ucastBytesRxOK + rxStats.mcastBytesRxOK + rxStats.bcastBytesRxOK; - stats->ipackets += stats->q_ipackets[i]; - stats->ibytes += stats->q_ibytes[i]; - - stats->q_errors[i] = rxStats.pktsRxError; + stats->ipackets += packets; + stats->ibytes += bytes; stats->ierrors += rxStats.pktsRxError; stats->imissed += rxStats.pktsRxOutOfBuf; + + if (i < RTE_ETHDEV_QUEUE_STAT_CNTRS) { + stats->q_ipackets[i] = packets; + stats->q_ibytes[i] = bytes; + stats->q_errors[i] = rxStats.pktsRxError; + } } return 0; @@ -1521,8 +1532,6 @@ vmxnet3_dev_stats_reset(struct rte_eth_dev *dev) VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_GET_STATS); - RTE_BUILD_BUG_ON(RTE_ETHDEV_QUEUE_STAT_CNTRS < VMXNET3_MAX_TX_QUEUES); - for (i = 0; i < hw->num_tx_queues; i++) { vmxnet3_hw_tx_stats_get(hw, i, &txStats); memcpy(&hw->snapshot_tx_stats[i], &txStats, @@ -1566,7 +1575,7 @@ vmxnet3_dev_info_get(struct rte_eth_dev *dev, dev_info->min_rx_bufsize = 1518 + RTE_PKTMBUF_HEADROOM; dev_info->max_rx_pktlen = 16384; /* includes CRC, cf MAXFRS register */ dev_info->min_mtu = VMXNET3_MIN_MTU; - dev_info->max_mtu = VMXNET3_MAX_MTU; + dev_info->max_mtu = VMXNET3_VERSION_GE_6(hw) ? 
VMXNET3_V6_MAX_MTU : VMXNET3_MAX_MTU; dev_info->speed_capa = RTE_ETH_LINK_SPEED_10G; dev_info->max_mac_addrs = VMXNET3_MAX_MAC_ADDRS; diff --git a/drivers/net/vmxnet3/vmxnet3_ethdev.h b/drivers/net/vmxnet3/vmxnet3_ethdev.h index 2b3e2c4caa..e9ded6663d 100644 --- a/drivers/net/vmxnet3/vmxnet3_ethdev.h +++ b/drivers/net/vmxnet3/vmxnet3_ethdev.h @@ -121,8 +121,8 @@ struct vmxnet3_hw { #define VMXNET3_VFT_TABLE_SIZE (VMXNET3_VFT_SIZE * sizeof(uint32_t)) UPT1_TxStats saved_tx_stats[VMXNET3_EXT_MAX_TX_QUEUES]; UPT1_RxStats saved_rx_stats[VMXNET3_EXT_MAX_RX_QUEUES]; - UPT1_TxStats snapshot_tx_stats[VMXNET3_MAX_TX_QUEUES]; - UPT1_RxStats snapshot_rx_stats[VMXNET3_MAX_RX_QUEUES]; + UPT1_TxStats snapshot_tx_stats[VMXNET3_EXT_MAX_TX_QUEUES]; + UPT1_RxStats snapshot_rx_stats[VMXNET3_EXT_MAX_RX_QUEUES]; uint16_t tx_prod_offset; uint16_t rx_prod_offset[2]; /* device capability bit map */ diff --git a/drivers/net/zxdh/meson.build b/drivers/net/zxdh/meson.build new file mode 100644 index 0000000000..c9960f4c73 --- /dev/null +++ b/drivers/net/zxdh/meson.build @@ -0,0 +1,22 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2024 ZTE Corporation + +if not is_linux + build = false + reason = 'only supported on Linux' + subdir_done() +endif + +if not dpdk_conf.has('RTE_ARCH_X86_64') or not dpdk_conf.get('RTE_ARCH_64') + build = false + reason = 'only supported on x86_64 and aarch64' + subdir_done() +endif + +sources = files( + 'zxdh_common.c', + 'zxdh_ethdev.c', + 'zxdh_msg.c', + 'zxdh_pci.c', + 'zxdh_queue.c', +) diff --git a/drivers/net/zxdh/zxdh_common.c b/drivers/net/zxdh/zxdh_common.c new file mode 100644 index 0000000000..4f18c97ed7 --- /dev/null +++ b/drivers/net/zxdh/zxdh_common.c @@ -0,0 +1,400 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2024 ZTE Corporation + */ + +#include +#include + +#include +#include +#include + +#include "zxdh_ethdev.h" +#include "zxdh_logs.h" +#include "zxdh_msg.h" +#include "zxdh_common.h" + +#define ZXDH_MSG_RSP_SIZE_MAX 512 + +#define ZXDH_COMMON_TABLE_READ 0 +#define ZXDH_COMMON_TABLE_WRITE 1 + +#define ZXDH_COMMON_FIELD_PHYPORT 6 +#define ZXDH_COMMON_FIELD_DATACH 3 + +#define ZXDH_RSC_TBL_CONTENT_LEN_MAX (257 * 2) + +#define ZXDH_REPS_HEADER_OFFSET 4 +#define ZXDH_TBL_MSG_PRO_SUCCESS 0xaa + +struct zxdh_common_msg { + uint8_t type; /* 0:read table 1:write table */ + uint8_t field; + uint16_t pcie_id; + uint16_t slen; /* Data length for write table */ + uint16_t reserved; +} __rte_packed; + +struct zxdh_common_rsp_hdr { + uint8_t rsp_status; + uint16_t rsp_len; + uint8_t reserved; + uint8_t payload_status; + uint8_t rsv; + uint16_t payload_len; +} __rte_packed; + +struct zxdh_tbl_msg_header { + uint8_t type; + uint8_t field; + uint16_t pcieid; + uint16_t slen; + uint16_t rsv; +}; + +struct zxdh_tbl_msg_reps_header { + uint8_t check; + uint8_t rsv; + uint16_t len; +}; + +static int32_t +zxdh_fill_common_msg(struct zxdh_hw *hw, struct zxdh_pci_bar_msg *desc, + uint8_t type, uint8_t field, + void *buff, uint16_t buff_size) +{ + uint64_t msg_len = sizeof(struct zxdh_common_msg) + buff_size; + + desc->payload_addr = rte_zmalloc(NULL, msg_len, 0); + if (unlikely(desc->payload_addr == NULL)) { + PMD_DRV_LOG(ERR, "Failed to allocate msg_data"); + return -ENOMEM; + } + memset(desc->payload_addr, 0, msg_len); + desc->payload_len = msg_len; + struct zxdh_common_msg *msg_data = (struct zxdh_common_msg *)desc->payload_addr; + + msg_data->type = type; + msg_data->field = field; + msg_data->pcie_id = hw->pcie_id; + msg_data->slen = buff_size; + if (buff_size != 
0) + rte_memcpy(msg_data + 1, buff, buff_size); + + return 0; +} + +static int32_t +zxdh_send_command(struct zxdh_hw *hw, struct zxdh_pci_bar_msg *desc, + enum ZXDH_BAR_MODULE_ID module_id, + struct zxdh_msg_recviver_mem *msg_rsp) +{ + desc->virt_addr = (uint64_t)(hw->bar_addr[ZXDH_BAR0_INDEX] + ZXDH_CTRLCH_OFFSET); + desc->src = hw->is_pf ? ZXDH_MSG_CHAN_END_PF : ZXDH_MSG_CHAN_END_VF; + desc->dst = ZXDH_MSG_CHAN_END_RISC; + desc->module_id = module_id; + desc->src_pcieid = hw->pcie_id; + + msg_rsp->buffer_len = ZXDH_MSG_RSP_SIZE_MAX; + msg_rsp->recv_buffer = rte_zmalloc(NULL, msg_rsp->buffer_len, 0); + if (unlikely(msg_rsp->recv_buffer == NULL)) { + PMD_DRV_LOG(ERR, "Failed to allocate messages response"); + return -ENOMEM; + } + + if (zxdh_bar_chan_sync_msg_send(desc, msg_rsp) != ZXDH_BAR_MSG_OK) { + PMD_DRV_LOG(ERR, "Failed to send sync messages or receive response"); + rte_free(msg_rsp->recv_buffer); + return -1; + } + + return 0; +} + +static int32_t +zxdh_common_rsp_check(struct zxdh_msg_recviver_mem *msg_rsp, + void *buff, uint16_t len) +{ + struct zxdh_common_rsp_hdr *rsp_hdr = (struct zxdh_common_rsp_hdr *)msg_rsp->recv_buffer; + + if (rsp_hdr->payload_status != 0xaa || rsp_hdr->payload_len != len) { + PMD_DRV_LOG(ERR, "Common response is invalid, status:0x%x rsp_len:%d", + rsp_hdr->payload_status, rsp_hdr->payload_len); + return -1; + } + if (len != 0) + rte_memcpy(buff, rsp_hdr + 1, len); + + return 0; +} + +static int32_t +zxdh_common_table_read(struct zxdh_hw *hw, uint8_t field, + void *buff, uint16_t buff_size) +{ + struct zxdh_msg_recviver_mem msg_rsp; + struct zxdh_pci_bar_msg desc; + int32_t ret = 0; + + if (!hw->msg_chan_init) { + PMD_DRV_LOG(ERR, "Bar messages channel not initialized"); + return -1; + } + + ret = zxdh_fill_common_msg(hw, &desc, ZXDH_COMMON_TABLE_READ, field, NULL, 0); + if (ret != 0) { + PMD_DRV_LOG(ERR, "Failed to fill common msg"); + return ret; + } + + ret = zxdh_send_command(hw, &desc, ZXDH_BAR_MODULE_TBL, &msg_rsp); + if (ret != 0) + goto free_msg_data; + + ret = zxdh_common_rsp_check(&msg_rsp, buff, buff_size); + if (ret != 0) + goto free_rsp_data; + +free_rsp_data: + rte_free(msg_rsp.recv_buffer); +free_msg_data: + rte_free(desc.payload_addr); + return ret; +} + +int32_t +zxdh_phyport_get(struct rte_eth_dev *dev, uint8_t *phyport) +{ + struct zxdh_hw *hw = dev->data->dev_private; + + int32_t ret = zxdh_common_table_read(hw, ZXDH_COMMON_FIELD_PHYPORT, + (void *)phyport, sizeof(*phyport)); + return ret; +} + +static inline void +zxdh_fill_res_para(struct rte_eth_dev *dev, struct zxdh_res_para *param) +{ + struct zxdh_hw *hw = dev->data->dev_private; + + param->pcie_id = hw->pcie_id; + param->virt_addr = hw->bar_addr[0] + ZXDH_CTRLCH_OFFSET; + param->src_type = ZXDH_BAR_MODULE_TBL; +} + +static int +zxdh_get_res_info(struct zxdh_res_para *dev, uint8_t field, uint8_t *res, uint16_t *len) +{ + struct zxdh_pci_bar_msg in = {0}; + uint8_t recv_buf[ZXDH_RSC_TBL_CONTENT_LEN_MAX + 8] = {0}; + int ret = 0; + + if (!res || !dev) + return ZXDH_BAR_MSG_ERR_NULL; + + struct zxdh_tbl_msg_header tbl_msg = { + .type = ZXDH_TBL_TYPE_READ, + .field = field, + .pcieid = dev->pcie_id, + .slen = 0, + .rsv = 0, + }; + + in.virt_addr = dev->virt_addr; + in.payload_addr = &tbl_msg; + in.payload_len = sizeof(tbl_msg); + in.src = dev->src_type; + in.dst = ZXDH_MSG_CHAN_END_RISC; + in.module_id = ZXDH_BAR_MODULE_TBL; + in.src_pcieid = dev->pcie_id; + + struct zxdh_msg_recviver_mem result = { + .recv_buffer = recv_buf, + .buffer_len = sizeof(recv_buf), + }; + ret = 
zxdh_bar_chan_sync_msg_send(&in, &result); + + if (ret != ZXDH_BAR_MSG_OK) { + PMD_DRV_LOG(ERR, + "send sync_msg failed. pcieid: 0x%x, ret: %d.", dev->pcie_id, ret); + return ret; + } + struct zxdh_tbl_msg_reps_header *tbl_reps = + (struct zxdh_tbl_msg_reps_header *)(recv_buf + ZXDH_REPS_HEADER_OFFSET); + + if (tbl_reps->check != ZXDH_TBL_MSG_PRO_SUCCESS) { + PMD_DRV_LOG(ERR, + "get resource_field failed. pcieid: 0x%x, ret: %d.", dev->pcie_id, ret); + return ret; + } + *len = tbl_reps->len; + rte_memcpy(res, (recv_buf + ZXDH_REPS_HEADER_OFFSET + + sizeof(struct zxdh_tbl_msg_reps_header)), *len); + return ret; +} + +static int +zxdh_get_res_panel_id(struct zxdh_res_para *in, uint8_t *panel_id) +{ + uint8_t reps = 0; + uint16_t reps_len = 0; + + if (zxdh_get_res_info(in, ZXDH_TBL_FIELD_PNLID, &reps, &reps_len) != ZXDH_BAR_MSG_OK) + return -1; + + *panel_id = reps; + return ZXDH_BAR_MSG_OK; +} + +int32_t +zxdh_panelid_get(struct rte_eth_dev *dev, uint8_t *panelid) +{ + struct zxdh_res_para param; + + zxdh_fill_res_para(dev, ¶m); + int32_t ret = zxdh_get_res_panel_id(¶m, panelid); + return ret; +} + +uint32_t +zxdh_read_bar_reg(struct rte_eth_dev *dev, uint32_t bar, uint32_t reg) +{ + struct zxdh_hw *hw = dev->data->dev_private; + uint64_t baseaddr = (uint64_t)(hw->bar_addr[bar]); + uint32_t val = *((volatile uint32_t *)(baseaddr + reg)); + return val; +} + +void +zxdh_write_bar_reg(struct rte_eth_dev *dev, uint32_t bar, uint32_t reg, uint32_t val) +{ + struct zxdh_hw *hw = dev->data->dev_private; + uint64_t baseaddr = (uint64_t)(hw->bar_addr[bar]); + *((volatile uint32_t *)(baseaddr + reg)) = val; +} + +static bool +zxdh_try_lock(struct zxdh_hw *hw) +{ + uint32_t var = zxdh_read_comm_reg((uint64_t)hw->common_cfg, ZXDH_VF_LOCK_REG); + + /* check whether lock is used */ + if (!(var & ZXDH_VF_LOCK_ENABLE_MASK)) + return false; + + return true; +} + +int32_t +zxdh_timedlock(struct zxdh_hw *hw, uint32_t us) +{ + uint16_t timeout = 0; + + while ((timeout++) < ZXDH_ACQUIRE_CHANNEL_NUM_MAX) { + rte_delay_us_block(us); + /* acquire hw lock */ + if (!zxdh_try_lock(hw)) { + PMD_DRV_LOG(ERR, "Acquiring hw lock got failed, timeout: %d", timeout); + continue; + } + break; + } + if (timeout >= ZXDH_ACQUIRE_CHANNEL_NUM_MAX) { + PMD_DRV_LOG(ERR, "Failed to acquire channel"); + return -1; + } + return 0; +} + +void +zxdh_release_lock(struct zxdh_hw *hw) +{ + uint32_t var = zxdh_read_comm_reg((uint64_t)hw->common_cfg, ZXDH_VF_LOCK_REG); + + if (var & ZXDH_VF_LOCK_ENABLE_MASK) { + var &= ~ZXDH_VF_LOCK_ENABLE_MASK; + zxdh_write_comm_reg((uint64_t)hw->common_cfg, ZXDH_VF_LOCK_REG, var); + } +} + +uint32_t +zxdh_read_comm_reg(uint64_t pci_comm_cfg_baseaddr, uint32_t reg) +{ + uint32_t val = *((volatile uint32_t *)(pci_comm_cfg_baseaddr + reg)); + return val; +} + +void +zxdh_write_comm_reg(uint64_t pci_comm_cfg_baseaddr, uint32_t reg, uint32_t val) +{ + *((volatile uint32_t *)(pci_comm_cfg_baseaddr + reg)) = val; +} + +static int32_t +zxdh_common_table_write(struct zxdh_hw *hw, uint8_t field, + void *buff, uint16_t buff_size) +{ + struct zxdh_pci_bar_msg desc; + struct zxdh_msg_recviver_mem msg_rsp; + int32_t ret = 0; + + if (!hw->msg_chan_init) { + PMD_DRV_LOG(ERR, "Bar messages channel not initialized"); + return -1; + } + if (buff_size != 0 && buff == NULL) { + PMD_DRV_LOG(ERR, "Buff is invalid"); + return -1; + } + + ret = zxdh_fill_common_msg(hw, &desc, ZXDH_COMMON_TABLE_WRITE, + field, buff, buff_size); + + if (ret != 0) { + PMD_DRV_LOG(ERR, "Failed to fill common msg"); + return ret; + } + + ret = 
zxdh_send_command(hw, &desc, ZXDH_BAR_MODULE_TBL, &msg_rsp); + if (ret != 0) + goto free_msg_data; + + ret = zxdh_common_rsp_check(&msg_rsp, NULL, 0); + if (ret != 0) + goto free_rsp_data; + +free_rsp_data: + rte_free(msg_rsp.recv_buffer); +free_msg_data: + rte_free(desc.payload_addr); + return ret; +} + +int32_t +zxdh_datach_set(struct rte_eth_dev *dev) +{ + struct zxdh_hw *hw = dev->data->dev_private; + uint16_t buff_size = (hw->queue_num + 1) * 2; + int32_t ret = 0; + uint16_t i; + + void *buff = rte_zmalloc(NULL, buff_size, 0); + if (unlikely(buff == NULL)) { + PMD_DRV_LOG(ERR, "Failed to allocate buff"); + return -ENOMEM; + } + memset(buff, 0, buff_size); + uint16_t *pdata = (uint16_t *)buff; + *pdata++ = hw->queue_num; + + for (i = 0; i < hw->queue_num; i++) + *(pdata + i) = hw->channel_context[i].ph_chno; + + ret = zxdh_common_table_write(hw, ZXDH_COMMON_FIELD_DATACH, + (void *)buff, buff_size); + if (ret != 0) + PMD_DRV_LOG(ERR, "Failed to setup data channel of common table"); + + rte_free(buff); + return ret; +} diff --git a/drivers/net/zxdh/zxdh_common.h b/drivers/net/zxdh/zxdh_common.h new file mode 100644 index 0000000000..72c29e1522 --- /dev/null +++ b/drivers/net/zxdh/zxdh_common.h @@ -0,0 +1,33 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2024 ZTE Corporation + */ + +#ifndef ZXDH_COMMON_H +#define ZXDH_COMMON_H + +#include +#include + +#include "zxdh_ethdev.h" + +#define ZXDH_VF_LOCK_REG 0x90 +#define ZXDH_VF_LOCK_ENABLE_MASK 0x1 +#define ZXDH_ACQUIRE_CHANNEL_NUM_MAX 10 + +struct zxdh_res_para { + uint64_t virt_addr; + uint16_t pcie_id; + uint16_t src_type; /* refer to BAR_DRIVER_TYPE */ +}; + +int32_t zxdh_phyport_get(struct rte_eth_dev *dev, uint8_t *phyport); +int32_t zxdh_panelid_get(struct rte_eth_dev *dev, uint8_t *pannelid); +uint32_t zxdh_read_bar_reg(struct rte_eth_dev *dev, uint32_t bar, uint32_t reg); +void zxdh_write_bar_reg(struct rte_eth_dev *dev, uint32_t bar, uint32_t reg, uint32_t val); +void zxdh_release_lock(struct zxdh_hw *hw); +int32_t zxdh_timedlock(struct zxdh_hw *hw, uint32_t us); +uint32_t zxdh_read_comm_reg(uint64_t pci_comm_cfg_baseaddr, uint32_t reg); +void zxdh_write_comm_reg(uint64_t pci_comm_cfg_baseaddr, uint32_t reg, uint32_t val); +int32_t zxdh_datach_set(struct rte_eth_dev *dev); + +#endif /* ZXDH_COMMON_H */ diff --git a/drivers/net/zxdh/zxdh_ethdev.c b/drivers/net/zxdh/zxdh_ethdev.c new file mode 100644 index 0000000000..c786198535 --- /dev/null +++ b/drivers/net/zxdh/zxdh_ethdev.c @@ -0,0 +1,1041 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2024 ZTE Corporation + */ + +#include +#include +#include + +#include "zxdh_ethdev.h" +#include "zxdh_logs.h" +#include "zxdh_pci.h" +#include "zxdh_msg.h" +#include "zxdh_common.h" +#include "zxdh_queue.h" + +struct zxdh_hw_internal zxdh_hw_internal[RTE_MAX_ETHPORTS]; + +uint16_t +zxdh_vport_to_vfid(union zxdh_virport_num v) +{ + /* epid > 4 is local soft queue. 
return 1192 */ + if (v.epid > 4) + return 1192; + if (v.vf_flag) + return v.epid * 256 + v.vfid; + else + return (v.epid * 8 + v.pfid) + 1152; +} + +static int32_t +zxdh_dev_infos_get(struct rte_eth_dev *dev, + struct rte_eth_dev_info *dev_info) +{ + struct zxdh_hw *hw = dev->data->dev_private; + + dev_info->speed_capa = rte_eth_speed_bitflag(hw->speed, RTE_ETH_LINK_FULL_DUPLEX); + dev_info->max_rx_queues = RTE_MIN(hw->max_queue_pairs, ZXDH_RX_QUEUES_MAX); + dev_info->max_tx_queues = RTE_MIN(hw->max_queue_pairs, ZXDH_TX_QUEUES_MAX); + dev_info->min_rx_bufsize = ZXDH_MIN_RX_BUFSIZE; + dev_info->max_rx_pktlen = ZXDH_MAX_RX_PKTLEN; + dev_info->max_mac_addrs = ZXDH_MAX_MAC_ADDRS; + dev_info->rx_offload_capa = (RTE_ETH_RX_OFFLOAD_VLAN_STRIP | + RTE_ETH_RX_OFFLOAD_VLAN_FILTER | + RTE_ETH_RX_OFFLOAD_QINQ_STRIP); + dev_info->rx_offload_capa |= (RTE_ETH_RX_OFFLOAD_IPV4_CKSUM | + RTE_ETH_RX_OFFLOAD_UDP_CKSUM | + RTE_ETH_RX_OFFLOAD_TCP_CKSUM | + RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM); + dev_info->rx_offload_capa |= (RTE_ETH_RX_OFFLOAD_SCATTER); + dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_TCP_LRO; + dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_RSS_HASH; + + dev_info->tx_offload_capa = (RTE_ETH_TX_OFFLOAD_MULTI_SEGS); + dev_info->tx_offload_capa |= (RTE_ETH_TX_OFFLOAD_TCP_TSO | + RTE_ETH_TX_OFFLOAD_UDP_TSO); + dev_info->tx_offload_capa |= (RTE_ETH_TX_OFFLOAD_VLAN_INSERT | + RTE_ETH_TX_OFFLOAD_QINQ_INSERT | + RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO); + dev_info->tx_offload_capa |= (RTE_ETH_TX_OFFLOAD_IPV4_CKSUM | + RTE_ETH_TX_OFFLOAD_UDP_CKSUM | + RTE_ETH_TX_OFFLOAD_TCP_CKSUM | + RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM | + RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM); + + return 0; +} + +static void +zxdh_queues_unbind_intr(struct rte_eth_dev *dev) +{ + struct zxdh_hw *hw = dev->data->dev_private; + int32_t i; + + for (i = 0; i < dev->data->nb_rx_queues; ++i) { + ZXDH_VTPCI_OPS(hw)->set_queue_irq(hw, hw->vqs[i * 2], ZXDH_MSI_NO_VECTOR); + ZXDH_VTPCI_OPS(hw)->set_queue_irq(hw, hw->vqs[i * 2 + 1], ZXDH_MSI_NO_VECTOR); + } +} + + +static int32_t +zxdh_intr_unmask(struct rte_eth_dev *dev) +{ + struct zxdh_hw *hw = dev->data->dev_private; + + if (rte_intr_ack(dev->intr_handle) < 0) + return -1; + + hw->use_msix = zxdh_pci_msix_detect(RTE_ETH_DEV_TO_PCI(dev)); + + return 0; +} + +static void +zxdh_devconf_intr_handler(void *param) +{ + struct rte_eth_dev *dev = param; + + if (zxdh_intr_unmask(dev) < 0) + PMD_DRV_LOG(ERR, "interrupt enable failed"); +} + + +/* Interrupt handler triggered by NIC for handling specific interrupt. */ +static void +zxdh_fromriscv_intr_handler(void *param) +{ + struct rte_eth_dev *dev = param; + struct zxdh_hw *hw = dev->data->dev_private; + uint64_t virt_addr = (uint64_t)(hw->bar_addr[ZXDH_BAR0_INDEX] + ZXDH_CTRLCH_OFFSET); + + if (hw->is_pf) { + PMD_DRV_LOG(DEBUG, "zxdh_risc2pf_intr_handler"); + zxdh_bar_irq_recv(ZXDH_MSG_CHAN_END_RISC, ZXDH_MSG_CHAN_END_PF, virt_addr, dev); + } else { + PMD_DRV_LOG(DEBUG, "zxdh_riscvf_intr_handler"); + zxdh_bar_irq_recv(ZXDH_MSG_CHAN_END_RISC, ZXDH_MSG_CHAN_END_VF, virt_addr, dev); + } +} + +/* Interrupt handler triggered by NIC for handling specific interrupt. 
*/ +static void +zxdh_frompfvf_intr_handler(void *param) +{ + struct rte_eth_dev *dev = param; + struct zxdh_hw *hw = dev->data->dev_private; + uint64_t virt_addr = (uint64_t)(hw->bar_addr[ZXDH_BAR0_INDEX] + + ZXDH_MSG_CHAN_PFVFSHARE_OFFSET); + + if (hw->is_pf) { + PMD_DRV_LOG(DEBUG, "zxdh_vf2pf_intr_handler"); + zxdh_bar_irq_recv(ZXDH_MSG_CHAN_END_VF, ZXDH_MSG_CHAN_END_PF, virt_addr, dev); + } else { + PMD_DRV_LOG(DEBUG, "zxdh_pf2vf_intr_handler"); + zxdh_bar_irq_recv(ZXDH_MSG_CHAN_END_PF, ZXDH_MSG_CHAN_END_VF, virt_addr, dev); + } +} + +static void +zxdh_intr_cb_reg(struct rte_eth_dev *dev) +{ + struct zxdh_hw *hw = dev->data->dev_private; + + if (dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC) + rte_intr_callback_unregister(dev->intr_handle, zxdh_devconf_intr_handler, dev); + + /* register callback to update dev config intr */ + rte_intr_callback_register(dev->intr_handle, zxdh_devconf_intr_handler, dev); + /* Register rsic_v to pf interrupt callback */ + struct rte_intr_handle *tmp = hw->risc_intr + + (ZXDH_MSIX_FROM_PFVF - ZXDH_MSIX_INTR_MSG_VEC_BASE); + + rte_intr_callback_register(tmp, zxdh_frompfvf_intr_handler, dev); + + tmp = hw->risc_intr + (ZXDH_MSIX_FROM_RISCV - ZXDH_MSIX_INTR_MSG_VEC_BASE); + rte_intr_callback_register(tmp, zxdh_fromriscv_intr_handler, dev); +} + +static void +zxdh_intr_cb_unreg(struct rte_eth_dev *dev) +{ + if (dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC) + rte_intr_callback_unregister(dev->intr_handle, zxdh_devconf_intr_handler, dev); + + struct zxdh_hw *hw = dev->data->dev_private; + + /* register callback to update dev config intr */ + rte_intr_callback_unregister(dev->intr_handle, zxdh_devconf_intr_handler, dev); + /* Register rsic_v to pf interrupt callback */ + struct rte_intr_handle *tmp = hw->risc_intr + + (ZXDH_MSIX_FROM_PFVF - ZXDH_MSIX_INTR_MSG_VEC_BASE); + + rte_intr_callback_unregister(tmp, zxdh_frompfvf_intr_handler, dev); + tmp = hw->risc_intr + (ZXDH_MSIX_FROM_RISCV - ZXDH_MSIX_INTR_MSG_VEC_BASE); + rte_intr_callback_unregister(tmp, zxdh_fromriscv_intr_handler, dev); +} + +static int32_t +zxdh_intr_disable(struct rte_eth_dev *dev) +{ + struct zxdh_hw *hw = dev->data->dev_private; + + if (!hw->intr_enabled) + return 0; + + zxdh_intr_cb_unreg(dev); + if (rte_intr_disable(dev->intr_handle) < 0) + return -1; + + hw->intr_enabled = 0; + return 0; +} + +static int32_t +zxdh_intr_enable(struct rte_eth_dev *dev) +{ + int ret = 0; + struct zxdh_hw *hw = dev->data->dev_private; + + if (!hw->intr_enabled) { + zxdh_intr_cb_reg(dev); + ret = rte_intr_enable(dev->intr_handle); + if (unlikely(ret)) + PMD_DRV_LOG(ERR, "Failed to enable %s intr", dev->data->name); + + hw->intr_enabled = 1; + } + return ret; +} + +static int32_t +zxdh_intr_release(struct rte_eth_dev *dev) +{ + struct zxdh_hw *hw = dev->data->dev_private; + + if (dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC) + ZXDH_VTPCI_OPS(hw)->set_config_irq(hw, ZXDH_MSI_NO_VECTOR); + + zxdh_queues_unbind_intr(dev); + zxdh_intr_disable(dev); + + rte_intr_efd_disable(dev->intr_handle); + rte_intr_vec_list_free(dev->intr_handle); + rte_free(hw->risc_intr); + hw->risc_intr = NULL; + rte_free(hw->dtb_intr); + hw->dtb_intr = NULL; + return 0; +} + +static int32_t +zxdh_setup_risc_interrupts(struct rte_eth_dev *dev) +{ + struct zxdh_hw *hw = dev->data->dev_private; + uint8_t i; + + if (!hw->risc_intr) { + PMD_DRV_LOG(ERR, " to allocate risc_intr"); + hw->risc_intr = rte_zmalloc("risc_intr", + ZXDH_MSIX_INTR_MSG_VEC_NUM * sizeof(struct rte_intr_handle), 0); + if (hw->risc_intr == NULL) { + PMD_DRV_LOG(ERR, "Failed to 
allocate risc_intr"); + return -ENOMEM; + } + } + + for (i = 0; i < ZXDH_MSIX_INTR_MSG_VEC_NUM; i++) { + if (dev->intr_handle->efds[i] < 0) { + PMD_DRV_LOG(ERR, "[%u]risc interrupt fd is invalid", i); + rte_free(hw->risc_intr); + hw->risc_intr = NULL; + return -1; + } + + struct rte_intr_handle *intr_handle = hw->risc_intr + i; + + intr_handle->fd = dev->intr_handle->efds[i]; + intr_handle->type = dev->intr_handle->type; + } + + return 0; +} + +static int32_t +zxdh_setup_dtb_interrupts(struct rte_eth_dev *dev) +{ + struct zxdh_hw *hw = dev->data->dev_private; + + if (!hw->dtb_intr) { + hw->dtb_intr = rte_zmalloc("dtb_intr", sizeof(struct rte_intr_handle), 0); + if (hw->dtb_intr == NULL) { + PMD_DRV_LOG(ERR, "Failed to allocate dtb_intr"); + return -ENOMEM; + } + } + + if (dev->intr_handle->efds[ZXDH_MSIX_INTR_DTB_VEC - 1] < 0) { + PMD_DRV_LOG(ERR, "[%d]dtb interrupt fd is invalid", ZXDH_MSIX_INTR_DTB_VEC - 1); + rte_free(hw->dtb_intr); + hw->dtb_intr = NULL; + return -1; + } + hw->dtb_intr->fd = dev->intr_handle->efds[ZXDH_MSIX_INTR_DTB_VEC - 1]; + hw->dtb_intr->type = dev->intr_handle->type; + return 0; +} + +static int32_t +zxdh_queues_bind_intr(struct rte_eth_dev *dev) +{ + struct zxdh_hw *hw = dev->data->dev_private; + int32_t i; + uint16_t vec; + + if (!dev->data->dev_conf.intr_conf.rxq) { + for (i = 0; i < dev->data->nb_rx_queues; ++i) { + vec = ZXDH_VTPCI_OPS(hw)->set_queue_irq(hw, + hw->vqs[i * 2], ZXDH_MSI_NO_VECTOR); + PMD_DRV_LOG(DEBUG, "vq%d irq set 0x%x, get 0x%x", + i * 2, ZXDH_MSI_NO_VECTOR, vec); + } + } else { + for (i = 0; i < dev->data->nb_rx_queues; ++i) { + vec = ZXDH_VTPCI_OPS(hw)->set_queue_irq(hw, + hw->vqs[i * 2], i + ZXDH_QUEUE_INTR_VEC_BASE); + PMD_DRV_LOG(DEBUG, "vq%d irq set %d, get %d", + i * 2, i + ZXDH_QUEUE_INTR_VEC_BASE, vec); + } + } + /* mask all txq intr */ + for (i = 0; i < dev->data->nb_tx_queues; ++i) { + vec = ZXDH_VTPCI_OPS(hw)->set_queue_irq(hw, + hw->vqs[(i * 2) + 1], ZXDH_MSI_NO_VECTOR); + PMD_DRV_LOG(DEBUG, "vq%d irq set 0x%x, get 0x%x", + (i * 2) + 1, ZXDH_MSI_NO_VECTOR, vec); + } + return 0; +} + +static int32_t +zxdh_configure_intr(struct rte_eth_dev *dev) +{ + struct zxdh_hw *hw = dev->data->dev_private; + int32_t ret = 0; + + if (!rte_intr_cap_multiple(dev->intr_handle)) { + PMD_DRV_LOG(ERR, "Multiple intr vector not supported"); + return -ENOTSUP; + } + zxdh_intr_release(dev); + uint8_t nb_efd = ZXDH_MSIX_INTR_DTB_VEC_NUM + ZXDH_MSIX_INTR_MSG_VEC_NUM; + + if (dev->data->dev_conf.intr_conf.rxq) + nb_efd += dev->data->nb_rx_queues; + + if (rte_intr_efd_enable(dev->intr_handle, nb_efd)) { + PMD_DRV_LOG(ERR, "Fail to create eventfd"); + return -1; + } + + if (rte_intr_vec_list_alloc(dev->intr_handle, "intr_vec", + hw->max_queue_pairs + ZXDH_INTR_NONQUE_NUM)) { + PMD_DRV_LOG(ERR, "Failed to allocate %u rxq vectors", + hw->max_queue_pairs + ZXDH_INTR_NONQUE_NUM); + return -ENOMEM; + } + PMD_DRV_LOG(DEBUG, "allocate %u rxq vectors", dev->intr_handle->vec_list_size); + if (zxdh_setup_risc_interrupts(dev) != 0) { + PMD_DRV_LOG(ERR, "Error setting up rsic_v interrupts!"); + ret = -1; + goto free_intr_vec; + } + if (zxdh_setup_dtb_interrupts(dev) != 0) { + PMD_DRV_LOG(ERR, "Error setting up dtb interrupts!"); + ret = -1; + goto free_intr_vec; + } + + if (zxdh_queues_bind_intr(dev) < 0) { + PMD_DRV_LOG(ERR, "Failed to bind queue/interrupt"); + ret = -1; + goto free_intr_vec; + } + + if (zxdh_intr_enable(dev) < 0) { + PMD_DRV_LOG(ERR, "interrupt enable failed"); + ret = -1; + goto free_intr_vec; + } + return 0; + +free_intr_vec: + 
zxdh_intr_release(dev); + return ret; +} + +static int32_t +zxdh_features_update(struct zxdh_hw *hw, + const struct rte_eth_rxmode *rxmode, + const struct rte_eth_txmode *txmode) +{ + uint64_t rx_offloads = rxmode->offloads; + uint64_t tx_offloads = txmode->offloads; + uint64_t req_features = hw->guest_features; + + if (rx_offloads & (RTE_ETH_RX_OFFLOAD_UDP_CKSUM | RTE_ETH_RX_OFFLOAD_TCP_CKSUM)) + req_features |= (1ULL << ZXDH_NET_F_GUEST_CSUM); + + if (rx_offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO) + req_features |= (1ULL << ZXDH_NET_F_GUEST_TSO4) | + (1ULL << ZXDH_NET_F_GUEST_TSO6); + + if (tx_offloads & (RTE_ETH_RX_OFFLOAD_UDP_CKSUM | RTE_ETH_RX_OFFLOAD_TCP_CKSUM)) + req_features |= (1ULL << ZXDH_NET_F_CSUM); + + if (tx_offloads & RTE_ETH_TX_OFFLOAD_TCP_TSO) + req_features |= (1ULL << ZXDH_NET_F_HOST_TSO4) | + (1ULL << ZXDH_NET_F_HOST_TSO6); + + if (tx_offloads & RTE_ETH_TX_OFFLOAD_UDP_TSO) + req_features |= (1ULL << ZXDH_NET_F_HOST_UFO); + + req_features = req_features & hw->host_features; + hw->guest_features = req_features; + + ZXDH_VTPCI_OPS(hw)->set_features(hw, req_features); + + if ((rx_offloads & (RTE_ETH_TX_OFFLOAD_UDP_CKSUM | RTE_ETH_TX_OFFLOAD_TCP_CKSUM)) && + !vtpci_with_feature(hw, ZXDH_NET_F_GUEST_CSUM)) { + PMD_DRV_LOG(ERR, "rx checksum not available on this host"); + return -ENOTSUP; + } + + if ((rx_offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO) && + (!vtpci_with_feature(hw, ZXDH_NET_F_GUEST_TSO4) || + !vtpci_with_feature(hw, ZXDH_NET_F_GUEST_TSO6))) { + PMD_DRV_LOG(ERR, "Large Receive Offload not available on this host"); + return -ENOTSUP; + } + return 0; +} + +static bool +rx_offload_enabled(struct zxdh_hw *hw) +{ + return vtpci_with_feature(hw, ZXDH_NET_F_GUEST_CSUM) || + vtpci_with_feature(hw, ZXDH_NET_F_GUEST_TSO4) || + vtpci_with_feature(hw, ZXDH_NET_F_GUEST_TSO6); +} + +static bool +tx_offload_enabled(struct zxdh_hw *hw) +{ + return vtpci_with_feature(hw, ZXDH_NET_F_CSUM) || + vtpci_with_feature(hw, ZXDH_NET_F_HOST_TSO4) || + vtpci_with_feature(hw, ZXDH_NET_F_HOST_TSO6) || + vtpci_with_feature(hw, ZXDH_NET_F_HOST_UFO); +} + +static void +zxdh_dev_free_mbufs(struct rte_eth_dev *dev) +{ + struct zxdh_hw *hw = dev->data->dev_private; + uint16_t nr_vq = hw->queue_num; + uint32_t i = 0; + + const char *type = NULL; + struct zxdh_virtqueue *vq = NULL; + struct rte_mbuf *buf = NULL; + int32_t queue_type = 0; + + if (hw->vqs == NULL) + return; + + for (i = 0; i < nr_vq; i++) { + vq = hw->vqs[i]; + if (!vq) + continue; + + queue_type = zxdh_get_queue_type(i); + if (queue_type == ZXDH_VTNET_RQ) + type = "rxq"; + else if (queue_type == ZXDH_VTNET_TQ) + type = "txq"; + else + continue; + PMD_DRV_LOG(DEBUG, "Before freeing %s[%d] used and unused buf", type, i); + + while ((buf = zxdh_virtqueue_detach_unused(vq)) != NULL) + rte_pktmbuf_free(buf); + } +} + +static int32_t +zxdh_get_available_channel(struct rte_eth_dev *dev, uint8_t queue_type) +{ + struct zxdh_hw *hw = dev->data->dev_private; + uint16_t base = (queue_type == ZXDH_VTNET_RQ) ? 
0 : 1; + uint16_t i = 0; + uint16_t j = 0; + uint16_t done = 0; + int32_t ret = 0; + + ret = zxdh_timedlock(hw, 1000); + if (ret) { + PMD_DRV_LOG(ERR, "Failed to acquire hw lock, timeout"); + return -1; + } + + /* Iterate COI table and find free channel */ + for (i = ZXDH_QUEUES_BASE / 32; i < ZXDH_TOTAL_QUEUES_NUM / 32; i++) { + uint32_t addr = ZXDH_QUERES_SHARE_BASE + (i * sizeof(uint32_t)); + uint32_t var = zxdh_read_bar_reg(dev, ZXDH_BAR0_INDEX, addr); + + for (j = base; j < 32; j += 2) { + /* Got the available channel & update COI table */ + if ((var & (1 << j)) == 0) { + var |= (1 << j); + zxdh_write_bar_reg(dev, ZXDH_BAR0_INDEX, addr, var); + done = 1; + break; + } + } + if (done) + break; + } + zxdh_release_lock(hw); + /* check for no channel condition */ + if (done != 1) { + PMD_DRV_LOG(ERR, "No available queues"); + return -1; + } + /* return available channel ID */ + return (i * 32) + j; +} + +static int32_t +zxdh_acquire_channel(struct rte_eth_dev *dev, uint16_t lch) +{ + struct zxdh_hw *hw = dev->data->dev_private; + + if (hw->channel_context[lch].valid == 1) { + PMD_DRV_LOG(DEBUG, "Logical channel:%u already acquired physical channel:%u", + lch, hw->channel_context[lch].ph_chno); + return hw->channel_context[lch].ph_chno; + } + int32_t pch = zxdh_get_available_channel(dev, zxdh_get_queue_type(lch)); + + if (pch < 0) { + PMD_DRV_LOG(ERR, "Failed to acquire channel"); + return -1; + } + hw->channel_context[lch].ph_chno = (uint16_t)pch; + hw->channel_context[lch].valid = 1; + PMD_DRV_LOG(DEBUG, "Acquire channel success lch:%u --> pch:%d", lch, pch); + return 0; +} + +static void +zxdh_init_vring(struct zxdh_virtqueue *vq) +{ + int32_t size = vq->vq_nentries; + uint8_t *ring_mem = vq->vq_ring_virt_mem; + + memset(ring_mem, 0, vq->vq_ring_size); + + vq->vq_used_cons_idx = 0; + vq->vq_desc_head_idx = 0; + vq->vq_avail_idx = 0; + vq->vq_desc_tail_idx = (uint16_t)(vq->vq_nentries - 1); + vq->vq_free_cnt = vq->vq_nentries; + memset(vq->vq_descx, 0, sizeof(struct zxdh_vq_desc_extra) * vq->vq_nentries); + vring_init_packed(&vq->vq_packed.ring, ring_mem, ZXDH_PCI_VRING_ALIGN, size); + vring_desc_init_packed(vq, size); + virtqueue_disable_intr(vq); +} + +static int32_t +zxdh_init_queue(struct rte_eth_dev *dev, uint16_t vtpci_logic_qidx) +{ + char vq_name[ZXDH_VIRTQUEUE_MAX_NAME_SZ] = {0}; + char vq_hdr_name[ZXDH_VIRTQUEUE_MAX_NAME_SZ] = {0}; + const struct rte_memzone *mz = NULL; + const struct rte_memzone *hdr_mz = NULL; + uint32_t size = 0; + struct zxdh_hw *hw = dev->data->dev_private; + struct zxdh_virtnet_rx *rxvq = NULL; + struct zxdh_virtnet_tx *txvq = NULL; + struct zxdh_virtqueue *vq = NULL; + size_t sz_hdr_mz = 0; + void *sw_ring = NULL; + int32_t queue_type = zxdh_get_queue_type(vtpci_logic_qidx); + int32_t numa_node = dev->device->numa_node; + uint16_t vtpci_phy_qidx = 0; + uint32_t vq_size = 0; + int32_t ret = 0; + + if (hw->channel_context[vtpci_logic_qidx].valid == 0) { + PMD_DRV_LOG(ERR, "lch %d is invalid", vtpci_logic_qidx); + return -EINVAL; + } + vtpci_phy_qidx = hw->channel_context[vtpci_logic_qidx].ph_chno; + + PMD_DRV_LOG(DEBUG, "vtpci_logic_qidx:%d setting up physical queue: %u on NUMA node %d", + vtpci_logic_qidx, vtpci_phy_qidx, numa_node); + + vq_size = ZXDH_QUEUE_DEPTH; + + if (ZXDH_VTPCI_OPS(hw)->set_queue_num != NULL) + ZXDH_VTPCI_OPS(hw)->set_queue_num(hw, vtpci_phy_qidx, vq_size); + + snprintf(vq_name, sizeof(vq_name), "port%d_vq%d", dev->data->port_id, vtpci_phy_qidx); + + size = RTE_ALIGN_CEIL(sizeof(*vq) + vq_size * sizeof(struct zxdh_vq_desc_extra), + 
RTE_CACHE_LINE_SIZE); + if (queue_type == ZXDH_VTNET_TQ) { + /* + * For each xmit packet, allocate a zxdh_net_hdr + * and indirect ring elements + */ + sz_hdr_mz = vq_size * sizeof(struct zxdh_tx_region); + } + + vq = rte_zmalloc_socket(vq_name, size, RTE_CACHE_LINE_SIZE, numa_node); + if (vq == NULL) { + PMD_DRV_LOG(ERR, "can not allocate vq"); + return -ENOMEM; + } + hw->vqs[vtpci_logic_qidx] = vq; + + vq->hw = hw; + vq->vq_queue_index = vtpci_phy_qidx; + vq->vq_nentries = vq_size; + + vq->vq_packed.used_wrap_counter = 1; + vq->vq_packed.cached_flags = ZXDH_VRING_PACKED_DESC_F_AVAIL; + vq->vq_packed.event_flags_shadow = 0; + if (queue_type == ZXDH_VTNET_RQ) + vq->vq_packed.cached_flags |= ZXDH_VRING_DESC_F_WRITE; + + /* + * Reserve a memzone for vring elements + */ + size = vring_size(hw, vq_size, ZXDH_PCI_VRING_ALIGN); + vq->vq_ring_size = RTE_ALIGN_CEIL(size, ZXDH_PCI_VRING_ALIGN); + PMD_DRV_LOG(DEBUG, "vring_size: %d, rounded_vring_size: %d", size, vq->vq_ring_size); + + mz = rte_memzone_reserve_aligned(vq_name, vq->vq_ring_size, + numa_node, RTE_MEMZONE_IOVA_CONTIG, + ZXDH_PCI_VRING_ALIGN); + if (mz == NULL) { + if (rte_errno == EEXIST) + mz = rte_memzone_lookup(vq_name); + if (mz == NULL) { + ret = -ENOMEM; + goto fail_q_alloc; + } + } + + memset(mz->addr, 0, mz->len); + + vq->vq_ring_mem = mz->iova; + vq->vq_ring_virt_mem = mz->addr; + + zxdh_init_vring(vq); + + if (sz_hdr_mz) { + snprintf(vq_hdr_name, sizeof(vq_hdr_name), "port%d_vq%d_hdr", + dev->data->port_id, vtpci_phy_qidx); + hdr_mz = rte_memzone_reserve_aligned(vq_hdr_name, sz_hdr_mz, + numa_node, RTE_MEMZONE_IOVA_CONTIG, + RTE_CACHE_LINE_SIZE); + if (hdr_mz == NULL) { + if (rte_errno == EEXIST) + hdr_mz = rte_memzone_lookup(vq_hdr_name); + if (hdr_mz == NULL) { + ret = -ENOMEM; + goto fail_q_alloc; + } + } + } + + if (queue_type == ZXDH_VTNET_RQ) { + size_t sz_sw = (ZXDH_MBUF_BURST_SZ + vq_size) * sizeof(vq->sw_ring[0]); + + sw_ring = rte_zmalloc_socket("sw_ring", sz_sw, RTE_CACHE_LINE_SIZE, numa_node); + if (!sw_ring) { + PMD_DRV_LOG(ERR, "can not allocate RX soft ring"); + ret = -ENOMEM; + goto fail_q_alloc; + } + + vq->sw_ring = sw_ring; + rxvq = &vq->rxq; + rxvq->vq = vq; + rxvq->port_id = dev->data->port_id; + rxvq->mz = mz; + } else { /* queue_type == VTNET_TQ */ + txvq = &vq->txq; + txvq->vq = vq; + txvq->port_id = dev->data->port_id; + txvq->mz = mz; + txvq->zxdh_net_hdr_mz = hdr_mz; + txvq->zxdh_net_hdr_mem = hdr_mz->iova; + } + + vq->offset = offsetof(struct rte_mbuf, buf_iova); + if (queue_type == ZXDH_VTNET_TQ) { + struct zxdh_tx_region *txr = hdr_mz->addr; + uint32_t i; + + memset(txr, 0, vq_size * sizeof(*txr)); + for (i = 0; i < vq_size; i++) { + /* first indirect descriptor is always the tx header */ + struct zxdh_vring_packed_desc *start_dp = txr[i].tx_packed_indir; + + vring_desc_init_indirect_packed(start_dp, RTE_DIM(txr[i].tx_packed_indir)); + start_dp->addr = txvq->zxdh_net_hdr_mem + i * sizeof(*txr) + + offsetof(struct zxdh_tx_region, tx_hdr); + /* length will be updated to actual pi hdr size when xmit pkt */ + start_dp->len = 0; + } + } + if (ZXDH_VTPCI_OPS(hw)->setup_queue(hw, vq) < 0) { + PMD_DRV_LOG(ERR, "setup_queue failed"); + return -EINVAL; + } + return 0; +fail_q_alloc: + rte_free(sw_ring); + rte_memzone_free(hdr_mz); + rte_memzone_free(mz); + rte_free(vq); + return ret; +} + +static int32_t +zxdh_alloc_queues(struct rte_eth_dev *dev, uint16_t nr_vq) +{ + uint16_t lch; + struct zxdh_hw *hw = dev->data->dev_private; + + hw->vqs = rte_zmalloc(NULL, sizeof(struct zxdh_virtqueue *) * nr_vq, 0); + 
if (!hw->vqs) { + PMD_DRV_LOG(ERR, "Failed to allocate vqs"); + return -ENOMEM; + } + for (lch = 0; lch < nr_vq; lch++) { + if (zxdh_acquire_channel(dev, lch) < 0) { + PMD_DRV_LOG(ERR, "Failed to acquire the channels"); + zxdh_free_queues(dev); + return -1; + } + if (zxdh_init_queue(dev, lch) < 0) { + PMD_DRV_LOG(ERR, "Failed to alloc virtio queue"); + zxdh_free_queues(dev); + return -1; + } + } + return 0; +} + + +static int32_t +zxdh_dev_configure(struct rte_eth_dev *dev) +{ + const struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode; + const struct rte_eth_txmode *txmode = &dev->data->dev_conf.txmode; + struct zxdh_hw *hw = dev->data->dev_private; + uint32_t nr_vq = 0; + int32_t ret = 0; + + if (dev->data->nb_rx_queues != dev->data->nb_tx_queues) { + PMD_DRV_LOG(ERR, "nb_rx_queues=%d and nb_tx_queues=%d not equal!", + dev->data->nb_rx_queues, dev->data->nb_tx_queues); + return -EINVAL; + } + if ((dev->data->nb_rx_queues + dev->data->nb_tx_queues) >= ZXDH_QUEUES_NUM_MAX) { + PMD_DRV_LOG(ERR, "nb_rx_queues=%d + nb_tx_queues=%d must < (%d)!", + dev->data->nb_rx_queues, dev->data->nb_tx_queues, + ZXDH_QUEUES_NUM_MAX); + return -EINVAL; + } + if (rxmode->mq_mode != RTE_ETH_MQ_RX_RSS && rxmode->mq_mode != RTE_ETH_MQ_RX_NONE) { + PMD_DRV_LOG(ERR, "Unsupported Rx multi queue mode %d", rxmode->mq_mode); + return -EINVAL; + } + + if (txmode->mq_mode != RTE_ETH_MQ_TX_NONE) { + PMD_DRV_LOG(ERR, "Unsupported Tx multi queue mode %d", txmode->mq_mode); + return -EINVAL; + } + + ret = zxdh_features_update(hw, rxmode, txmode); + if (ret < 0) + return ret; + + /* check if lsc interrupt feature is enabled */ + if (dev->data->dev_conf.intr_conf.lsc) { + if (!(dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)) { + PMD_DRV_LOG(ERR, "link status not supported by host"); + return -ENOTSUP; + } + } + + hw->has_tx_offload = tx_offload_enabled(hw); + hw->has_rx_offload = rx_offload_enabled(hw); + + nr_vq = dev->data->nb_rx_queues + dev->data->nb_tx_queues; + if (nr_vq == hw->queue_num) + return 0; + + PMD_DRV_LOG(DEBUG, "queue changed, need reset"); + /* Reset the device although not necessary at startup */ + zxdh_pci_reset(hw); + + /* Tell the host we've noticed this device. */ + zxdh_pci_set_status(hw, ZXDH_CONFIG_STATUS_ACK); + + /* Tell the host we've known how to drive the device. 
*/ + zxdh_pci_set_status(hw, ZXDH_CONFIG_STATUS_DRIVER); + /* The queue needs to be released when reconfiguring */ + if (hw->vqs != NULL) { + zxdh_dev_free_mbufs(dev); + zxdh_free_queues(dev); + } + + hw->queue_num = nr_vq; + ret = zxdh_alloc_queues(dev, nr_vq); + if (ret < 0) + return ret; + + zxdh_datach_set(dev); + + if (zxdh_configure_intr(dev) < 0) { + PMD_DRV_LOG(ERR, "Failed to configure interrupt"); + zxdh_free_queues(dev); + return -1; + } + + zxdh_pci_reinit_complete(hw); + + return ret; +} + +static int +zxdh_dev_close(struct rte_eth_dev *dev) +{ + struct zxdh_hw *hw = dev->data->dev_private; + int ret = 0; + + zxdh_intr_release(dev); + zxdh_pci_reset(hw); + + zxdh_dev_free_mbufs(dev); + zxdh_free_queues(dev); + + zxdh_bar_msg_chan_exit(); + + if (dev->data->mac_addrs != NULL) { + rte_free(dev->data->mac_addrs); + dev->data->mac_addrs = NULL; + } + + return ret; +} + +/* dev_ops for zxdh, bare necessities for basic operation */ +static const struct eth_dev_ops zxdh_eth_dev_ops = { + .dev_configure = zxdh_dev_configure, + .dev_close = zxdh_dev_close, + .dev_infos_get = zxdh_dev_infos_get, +}; + +static int32_t +zxdh_init_device(struct rte_eth_dev *eth_dev) +{ + struct zxdh_hw *hw = eth_dev->data->dev_private; + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); + int ret = 0; + + ret = zxdh_read_pci_caps(pci_dev, hw); + if (ret) { + PMD_DRV_LOG(ERR, "port 0x%x pci caps read failed.", hw->port_id); + goto err; + } + + zxdh_hw_internal[hw->port_id].zxdh_vtpci_ops = &zxdh_dev_pci_ops; + zxdh_pci_reset(hw); + zxdh_get_pci_dev_config(hw); + + rte_ether_addr_copy((struct rte_ether_addr *)hw->mac_addr, &eth_dev->data->mac_addrs[0]); + + /* If host does not support both status and MSI-X then disable LSC */ + if (vtpci_with_feature(hw, ZXDH_NET_F_STATUS) && hw->use_msix != ZXDH_MSIX_NONE) + eth_dev->data->dev_flags |= RTE_ETH_DEV_INTR_LSC; + else + eth_dev->data->dev_flags &= ~RTE_ETH_DEV_INTR_LSC; + + return 0; + +err: + PMD_DRV_LOG(ERR, "port %d init device failed", eth_dev->data->port_id); + return ret; +} + +static int +zxdh_agent_comm(struct rte_eth_dev *eth_dev, struct zxdh_hw *hw) +{ + if (zxdh_phyport_get(eth_dev, &hw->phyport) != 0) { + PMD_DRV_LOG(ERR, "Failed to get phyport"); + return -1; + } + PMD_DRV_LOG(INFO, "Get phyport success: 0x%x", hw->phyport); + + hw->vfid = zxdh_vport_to_vfid(hw->vport); + + if (zxdh_panelid_get(eth_dev, &hw->panel_id) != 0) { + PMD_DRV_LOG(ERR, "Failed to get panel_id"); + return -1; + } + PMD_DRV_LOG(INFO, "Get panel id success: 0x%x", hw->panel_id); + + return 0; +} + +static int +zxdh_eth_dev_init(struct rte_eth_dev *eth_dev) +{ + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); + struct zxdh_hw *hw = eth_dev->data->dev_private; + int ret = 0; + + eth_dev->dev_ops = &zxdh_eth_dev_ops; + + /* Allocate memory for storing MAC addresses */ + eth_dev->data->mac_addrs = rte_zmalloc("zxdh_mac", + ZXDH_MAX_MAC_ADDRS * RTE_ETHER_ADDR_LEN, 0); + if (eth_dev->data->mac_addrs == NULL) { + PMD_DRV_LOG(ERR, "Failed to allocate %d bytes to store MAC addresses", + ZXDH_MAX_MAC_ADDRS * RTE_ETHER_ADDR_LEN); + return -ENOMEM; + } + + memset(hw, 0, sizeof(*hw)); + hw->bar_addr[0] = (uint64_t)pci_dev->mem_resource[0].addr; + if (hw->bar_addr[0] == 0) { + PMD_DRV_LOG(ERR, "Bad mem resource."); + return -EIO; + } + + hw->device_id = pci_dev->id.device_id; + hw->port_id = eth_dev->data->port_id; + hw->eth_dev = eth_dev; + hw->speed = RTE_ETH_SPEED_NUM_UNKNOWN; + hw->duplex = RTE_ETH_LINK_FULL_DUPLEX; + hw->is_pf = 0; + + if (pci_dev->id.device_id == 
ZXDH_E310_PF_DEVICEID || + pci_dev->id.device_id == ZXDH_E312_PF_DEVICEID) { + hw->is_pf = 1; + } + + ret = zxdh_init_device(eth_dev); + if (ret < 0) + goto err_zxdh_init; + + ret = zxdh_msg_chan_init(); + if (ret != 0) { + PMD_DRV_LOG(ERR, "Failed to init bar msg chan"); + goto err_zxdh_init; + } + hw->msg_chan_init = 1; + + ret = zxdh_msg_chan_hwlock_init(eth_dev); + if (ret != 0) { + PMD_DRV_LOG(ERR, "zxdh_msg_chan_hwlock_init failed ret %d", ret); + goto err_zxdh_init; + } + + ret = zxdh_msg_chan_enable(eth_dev); + if (ret != 0) { + PMD_DRV_LOG(ERR, "zxdh_msg_bar_chan_enable failed ret %d", ret); + goto err_zxdh_init; + } + + ret = zxdh_agent_comm(eth_dev, hw); + if (ret != 0) + goto err_zxdh_init; + + ret = zxdh_configure_intr(eth_dev); + if (ret != 0) + goto err_zxdh_init; + + return ret; + +err_zxdh_init: + zxdh_intr_release(eth_dev); + zxdh_bar_msg_chan_exit(); + rte_free(eth_dev->data->mac_addrs); + eth_dev->data->mac_addrs = NULL; + return ret; +} + +static int +zxdh_eth_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, + struct rte_pci_device *pci_dev) +{ + return rte_eth_dev_pci_generic_probe(pci_dev, + sizeof(struct zxdh_hw), + zxdh_eth_dev_init); +} + +static int +zxdh_eth_dev_uninit(struct rte_eth_dev *eth_dev) +{ + int ret = 0; + + ret = zxdh_dev_close(eth_dev); + + return ret; +} + +static int +zxdh_eth_pci_remove(struct rte_pci_device *pci_dev) +{ + int ret = rte_eth_dev_pci_generic_remove(pci_dev, zxdh_eth_dev_uninit); + + return ret; +} + +static const struct rte_pci_id pci_id_zxdh_map[] = { + {RTE_PCI_DEVICE(ZXDH_PCI_VENDOR_ID, ZXDH_E310_PF_DEVICEID)}, + {RTE_PCI_DEVICE(ZXDH_PCI_VENDOR_ID, ZXDH_E310_VF_DEVICEID)}, + {RTE_PCI_DEVICE(ZXDH_PCI_VENDOR_ID, ZXDH_E312_PF_DEVICEID)}, + {RTE_PCI_DEVICE(ZXDH_PCI_VENDOR_ID, ZXDH_E312_VF_DEVICEID)}, + {.vendor_id = 0, /* sentinel */ }, +}; +static struct rte_pci_driver zxdh_pmd = { + .id_table = pci_id_zxdh_map, + .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC, + .probe = zxdh_eth_pci_probe, + .remove = zxdh_eth_pci_remove, +}; + +RTE_PMD_REGISTER_PCI(net_zxdh, zxdh_pmd); +RTE_PMD_REGISTER_PCI_TABLE(net_zxdh, pci_id_zxdh_map); +RTE_PMD_REGISTER_KMOD_DEP(net_zxdh, "* vfio-pci"); +RTE_LOG_REGISTER_SUFFIX(zxdh_logtype_driver, driver, NOTICE); +RTE_LOG_REGISTER_SUFFIX(zxdh_logtype_rx, rx, NOTICE); +RTE_LOG_REGISTER_SUFFIX(zxdh_logtype_tx, tx, NOTICE); +RTE_LOG_REGISTER_SUFFIX(zxdh_logtype_msg, msg, NOTICE); diff --git a/drivers/net/zxdh/zxdh_ethdev.h b/drivers/net/zxdh/zxdh_ethdev.h new file mode 100644 index 0000000000..7658cbb461 --- /dev/null +++ b/drivers/net/zxdh/zxdh_ethdev.h @@ -0,0 +1,94 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2024 ZTE Corporation + */ + +#ifndef ZXDH_ETHDEV_H +#define ZXDH_ETHDEV_H + +#include +#include "ethdev_driver.h" +#include +#include + +/* ZXDH PCI vendor/device ID. 
*/ +#define ZXDH_PCI_VENDOR_ID 0x1cf2 + +#define ZXDH_E310_PF_DEVICEID 0x8061 +#define ZXDH_E310_VF_DEVICEID 0x8062 +#define ZXDH_E312_PF_DEVICEID 0x8049 +#define ZXDH_E312_VF_DEVICEID 0x8060 + +#define ZXDH_MAX_UC_MAC_ADDRS 32 +#define ZXDH_MAX_MC_MAC_ADDRS 32 +#define ZXDH_MAX_MAC_ADDRS (ZXDH_MAX_UC_MAC_ADDRS + ZXDH_MAX_MC_MAC_ADDRS) + +#define ZXDH_NUM_BARS 2 +#define ZXDH_RX_QUEUES_MAX 128U +#define ZXDH_TX_QUEUES_MAX 128U +#define ZXDH_MIN_RX_BUFSIZE 64 +#define ZXDH_MAX_RX_PKTLEN 14000U +#define ZXDH_QUEUE_DEPTH 1024 +#define ZXDH_QUEUES_BASE 0 +#define ZXDH_TOTAL_QUEUES_NUM 4096 +#define ZXDH_QUEUES_NUM_MAX 256 +#define ZXDH_QUERES_SHARE_BASE (0x5000) + +#define ZXDH_MBUF_BURST_SZ 64 + +union zxdh_virport_num { + uint16_t vport; + struct { + uint16_t vfid:8; + uint16_t pfid:3; + uint16_t vf_flag:1; + uint16_t epid:3; + uint16_t direct_flag:1; + }; +}; + +struct zxdh_chnl_context { + uint16_t valid; + uint16_t ph_chno; +}; + +struct zxdh_hw { + struct rte_eth_dev *eth_dev; + struct zxdh_pci_common_cfg *common_cfg; + struct zxdh_net_config *dev_cfg; + struct rte_intr_handle *risc_intr; + struct rte_intr_handle *dtb_intr; + struct zxdh_virtqueue **vqs; + struct zxdh_chnl_context channel_context[ZXDH_QUEUES_NUM_MAX]; + union zxdh_virport_num vport; + + uint64_t bar_addr[ZXDH_NUM_BARS]; + uint64_t host_features; + uint64_t guest_features; + uint32_t max_queue_pairs; + uint32_t speed; + uint32_t notify_off_multiplier; + uint16_t *notify_base; + uint16_t pcie_id; + uint16_t device_id; + uint16_t port_id; + uint16_t vfid; + uint16_t queue_num; + + uint8_t *isr; + uint8_t weak_barriers; + uint8_t intr_enabled; + uint8_t use_msix; + uint8_t mac_addr[RTE_ETHER_ADDR_LEN]; + + uint8_t duplex; + uint8_t is_pf; + uint8_t msg_chan_init; + uint8_t phyport; + uint8_t panel_id; + uint8_t has_tx_offload; + uint8_t has_rx_offload; +}; + +uint16_t zxdh_vport_to_vfid(union zxdh_virport_num v); + +#endif /* ZXDH_ETHDEV_H */ diff --git a/drivers/net/zxdh/zxdh_logs.h b/drivers/net/zxdh/zxdh_logs.h new file mode 100644 index 0000000000..ad864a2089 --- /dev/null +++ b/drivers/net/zxdh/zxdh_logs.h @@ -0,0 +1,30 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2024 ZTE Corporation + */ + +#ifndef ZXDH_LOGS_H +#define ZXDH_LOGS_H + +#include + +extern int zxdh_logtype_driver; +#define RTE_LOGTYPE_ZXDH_DRIVER zxdh_logtype_driver +#define PMD_DRV_LOG(level, ...) \ + RTE_LOG_LINE_PREFIX(level, ZXDH_DRIVER, "%s(): ", __func__, __VA_ARGS__) + +extern int zxdh_logtype_rx; +#define RTE_LOGTYPE_ZXDH_RX zxdh_logtype_rx +#define PMD_RX_LOG(level, ...) \ + RTE_LOG_LINE_PREFIX(level, ZXDH_RX, "%s(): ", __func__, __VA_ARGS__) + +extern int zxdh_logtype_tx; +#define RTE_LOGTYPE_ZXDH_TX zxdh_logtype_tx +#define PMD_TX_LOG(level, ...) \ + RTE_LOG_LINE_PREFIX(level, ZXDH_TX, "%s(): ", __func__, __VA_ARGS__) + +extern int zxdh_logtype_msg; +#define RTE_LOGTYPE_ZXDH_MSG zxdh_logtype_msg +#define PMD_MSG_LOG(level, ...) 
\ + RTE_LOG_LINE_PREFIX(level, ZXDH_MSG, "%s(): ", __func__, __VA_ARGS__) + +#endif /* ZXDH_LOGS_H */ diff --git a/drivers/net/zxdh/zxdh_msg.c b/drivers/net/zxdh/zxdh_msg.c new file mode 100644 index 0000000000..53cf972f86 --- /dev/null +++ b/drivers/net/zxdh/zxdh_msg.c @@ -0,0 +1,1037 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2024 ZTE Corporation + */ + +#include + +#include +#include +#include +#include +#include +#include + +#include "zxdh_ethdev.h" +#include "zxdh_logs.h" +#include "zxdh_msg.h" + +#define ZXDH_REPS_INFO_FLAG_USABLE 0x00 +#define ZXDH_BAR_SEQID_NUM_MAX 256 +#define ZXDH_REPS_INFO_FLAG_USED 0xa0 + +#define ZXDH_PCIEID_IS_PF_MASK (0x0800) +#define ZXDH_PCIEID_PF_IDX_MASK (0x0700) +#define ZXDH_PCIEID_VF_IDX_MASK (0x00ff) +#define ZXDH_PCIEID_EP_IDX_MASK (0x7000) +/* PCIEID bit field offset */ +#define ZXDH_PCIEID_PF_IDX_OFFSET (8) +#define ZXDH_PCIEID_EP_IDX_OFFSET (12) + +#define ZXDH_MULTIPLY_BY_8(x) ((x) << 3) +#define ZXDH_MULTIPLY_BY_32(x) ((x) << 5) +#define ZXDH_MULTIPLY_BY_256(x) ((x) << 8) + +#define ZXDH_MAX_EP_NUM (4) +#define ZXDH_MAX_HARD_SPINLOCK_NUM (511) + +#define ZXDH_LOCK_PRIMARY_ID_MASK (0x8000) +/* bar offset */ +#define ZXDH_BAR0_CHAN_RISC_OFFSET (0x2000) +#define ZXDH_BAR0_CHAN_PFVF_OFFSET (0x3000) +#define ZXDH_BAR0_SPINLOCK_OFFSET (0x4000) +#define ZXDH_FW_SHRD_OFFSET (0x5000) +#define ZXDH_FW_SHRD_INNER_HW_LABEL_PAT (0x800) +#define ZXDH_HW_LABEL_OFFSET \ + (ZXDH_FW_SHRD_OFFSET + ZXDH_FW_SHRD_INNER_HW_LABEL_PAT) + +#define ZXDH_CHAN_RISC_SPINLOCK_OFFSET \ + (ZXDH_BAR0_SPINLOCK_OFFSET - ZXDH_BAR0_CHAN_RISC_OFFSET) +#define ZXDH_CHAN_PFVF_SPINLOCK_OFFSET \ + (ZXDH_BAR0_SPINLOCK_OFFSET - ZXDH_BAR0_CHAN_PFVF_OFFSET) +#define ZXDH_CHAN_RISC_LABEL_OFFSET \ + (ZXDH_HW_LABEL_OFFSET - ZXDH_BAR0_CHAN_RISC_OFFSET) +#define ZXDH_CHAN_PFVF_LABEL_OFFSET \ + (ZXDH_HW_LABEL_OFFSET - ZXDH_BAR0_CHAN_PFVF_OFFSET) + +#define ZXDH_REPS_HEADER_LEN_OFFSET 1 +#define ZXDH_REPS_HEADER_PAYLOAD_OFFSET 4 +#define ZXDH_REPS_HEADER_REPLYED 0xff + +#define ZXDH_BAR_MSG_CHAN_USABLE 0 +#define ZXDH_BAR_MSG_CHAN_USED 1 + +#define ZXDH_BAR_MSG_POL_MASK (0x10) +#define ZXDH_BAR_MSG_POL_OFFSET (4) + +#define ZXDH_BAR_ALIGN_WORD_MASK 0xfffffffc +#define ZXDH_BAR_MSG_VALID_MASK 1 +#define ZXDH_BAR_MSG_VALID_OFFSET 0 + +#define ZXDH_BAR_PF_NUM 7 +#define ZXDH_BAR_VF_NUM 256 +#define ZXDH_BAR_INDEX_PF_TO_VF 0 +#define ZXDH_BAR_INDEX_MPF_TO_MPF 0xff +#define ZXDH_BAR_INDEX_MPF_TO_PFVF 0 +#define ZXDH_BAR_INDEX_PFVF_TO_MPF 0 + +#define ZXDH_MAX_HARD_SPINLOCK_ASK_TIMES (1000) +#define ZXDH_SPINLOCK_POLLING_SPAN_US (100) + +#define ZXDH_BAR_MSG_SRC_NUM 3 +#define ZXDH_BAR_MSG_SRC_MPF 0 +#define ZXDH_BAR_MSG_SRC_PF 1 +#define ZXDH_BAR_MSG_SRC_VF 2 +#define ZXDH_BAR_MSG_SRC_ERR 0xff +#define ZXDH_BAR_MSG_DST_NUM 3 +#define ZXDH_BAR_MSG_DST_RISC 0 +#define ZXDH_BAR_MSG_DST_MPF 2 +#define ZXDH_BAR_MSG_DST_PFVF 1 +#define ZXDH_BAR_MSG_DST_ERR 0xff + +#define ZXDH_LOCK_TYPE_HARD (1) +#define ZXDH_LOCK_TYPE_SOFT (0) +#define ZXDH_BAR_INDEX_TO_RISC 0 + +#define ZXDH_BAR_CHAN_INDEX_SEND 0 +#define ZXDH_BAR_CHAN_INDEX_RECV 1 + +#define ZXDH_BAR_CHAN_MSG_SYNC 0 +#define ZXDH_BAR_CHAN_MSG_NO_EMEC 0 +#define ZXDH_BAR_CHAN_MSG_EMEC 1 +#define ZXDH_BAR_CHAN_MSG_NO_ACK 0 +#define ZXDH_BAR_CHAN_MSG_ACK 1 + +uint8_t subchan_id_tbl[ZXDH_BAR_MSG_SRC_NUM][ZXDH_BAR_MSG_DST_NUM] = { + {ZXDH_BAR_CHAN_INDEX_SEND, ZXDH_BAR_CHAN_INDEX_SEND, ZXDH_BAR_CHAN_INDEX_SEND}, + {ZXDH_BAR_CHAN_INDEX_SEND, ZXDH_BAR_CHAN_INDEX_SEND, ZXDH_BAR_CHAN_INDEX_RECV}, + {ZXDH_BAR_CHAN_INDEX_SEND, 
ZXDH_BAR_CHAN_INDEX_RECV, ZXDH_BAR_CHAN_INDEX_RECV} +}; + +uint8_t chan_id_tbl[ZXDH_BAR_MSG_SRC_NUM][ZXDH_BAR_MSG_DST_NUM] = { + {ZXDH_BAR_INDEX_TO_RISC, ZXDH_BAR_INDEX_MPF_TO_PFVF, ZXDH_BAR_INDEX_MPF_TO_MPF}, + {ZXDH_BAR_INDEX_TO_RISC, ZXDH_BAR_INDEX_PF_TO_VF, ZXDH_BAR_INDEX_PFVF_TO_MPF}, + {ZXDH_BAR_INDEX_TO_RISC, ZXDH_BAR_INDEX_PF_TO_VF, ZXDH_BAR_INDEX_PFVF_TO_MPF} +}; + +uint8_t lock_type_tbl[ZXDH_BAR_MSG_SRC_NUM][ZXDH_BAR_MSG_DST_NUM] = { + {ZXDH_LOCK_TYPE_HARD, ZXDH_LOCK_TYPE_HARD, ZXDH_LOCK_TYPE_HARD}, + {ZXDH_LOCK_TYPE_SOFT, ZXDH_LOCK_TYPE_SOFT, ZXDH_LOCK_TYPE_HARD}, + {ZXDH_LOCK_TYPE_HARD, ZXDH_LOCK_TYPE_HARD, ZXDH_LOCK_TYPE_HARD} +}; + +struct zxdh_dev_stat { + bool is_mpf_scanned; + bool is_res_init; + int16_t dev_cnt; /* probe cnt */ +}; + +struct zxdh_seqid_item { + void *reps_addr; + uint16_t id; + uint16_t buffer_len; + uint16_t flag; +}; + +struct zxdh_seqid_ring { + uint16_t cur_id; + rte_spinlock_t lock; + struct zxdh_seqid_item reps_info_tbl[ZXDH_BAR_SEQID_NUM_MAX]; +}; + +static struct zxdh_dev_stat g_dev_stat; +static struct zxdh_seqid_ring g_seqid_ring; +static uint8_t tmp_msg_header[ZXDH_BAR_MSG_ADDR_CHAN_INTERVAL]; +static rte_spinlock_t chan_lock; + +zxdh_bar_chan_msg_recv_callback msg_recv_func_tbl[ZXDH_BAR_MSG_MODULE_NUM]; + +static inline const char +*zxdh_module_id_name(int val) +{ + switch (val) { + case ZXDH_BAR_MODULE_DBG: return "ZXDH_BAR_MODULE_DBG"; + case ZXDH_BAR_MODULE_TBL: return "ZXDH_BAR_MODULE_TBL"; + case ZXDH_BAR_MODULE_MISX: return "ZXDH_BAR_MODULE_MISX"; + case ZXDH_BAR_MODULE_SDA: return "ZXDH_BAR_MODULE_SDA"; + case ZXDH_BAR_MODULE_RDMA: return "ZXDH_BAR_MODULE_RDMA"; + case ZXDH_BAR_MODULE_DEMO: return "ZXDH_BAR_MODULE_DEMO"; + case ZXDH_BAR_MODULE_SMMU: return "ZXDH_BAR_MODULE_SMMU"; + case ZXDH_BAR_MODULE_MAC: return "ZXDH_BAR_MODULE_MAC"; + case ZXDH_BAR_MODULE_VDPA: return "ZXDH_BAR_MODULE_VDPA"; + case ZXDH_BAR_MODULE_VQM: return "ZXDH_BAR_MODULE_VQM"; + case ZXDH_BAR_MODULE_NP: return "ZXDH_BAR_MODULE_NP"; + case ZXDH_BAR_MODULE_VPORT: return "ZXDH_BAR_MODULE_VPORT"; + case ZXDH_BAR_MODULE_BDF: return "ZXDH_BAR_MODULE_BDF"; + case ZXDH_BAR_MODULE_RISC_READY: return "ZXDH_BAR_MODULE_RISC_READY"; + case ZXDH_BAR_MODULE_REVERSE: return "ZXDH_BAR_MODULE_REVERSE"; + case ZXDH_BAR_MDOULE_NVME: return "ZXDH_BAR_MDOULE_NVME"; + case ZXDH_BAR_MDOULE_NPSDK: return "ZXDH_BAR_MDOULE_NPSDK"; + case ZXDH_BAR_MODULE_NP_TODO: return "ZXDH_BAR_MODULE_NP_TODO"; + case ZXDH_MODULE_BAR_MSG_TO_PF: return "ZXDH_MODULE_BAR_MSG_TO_PF"; + case ZXDH_MODULE_BAR_MSG_TO_VF: return "ZXDH_MODULE_BAR_MSG_TO_VF"; + case ZXDH_MODULE_FLASH: return "ZXDH_MODULE_FLASH"; + case ZXDH_BAR_MODULE_OFFSET_GET: return "ZXDH_BAR_MODULE_OFFSET_GET"; + case ZXDH_BAR_EVENT_OVS_WITH_VCB: return "ZXDH_BAR_EVENT_OVS_WITH_VCB"; + default: return "NA"; + } +} + +static uint16_t +zxdh_pcie_id_to_hard_lock(uint16_t src_pcieid, uint8_t dst) +{ + uint16_t lock_id = 0; + uint16_t pf_idx = (src_pcieid & ZXDH_PCIEID_PF_IDX_MASK) >> ZXDH_PCIEID_PF_IDX_OFFSET; + uint16_t ep_idx = (src_pcieid & ZXDH_PCIEID_EP_IDX_MASK) >> ZXDH_PCIEID_EP_IDX_OFFSET; + + switch (dst) { + /* msg to risc */ + case ZXDH_MSG_CHAN_END_RISC: + lock_id = ZXDH_MULTIPLY_BY_8(ep_idx) + pf_idx; + break; + /* msg to pf/vf */ + case ZXDH_MSG_CHAN_END_VF: + case ZXDH_MSG_CHAN_END_PF: + lock_id = ZXDH_MULTIPLY_BY_8(ep_idx) + pf_idx + + ZXDH_MULTIPLY_BY_8(1 + ZXDH_MAX_EP_NUM); + break; + default: + lock_id = 0; + break; + } + if (lock_id >= ZXDH_MAX_HARD_SPINLOCK_NUM) + lock_id = 0; + + return lock_id; +} + +static void 
+label_write(uint64_t label_lock_addr, uint32_t lock_id, uint16_t value) +{ + *(volatile uint16_t *)(label_lock_addr + lock_id * 2) = value; +} + +static void +spinlock_write(uint64_t virt_lock_addr, uint32_t lock_id, uint8_t data) +{ + *(volatile uint8_t *)((uint64_t)virt_lock_addr + (uint64_t)lock_id) = data; +} + +static uint8_t +spinlock_read(uint64_t virt_lock_addr, uint32_t lock_id) +{ + return *(volatile uint8_t *)((uint64_t)virt_lock_addr + (uint64_t)lock_id); +} + +static int32_t +zxdh_spinlock_lock(uint32_t virt_lock_id, uint64_t virt_addr, + uint64_t label_addr, uint16_t primary_id) +{ + uint32_t lock_rd_cnt = 0; + + do { + /* read to lock */ + uint8_t spl_val = spinlock_read(virt_addr, virt_lock_id); + + if (spl_val == 0) { + label_write((uint64_t)label_addr, virt_lock_id, primary_id); + break; + } + rte_delay_us_block(ZXDH_SPINLOCK_POLLING_SPAN_US); + lock_rd_cnt++; + } while (lock_rd_cnt < ZXDH_MAX_HARD_SPINLOCK_ASK_TIMES); + if (lock_rd_cnt >= ZXDH_MAX_HARD_SPINLOCK_ASK_TIMES) + return -1; + + return 0; +} + +static int32_t +zxdh_spinlock_unlock(uint32_t virt_lock_id, uint64_t virt_addr, uint64_t label_addr) +{ + label_write((uint64_t)label_addr, virt_lock_id, 0); + spinlock_write(virt_addr, virt_lock_id, 0); + return 0; +} + +/** + * Fun: PF init hard_spinlock addr + */ +static int +bar_chan_pf_init_spinlock(uint16_t pcie_id, uint64_t bar_base_addr) +{ + int lock_id = zxdh_pcie_id_to_hard_lock(pcie_id, ZXDH_MSG_CHAN_END_RISC); + + zxdh_spinlock_unlock(lock_id, bar_base_addr + ZXDH_BAR0_SPINLOCK_OFFSET, + bar_base_addr + ZXDH_HW_LABEL_OFFSET); + lock_id = zxdh_pcie_id_to_hard_lock(pcie_id, ZXDH_MSG_CHAN_END_VF); + zxdh_spinlock_unlock(lock_id, bar_base_addr + ZXDH_BAR0_SPINLOCK_OFFSET, + bar_base_addr + ZXDH_HW_LABEL_OFFSET); + return 0; +} + +int +zxdh_msg_chan_hwlock_init(struct rte_eth_dev *dev) +{ + struct zxdh_hw *hw = dev->data->dev_private; + + if (!hw->is_pf) + return 0; + return bar_chan_pf_init_spinlock(hw->pcie_id, (uint64_t)(hw->bar_addr[ZXDH_BAR0_INDEX])); +} + +int +zxdh_msg_chan_init(void) +{ + uint16_t seq_id = 0; + + g_dev_stat.dev_cnt++; + if (g_dev_stat.is_res_init) + return ZXDH_BAR_MSG_OK; + + rte_spinlock_init(&chan_lock); + g_seqid_ring.cur_id = 0; + rte_spinlock_init(&g_seqid_ring.lock); + + for (seq_id = 0; seq_id < ZXDH_BAR_SEQID_NUM_MAX; seq_id++) { + struct zxdh_seqid_item *reps_info = &g_seqid_ring.reps_info_tbl[seq_id]; + + reps_info->id = seq_id; + reps_info->flag = ZXDH_REPS_INFO_FLAG_USABLE; + } + g_dev_stat.is_res_init = true; + return ZXDH_BAR_MSG_OK; +} + +int +zxdh_bar_msg_chan_exit(void) +{ + if (!g_dev_stat.is_res_init || (--g_dev_stat.dev_cnt > 0)) + return ZXDH_BAR_MSG_OK; + + g_dev_stat.is_res_init = false; + return ZXDH_BAR_MSG_OK; +} + +static int +zxdh_bar_chan_msgid_allocate(uint16_t *msgid) +{ + struct zxdh_seqid_item *seqid_reps_info = NULL; + + rte_spinlock_lock(&g_seqid_ring.lock); + uint16_t g_id = g_seqid_ring.cur_id; + uint16_t count = 0; + int rc = 0; + + do { + count++; + ++g_id; + g_id %= ZXDH_BAR_SEQID_NUM_MAX; + seqid_reps_info = &g_seqid_ring.reps_info_tbl[g_id]; + } while ((seqid_reps_info->flag != ZXDH_REPS_INFO_FLAG_USABLE) && + (count < ZXDH_BAR_SEQID_NUM_MAX)); + + if (count >= ZXDH_BAR_SEQID_NUM_MAX) { + rc = -1; + goto out; + } + seqid_reps_info->flag = ZXDH_REPS_INFO_FLAG_USED; + g_seqid_ring.cur_id = g_id; + *msgid = g_id; + rc = ZXDH_BAR_MSG_OK; + +out: + rte_spinlock_unlock(&g_seqid_ring.lock); + return rc; +} + +static uint16_t +zxdh_bar_chan_save_recv_info(struct zxdh_msg_recviver_mem *result, uint16_t 
*msg_id) +{ + int ret = zxdh_bar_chan_msgid_allocate(msg_id); + + if (ret != ZXDH_BAR_MSG_OK) + return ZXDH_BAR_MSG_ERR_MSGID; + + PMD_MSG_LOG(DEBUG, "allocate msg_id: %u", *msg_id); + struct zxdh_seqid_item *reps_info = &g_seqid_ring.reps_info_tbl[*msg_id]; + + reps_info->reps_addr = result->recv_buffer; + reps_info->buffer_len = result->buffer_len; + return ZXDH_BAR_MSG_OK; +} + +static uint8_t +zxdh_bar_msg_src_index_trans(uint8_t src) +{ + uint8_t src_index = 0; + + switch (src) { + case ZXDH_MSG_CHAN_END_MPF: + src_index = ZXDH_BAR_MSG_SRC_MPF; + break; + case ZXDH_MSG_CHAN_END_PF: + src_index = ZXDH_BAR_MSG_SRC_PF; + break; + case ZXDH_MSG_CHAN_END_VF: + src_index = ZXDH_BAR_MSG_SRC_VF; + break; + default: + src_index = ZXDH_BAR_MSG_SRC_ERR; + break; + } + return src_index; +} + +static uint8_t +zxdh_bar_msg_dst_index_trans(uint8_t dst) +{ + uint8_t dst_index = 0; + + switch (dst) { + case ZXDH_MSG_CHAN_END_MPF: + dst_index = ZXDH_BAR_MSG_DST_MPF; + break; + case ZXDH_MSG_CHAN_END_PF: + dst_index = ZXDH_BAR_MSG_DST_PFVF; + break; + case ZXDH_MSG_CHAN_END_VF: + dst_index = ZXDH_BAR_MSG_DST_PFVF; + break; + case ZXDH_MSG_CHAN_END_RISC: + dst_index = ZXDH_BAR_MSG_DST_RISC; + break; + default: + dst_index = ZXDH_BAR_MSG_SRC_ERR; + break; + } + return dst_index; +} + +static int +zxdh_bar_chan_send_para_check(struct zxdh_pci_bar_msg *in, + struct zxdh_msg_recviver_mem *result) +{ + uint8_t src_index = 0; + uint8_t dst_index = 0; + + if (in == NULL || result == NULL) { + PMD_MSG_LOG(ERR, "send para ERR: null para."); + return ZXDH_BAR_MSG_ERR_NULL_PARA; + } + src_index = zxdh_bar_msg_src_index_trans(in->src); + dst_index = zxdh_bar_msg_dst_index_trans(in->dst); + + if (src_index == ZXDH_BAR_MSG_SRC_ERR || dst_index == ZXDH_BAR_MSG_DST_ERR) { + PMD_MSG_LOG(ERR, "send para ERR: chan doesn't exist."); + return ZXDH_BAR_MSG_ERR_TYPE; + } + if (in->module_id >= ZXDH_BAR_MSG_MODULE_NUM) { + PMD_MSG_LOG(ERR, "send para ERR: invalid module_id: %d.", in->module_id); + return ZXDH_BAR_MSG_ERR_MODULE; + } + if (in->payload_addr == NULL) { + PMD_MSG_LOG(ERR, "send para ERR: null message."); + return ZXDH_BAR_MSG_ERR_BODY_NULL; + } + if (in->payload_len > ZXDH_BAR_MSG_PAYLOAD_MAX_LEN) { + PMD_MSG_LOG(ERR, "send para ERR: len %d is too long.", in->payload_len); + return ZXDH_BAR_MSG_ERR_LEN; + } + if (in->virt_addr == 0 || result->recv_buffer == NULL) { + PMD_MSG_LOG(ERR, "send para ERR: virt_addr or recv_buffer is NULL."); + return ZXDH_BAR_MSG_ERR_VIRTADDR_NULL; + } + if (result->buffer_len < ZXDH_REPS_HEADER_PAYLOAD_OFFSET) + PMD_MSG_LOG(ERR, "recv buffer len is short than minimal 4 bytes"); + + return ZXDH_BAR_MSG_OK; +} + +static uint64_t +zxdh_subchan_addr_cal(uint64_t virt_addr, uint8_t chan_id, uint8_t subchan_id) +{ + return virt_addr + (2 * chan_id + subchan_id) * ZXDH_BAR_MSG_ADDR_CHAN_INTERVAL; +} + +static uint16_t +zxdh_bar_chan_subchan_addr_get(struct zxdh_pci_bar_msg *in, uint64_t *subchan_addr) +{ + uint8_t src_index = zxdh_bar_msg_src_index_trans(in->src); + uint8_t dst_index = zxdh_bar_msg_dst_index_trans(in->dst); + uint16_t chan_id = chan_id_tbl[src_index][dst_index]; + uint16_t subchan_id = subchan_id_tbl[src_index][dst_index]; + + *subchan_addr = zxdh_subchan_addr_cal(in->virt_addr, chan_id, subchan_id); + return ZXDH_BAR_MSG_OK; +} + +static int +zxdh_bar_hard_lock(uint16_t src_pcieid, uint8_t dst, uint64_t virt_addr) +{ + int ret = 0; + uint16_t lockid = zxdh_pcie_id_to_hard_lock(src_pcieid, dst); + + PMD_MSG_LOG(DEBUG, "dev pcieid: 0x%x lock, get hardlockid: %u", src_pcieid, 
lockid); + if (dst == ZXDH_MSG_CHAN_END_RISC) + ret = zxdh_spinlock_lock(lockid, virt_addr + ZXDH_CHAN_RISC_SPINLOCK_OFFSET, + virt_addr + ZXDH_CHAN_RISC_LABEL_OFFSET, + src_pcieid | ZXDH_LOCK_PRIMARY_ID_MASK); + else + ret = zxdh_spinlock_lock(lockid, virt_addr + ZXDH_CHAN_PFVF_SPINLOCK_OFFSET, + virt_addr + ZXDH_CHAN_PFVF_LABEL_OFFSET, + src_pcieid | ZXDH_LOCK_PRIMARY_ID_MASK); + + return ret; +} + +static void +zxdh_bar_hard_unlock(uint16_t src_pcieid, uint8_t dst, uint64_t virt_addr) +{ + uint16_t lockid = zxdh_pcie_id_to_hard_lock(src_pcieid, dst); + + PMD_MSG_LOG(DEBUG, "dev pcieid: 0x%x unlock, get hardlockid: %u", src_pcieid, lockid); + if (dst == ZXDH_MSG_CHAN_END_RISC) + zxdh_spinlock_unlock(lockid, virt_addr + ZXDH_CHAN_RISC_SPINLOCK_OFFSET, + virt_addr + ZXDH_CHAN_RISC_LABEL_OFFSET); + else + zxdh_spinlock_unlock(lockid, virt_addr + ZXDH_CHAN_PFVF_SPINLOCK_OFFSET, + virt_addr + ZXDH_CHAN_PFVF_LABEL_OFFSET); +} + +static int +zxdh_bar_chan_lock(uint8_t src, uint8_t dst, uint16_t src_pcieid, uint64_t virt_addr) +{ + int ret = 0; + uint8_t src_index = zxdh_bar_msg_src_index_trans(src); + uint8_t dst_index = zxdh_bar_msg_dst_index_trans(dst); + + if (src_index == ZXDH_BAR_MSG_SRC_ERR || dst_index == ZXDH_BAR_MSG_DST_ERR) { + PMD_MSG_LOG(ERR, "lock ERR: chan doesn't exist."); + return ZXDH_BAR_MSG_ERR_TYPE; + } + + ret = zxdh_bar_hard_lock(src_pcieid, dst, virt_addr); + if (ret != 0) + PMD_MSG_LOG(ERR, "dev: 0x%x failed to lock.", src_pcieid); + + return ret; +} + +static int +zxdh_bar_chan_unlock(uint8_t src, uint8_t dst, uint16_t src_pcieid, uint64_t virt_addr) +{ + uint8_t src_index = zxdh_bar_msg_src_index_trans(src); + uint8_t dst_index = zxdh_bar_msg_dst_index_trans(dst); + + if (src_index == ZXDH_BAR_MSG_SRC_ERR || dst_index == ZXDH_BAR_MSG_DST_ERR) { + PMD_MSG_LOG(ERR, "unlock ERR: chan doesn't exist."); + return ZXDH_BAR_MSG_ERR_TYPE; + } + + zxdh_bar_hard_unlock(src_pcieid, dst, virt_addr); + + return ZXDH_BAR_MSG_OK; +} + +static void +zxdh_bar_chan_msgid_free(uint16_t msg_id) +{ + struct zxdh_seqid_item *seqid_reps_info = &g_seqid_ring.reps_info_tbl[msg_id]; + + rte_spinlock_lock(&g_seqid_ring.lock); + seqid_reps_info->flag = ZXDH_REPS_INFO_FLAG_USABLE; + PMD_MSG_LOG(DEBUG, "free msg_id: %u", msg_id); + rte_spinlock_unlock(&g_seqid_ring.lock); +} + +static int +zxdh_bar_chan_reg_write(uint64_t subchan_addr, uint32_t offset, uint32_t data) +{ + uint32_t algin_offset = (offset & ZXDH_BAR_ALIGN_WORD_MASK); + + if (unlikely(algin_offset >= ZXDH_BAR_MSG_ADDR_CHAN_INTERVAL)) { + PMD_MSG_LOG(ERR, "algin_offset exceeds channel size!"); + return -1; + } + *(uint32_t *)(subchan_addr + algin_offset) = data; + return 0; +} + +static int +zxdh_bar_chan_reg_read(uint64_t subchan_addr, uint32_t offset, uint32_t *pdata) +{ + uint32_t algin_offset = (offset & ZXDH_BAR_ALIGN_WORD_MASK); + + if (unlikely(algin_offset >= ZXDH_BAR_MSG_ADDR_CHAN_INTERVAL)) { + PMD_MSG_LOG(ERR, "algin_offset exceeds channel size!"); + return -1; + } + *pdata = *(uint32_t *)(subchan_addr + algin_offset); + return 0; +} + +static uint16_t +zxdh_bar_chan_msg_header_set(uint64_t subchan_addr, + struct zxdh_bar_msg_header *msg_header) +{ + uint32_t *data = (uint32_t *)msg_header; + uint16_t i; + + for (i = 0; i < (ZXDH_BAR_MSG_PLAYLOAD_OFFSET >> 2); i++) + zxdh_bar_chan_reg_write(subchan_addr, i * 4, *(data + i)); + + return ZXDH_BAR_MSG_OK; +} + +static uint16_t +zxdh_bar_chan_msg_header_get(uint64_t subchan_addr, + struct zxdh_bar_msg_header *msg_header) +{ + uint32_t *data = (uint32_t *)msg_header; + uint16_t i; 
+ + for (i = 0; i < (ZXDH_BAR_MSG_PLAYLOAD_OFFSET >> 2); i++) + zxdh_bar_chan_reg_read(subchan_addr, i * 4, data + i); + + return ZXDH_BAR_MSG_OK; +} + +static uint16_t +zxdh_bar_chan_msg_payload_set(uint64_t subchan_addr, uint8_t *msg, uint16_t len) +{ + uint32_t *data = (uint32_t *)msg; + uint32_t count = (len >> 2); + uint32_t remain = (len & 0x3); + uint32_t remain_data = 0; + uint32_t i; + + for (i = 0; i < count; i++) + zxdh_bar_chan_reg_write(subchan_addr, 4 * i + + ZXDH_BAR_MSG_PLAYLOAD_OFFSET, *(data + i)); + if (remain) { + for (i = 0; i < remain; i++) + remain_data |= *((uint8_t *)(msg + len - remain + i)) << (8 * i); + + zxdh_bar_chan_reg_write(subchan_addr, 4 * count + + ZXDH_BAR_MSG_PLAYLOAD_OFFSET, remain_data); + } + return ZXDH_BAR_MSG_OK; +} + +static uint16_t +zxdh_bar_chan_msg_payload_get(uint64_t subchan_addr, uint8_t *msg, uint16_t len) +{ + uint32_t *data = (uint32_t *)msg; + uint32_t count = (len >> 2); + uint32_t remain_data = 0; + uint32_t remain = (len & 0x3); + uint32_t i; + + for (i = 0; i < count; i++) + zxdh_bar_chan_reg_read(subchan_addr, 4 * i + + ZXDH_BAR_MSG_PLAYLOAD_OFFSET, (data + i)); + if (remain) { + zxdh_bar_chan_reg_read(subchan_addr, 4 * count + + ZXDH_BAR_MSG_PLAYLOAD_OFFSET, &remain_data); + for (i = 0; i < remain; i++) + *((uint8_t *)(msg + (len - remain + i))) = remain_data >> (8 * i); + } + return ZXDH_BAR_MSG_OK; +} + +static uint16_t +zxdh_bar_chan_msg_valid_set(uint64_t subchan_addr, uint8_t valid_label) +{ + uint32_t data; + + zxdh_bar_chan_reg_read(subchan_addr, ZXDH_BAR_MSG_VALID_OFFSET, &data); + data &= (~ZXDH_BAR_MSG_VALID_MASK); + data |= (uint32_t)valid_label; + zxdh_bar_chan_reg_write(subchan_addr, ZXDH_BAR_MSG_VALID_OFFSET, data); + return ZXDH_BAR_MSG_OK; +} + +static uint16_t +zxdh_bar_chan_msg_send(uint64_t subchan_addr, void *payload_addr, + uint16_t payload_len, struct zxdh_bar_msg_header *msg_header) +{ + uint16_t ret = 0; + ret = zxdh_bar_chan_msg_header_set(subchan_addr, msg_header); + + ret = zxdh_bar_chan_msg_header_get(subchan_addr, + (struct zxdh_bar_msg_header *)tmp_msg_header); + + ret = zxdh_bar_chan_msg_payload_set(subchan_addr, + (uint8_t *)(payload_addr), payload_len); + + ret = zxdh_bar_chan_msg_payload_get(subchan_addr, + tmp_msg_header, payload_len); + + ret = zxdh_bar_chan_msg_valid_set(subchan_addr, ZXDH_BAR_MSG_CHAN_USED); + return ret; +} + +static uint16_t +zxdh_bar_msg_valid_stat_get(uint64_t subchan_addr) +{ + uint32_t data; + + zxdh_bar_chan_reg_read(subchan_addr, ZXDH_BAR_MSG_VALID_OFFSET, &data); + if (ZXDH_BAR_MSG_CHAN_USABLE == (data & ZXDH_BAR_MSG_VALID_MASK)) + return ZXDH_BAR_MSG_CHAN_USABLE; + + return ZXDH_BAR_MSG_CHAN_USED; +} + +static uint16_t +zxdh_bar_chan_msg_poltag_set(uint64_t subchan_addr, uint8_t label) +{ + uint32_t data; + + zxdh_bar_chan_reg_read(subchan_addr, ZXDH_BAR_MSG_VALID_OFFSET, &data); + data &= (~(uint32_t)ZXDH_BAR_MSG_POL_MASK); + data |= ((uint32_t)label << ZXDH_BAR_MSG_POL_OFFSET); + zxdh_bar_chan_reg_write(subchan_addr, ZXDH_BAR_MSG_VALID_OFFSET, data); + return ZXDH_BAR_MSG_OK; +} + +static uint16_t +zxdh_bar_chan_sync_msg_reps_get(uint64_t subchan_addr, + uint64_t recv_buffer, uint16_t buffer_len) +{ + struct zxdh_bar_msg_header msg_header = {0}; + uint16_t msg_id = 0; + uint16_t msg_len = 0; + + zxdh_bar_chan_msg_header_get(subchan_addr, &msg_header); + msg_id = msg_header.msg_id; + struct zxdh_seqid_item *reps_info = &g_seqid_ring.reps_info_tbl[msg_id]; + + if (reps_info->flag != ZXDH_REPS_INFO_FLAG_USED) { + PMD_MSG_LOG(ERR, "msg_id %u unused", msg_id); + 
return ZXDH_BAR_MSG_ERR_REPLY; + } + msg_len = msg_header.len; + + if (msg_len > buffer_len - 4) { + PMD_MSG_LOG(ERR, "recv buffer len is: %u, but reply msg len is: %u", + buffer_len, msg_len + 4); + return ZXDH_BAR_MSG_ERR_REPSBUFF_LEN; + } + uint8_t *recv_msg = (uint8_t *)recv_buffer; + + zxdh_bar_chan_msg_payload_get(subchan_addr, + recv_msg + ZXDH_REPS_HEADER_PAYLOAD_OFFSET, msg_len); + *(uint16_t *)(recv_msg + ZXDH_REPS_HEADER_LEN_OFFSET) = msg_len; + *recv_msg = ZXDH_REPS_HEADER_REPLYED; /* set reps's valid */ + return ZXDH_BAR_MSG_OK; +} + +int +zxdh_bar_chan_sync_msg_send(struct zxdh_pci_bar_msg *in, struct zxdh_msg_recviver_mem *result) +{ + struct zxdh_bar_msg_header msg_header = {0}; + uint16_t seq_id = 0; + uint64_t subchan_addr = 0; + uint32_t time_out_cnt = 0; + uint16_t valid = 0; + int ret = 0; + + ret = zxdh_bar_chan_send_para_check(in, result); + if (ret != ZXDH_BAR_MSG_OK) + goto exit; + + ret = zxdh_bar_chan_save_recv_info(result, &seq_id); + if (ret != ZXDH_BAR_MSG_OK) + goto exit; + + zxdh_bar_chan_subchan_addr_get(in, &subchan_addr); + + msg_header.sync = ZXDH_BAR_CHAN_MSG_SYNC; + msg_header.emec = in->emec; + msg_header.usr = 0; + msg_header.rsv = 0; + msg_header.module_id = in->module_id; + msg_header.len = in->payload_len; + msg_header.msg_id = seq_id; + msg_header.src_pcieid = in->src_pcieid; + msg_header.dst_pcieid = in->dst_pcieid; + + ret = zxdh_bar_chan_lock(in->src, in->dst, in->src_pcieid, in->virt_addr); + if (ret != ZXDH_BAR_MSG_OK) { + zxdh_bar_chan_msgid_free(seq_id); + goto exit; + } + zxdh_bar_chan_msg_send(subchan_addr, in->payload_addr, in->payload_len, &msg_header); + + do { + rte_delay_us_block(ZXDH_BAR_MSG_POLLING_SPAN); + valid = zxdh_bar_msg_valid_stat_get(subchan_addr); + ++time_out_cnt; + } while ((time_out_cnt < ZXDH_BAR_MSG_TIMEOUT_TH) && (valid == ZXDH_BAR_MSG_CHAN_USED)); + + if (time_out_cnt == ZXDH_BAR_MSG_TIMEOUT_TH && valid != ZXDH_BAR_MSG_CHAN_USABLE) { + zxdh_bar_chan_msg_valid_set(subchan_addr, ZXDH_BAR_MSG_CHAN_USABLE); + zxdh_bar_chan_msg_poltag_set(subchan_addr, 0); + PMD_MSG_LOG(ERR, "BAR MSG ERR: chan type time out."); + ret = ZXDH_BAR_MSG_ERR_TIME_OUT; + } else { + ret = zxdh_bar_chan_sync_msg_reps_get(subchan_addr, + (uint64_t)result->recv_buffer, result->buffer_len); + } + zxdh_bar_chan_msgid_free(seq_id); + zxdh_bar_chan_unlock(in->src, in->dst, in->src_pcieid, in->virt_addr); + +exit: + return ret; +} + +static int +zxdh_bar_get_sum(uint8_t *ptr, uint8_t len) +{ + uint64_t sum = 0; + int idx; + + for (idx = 0; idx < len; idx++) + sum += *(ptr + idx); + + return (uint16_t)sum; +} + +static int +zxdh_bar_chan_enable(struct zxdh_msix_para *para, uint16_t *vport) +{ + struct zxdh_bar_recv_msg recv_msg = {0}; + int ret = 0; + int check_token = 0; + int sum_res = 0; + + if (!para) + return ZXDH_BAR_MSG_ERR_NULL; + + struct zxdh_msix_msg msix_msg = { + .pcie_id = para->pcie_id, + .vector_risc = para->vector_risc, + .vector_pfvf = para->vector_pfvf, + .vector_mpf = para->vector_mpf, + }; + struct zxdh_pci_bar_msg in = { + .virt_addr = para->virt_addr, + .payload_addr = &msix_msg, + .payload_len = sizeof(msix_msg), + .emec = 0, + .src = para->driver_type, + .dst = ZXDH_MSG_CHAN_END_RISC, + .module_id = ZXDH_BAR_MODULE_MISX, + .src_pcieid = para->pcie_id, + .dst_pcieid = 0, + .usr = 0, + }; + + struct zxdh_msg_recviver_mem result = { + .recv_buffer = &recv_msg, + .buffer_len = sizeof(recv_msg), + }; + + ret = zxdh_bar_chan_sync_msg_send(&in, &result); + if (ret != ZXDH_BAR_MSG_OK) + return -ret; + + check_token = 
recv_msg.msix_reps.check; + sum_res = zxdh_bar_get_sum((uint8_t *)&msix_msg, sizeof(msix_msg)); + + if (check_token != sum_res) { + PMD_MSG_LOG(ERR, "expect token: 0x%x, get token: 0x%x.", sum_res, check_token); + return ZXDH_BAR_MSG_ERR_REPLY; + } + *vport = recv_msg.msix_reps.vport; + PMD_MSG_LOG(DEBUG, "vport of pcieid: 0x%x get success.", para->pcie_id); + return ZXDH_BAR_MSG_OK; +} + +int +zxdh_msg_chan_enable(struct rte_eth_dev *dev) +{ + struct zxdh_hw *hw = dev->data->dev_private; + struct zxdh_msix_para misx_info = { + .vector_risc = ZXDH_MSIX_FROM_RISCV, + .vector_pfvf = ZXDH_MSIX_FROM_PFVF, + .vector_mpf = ZXDH_MSIX_FROM_MPF, + .pcie_id = hw->pcie_id, + .driver_type = hw->is_pf ? ZXDH_MSG_CHAN_END_PF : ZXDH_MSG_CHAN_END_VF, + .virt_addr = (uint64_t)(hw->bar_addr[ZXDH_BAR0_INDEX] + ZXDH_CTRLCH_OFFSET), + }; + + return zxdh_bar_chan_enable(&misx_info, &hw->vport.vport); +} + +static uint64_t +zxdh_recv_addr_get(uint8_t src_type, uint8_t dst_type, uint64_t virt_addr) +{ + uint8_t chan_id = 0; + uint8_t subchan_id = 0; + uint8_t src = 0; + uint8_t dst = 0; + + src = zxdh_bar_msg_dst_index_trans(src_type); + dst = zxdh_bar_msg_src_index_trans(dst_type); + if (src == ZXDH_BAR_MSG_SRC_ERR || dst == ZXDH_BAR_MSG_DST_ERR) + return 0; + + chan_id = chan_id_tbl[dst][src]; + subchan_id = 1 - subchan_id_tbl[dst][src]; + + return zxdh_subchan_addr_cal(virt_addr, chan_id, subchan_id); +} + +static void +zxdh_bar_msg_ack_async_msg_proc(struct zxdh_bar_msg_header *msg_header, + uint8_t *receiver_buff) +{ + struct zxdh_seqid_item *reps_info = &g_seqid_ring.reps_info_tbl[msg_header->msg_id]; + + if (reps_info->flag != ZXDH_REPS_INFO_FLAG_USED) { + PMD_MSG_LOG(ERR, "msg_id: %u is released", msg_header->msg_id); + return; + } + if (msg_header->len > reps_info->buffer_len - 4) { + PMD_MSG_LOG(ERR, "reps_buf_len is %u, but reps_msg_len is %u", + reps_info->buffer_len, msg_header->len + 4); + goto free_id; + } + uint8_t *reps_buffer = (uint8_t *)reps_info->reps_addr; + + rte_memcpy(reps_buffer + 4, receiver_buff, msg_header->len); + *(uint16_t *)(reps_buffer + 1) = msg_header->len; + *(uint8_t *)(reps_info->reps_addr) = ZXDH_REPS_HEADER_REPLYED; + +free_id: + zxdh_bar_chan_msgid_free(msg_header->msg_id); +} + +static void +zxdh_bar_msg_sync_msg_proc(uint64_t reply_addr, + struct zxdh_bar_msg_header *msg_header, + uint8_t *receiver_buff, void *dev) +{ + uint16_t reps_len = 0; + uint8_t *reps_buffer = NULL; + + reps_buffer = rte_malloc(NULL, ZXDH_BAR_MSG_PAYLOAD_MAX_LEN, 0); + if (reps_buffer == NULL) + return; + + zxdh_bar_chan_msg_recv_callback recv_func = msg_recv_func_tbl[msg_header->module_id]; + + recv_func(receiver_buff, msg_header->len, reps_buffer, &reps_len, dev); + msg_header->ack = ZXDH_BAR_CHAN_MSG_ACK; + msg_header->len = reps_len; + zxdh_bar_chan_msg_header_set(reply_addr, msg_header); + zxdh_bar_chan_msg_payload_set(reply_addr, reps_buffer, reps_len); + zxdh_bar_chan_msg_valid_set(reply_addr, ZXDH_BAR_MSG_CHAN_USABLE); + rte_free(reps_buffer); +} + +static uint64_t +zxdh_reply_addr_get(uint8_t sync, uint8_t src_type, + uint8_t dst_type, uint64_t virt_addr) +{ + uint64_t recv_rep_addr = 0; + uint8_t chan_id = 0; + uint8_t subchan_id = 0; + uint8_t src = 0; + uint8_t dst = 0; + + src = zxdh_bar_msg_dst_index_trans(src_type); + dst = zxdh_bar_msg_src_index_trans(dst_type); + if (src == ZXDH_BAR_MSG_SRC_ERR || dst == ZXDH_BAR_MSG_DST_ERR) + return 0; + + chan_id = chan_id_tbl[dst][src]; + subchan_id = 1 - subchan_id_tbl[dst][src]; + + if (sync == ZXDH_BAR_CHAN_MSG_SYNC) + recv_rep_addr = 
zxdh_subchan_addr_cal(virt_addr, chan_id, subchan_id); + else + recv_rep_addr = zxdh_subchan_addr_cal(virt_addr, chan_id, 1 - subchan_id); + + return recv_rep_addr; +} + +static uint16_t +zxdh_bar_chan_msg_header_check(struct zxdh_bar_msg_header *msg_header) +{ + uint16_t len = 0; + uint8_t module_id = 0; + + if (msg_header->valid != ZXDH_BAR_MSG_CHAN_USED) { + PMD_MSG_LOG(ERR, "recv header ERR: valid label is not used."); + return ZXDH_BAR_MSG_ERR_MODULE; + } + module_id = msg_header->module_id; + + if (module_id >= (uint8_t)ZXDH_BAR_MSG_MODULE_NUM) { + PMD_MSG_LOG(ERR, "recv header ERR: invalid module_id: %u.", module_id); + return ZXDH_BAR_MSG_ERR_MODULE; + } + len = msg_header->len; + + if (len > ZXDH_BAR_MSG_PAYLOAD_MAX_LEN) { + PMD_MSG_LOG(ERR, "recv header ERR: invalid mesg len: %u.", len); + return ZXDH_BAR_MSG_ERR_LEN; + } + if (msg_recv_func_tbl[msg_header->module_id] == NULL) { + PMD_MSG_LOG(ERR, "recv header ERR: module:%s(%u) doesn't register", + zxdh_module_id_name(module_id), module_id); + return ZXDH_BAR_MSG_ERR_MODULE_NOEXIST; + } + return ZXDH_BAR_MSG_OK; +} + +int +zxdh_bar_irq_recv(uint8_t src, uint8_t dst, uint64_t virt_addr, void *dev) +{ + struct zxdh_bar_msg_header msg_header = {0}; + uint64_t recv_addr = 0; + uint64_t reps_addr = 0; + uint16_t ret = 0; + uint8_t *recved_msg = NULL; + + recv_addr = zxdh_recv_addr_get(src, dst, virt_addr); + if (recv_addr == 0) { + PMD_MSG_LOG(ERR, "invalid driver type(src:%u, dst:%u).", src, dst); + return -1; + } + + zxdh_bar_chan_msg_header_get(recv_addr, &msg_header); + ret = zxdh_bar_chan_msg_header_check(&msg_header); + + if (ret != ZXDH_BAR_MSG_OK) { + PMD_MSG_LOG(ERR, "recv msg_head err, ret: %u.", ret); + return -1; + } + + recved_msg = rte_malloc(NULL, msg_header.len, 0); + if (recved_msg == NULL) { + PMD_MSG_LOG(ERR, "malloc temp buff failed."); + return -1; + } + zxdh_bar_chan_msg_payload_get(recv_addr, recved_msg, msg_header.len); + + reps_addr = zxdh_reply_addr_get(msg_header.sync, src, dst, virt_addr); + + if (msg_header.sync == ZXDH_BAR_CHAN_MSG_SYNC) { + zxdh_bar_msg_sync_msg_proc(reps_addr, &msg_header, recved_msg, dev); + goto exit; + } + zxdh_bar_chan_msg_valid_set(recv_addr, ZXDH_BAR_MSG_CHAN_USABLE); + if (msg_header.ack == ZXDH_BAR_CHAN_MSG_ACK) { + zxdh_bar_msg_ack_async_msg_proc(&msg_header, recved_msg); + goto exit; + } + return 0; + +exit: + rte_free(recved_msg); + return ZXDH_BAR_MSG_OK; +} diff --git a/drivers/net/zxdh/zxdh_msg.h b/drivers/net/zxdh/zxdh_msg.h new file mode 100644 index 0000000000..530ee406b1 --- /dev/null +++ b/drivers/net/zxdh/zxdh_msg.h @@ -0,0 +1,220 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2024 ZTE Corporation + */ + +#ifndef ZXDH_MSG_H +#define ZXDH_MSG_H + +#include + +#include + +#define ZXDH_BAR0_INDEX 0 +#define ZXDH_CTRLCH_OFFSET (0x2000) +#define ZXDH_MSG_CHAN_PFVFSHARE_OFFSET (ZXDH_CTRLCH_OFFSET + 0x1000) + +#define ZXDH_MSIX_INTR_MSG_VEC_BASE 1 +#define ZXDH_MSIX_INTR_MSG_VEC_NUM 3 +#define ZXDH_MSIX_INTR_DTB_VEC (ZXDH_MSIX_INTR_MSG_VEC_BASE + ZXDH_MSIX_INTR_MSG_VEC_NUM) +#define ZXDH_MSIX_INTR_DTB_VEC_NUM 1 +#define ZXDH_INTR_NONQUE_NUM (ZXDH_MSIX_INTR_MSG_VEC_NUM + ZXDH_MSIX_INTR_DTB_VEC_NUM + 1) +#define ZXDH_QUEUE_INTR_VEC_BASE (ZXDH_MSIX_INTR_DTB_VEC + ZXDH_MSIX_INTR_DTB_VEC_NUM) +#define ZXDH_QUEUE_INTR_VEC_NUM 256 + +#define ZXDH_BAR_MSG_POLLING_SPAN 100 +#define ZXDH_BAR_MSG_POLL_CNT_PER_MS (1 * 1000 / ZXDH_BAR_MSG_POLLING_SPAN) +#define ZXDH_BAR_MSG_POLL_CNT_PER_S (1 * 1000 * 1000 / ZXDH_BAR_MSG_POLLING_SPAN) +#define ZXDH_BAR_MSG_TIMEOUT_TH (10 
* 1000 * 1000 / ZXDH_BAR_MSG_POLLING_SPAN) + +#define ZXDH_BAR_CHAN_MSG_SYNC 0 + +#define ZXDH_BAR_MSG_ADDR_CHAN_INTERVAL (2 * 1024) /* channel size */ +#define ZXDH_BAR_MSG_PLAYLOAD_OFFSET (sizeof(struct zxdh_bar_msg_header)) +#define ZXDH_BAR_MSG_PAYLOAD_MAX_LEN \ + (ZXDH_BAR_MSG_ADDR_CHAN_INTERVAL - sizeof(struct zxdh_bar_msg_header)) + +enum ZXDH_DRIVER_TYPE { + ZXDH_MSG_CHAN_END_MPF = 0, + ZXDH_MSG_CHAN_END_PF, + ZXDH_MSG_CHAN_END_VF, + ZXDH_MSG_CHAN_END_RISC, +}; + +enum ZXDH_MSG_VEC { + ZXDH_MSIX_FROM_PFVF = ZXDH_MSIX_INTR_MSG_VEC_BASE, + ZXDH_MSIX_FROM_MPF, + ZXDH_MSIX_FROM_RISCV, + ZXDH_MSG_VEC_NUM, +}; + +enum ZXDH_BAR_MSG_RTN { + ZXDH_BAR_MSG_OK = 0, + ZXDH_BAR_MSG_ERR_MSGID, + ZXDH_BAR_MSG_ERR_NULL, + ZXDH_BAR_MSG_ERR_TYPE, /* Message type exception */ + ZXDH_BAR_MSG_ERR_MODULE, /* Module ID exception */ + ZXDH_BAR_MSG_ERR_BODY_NULL, /* Message body exception */ + ZXDH_BAR_MSG_ERR_LEN, /* Message length exception */ + ZXDH_BAR_MSG_ERR_TIME_OUT, /* Message send timed out */ + ZXDH_BAR_MSG_ERR_NOT_READY, /* Abnormal message sending conditions */ + ZXDH_BAR_MEG_ERR_NULL_FUNC, /* Empty receive processing function pointer */ + ZXDH_BAR_MSG_ERR_REPEAT_REGISTER, /* Module duplicate registration */ + ZXDH_BAR_MSG_ERR_UNGISTER, /* Repeated deregistration */ + /* + * The sending interface parameter boundary structure pointer is empty + */ + ZXDH_BAR_MSG_ERR_NULL_PARA, + ZXDH_BAR_MSG_ERR_REPSBUFF_LEN, /* The length of reps_buff is too short */ + /* + * Unable to find the corresponding message processing function for this module + */ + ZXDH_BAR_MSG_ERR_MODULE_NOEXIST, + /* + * The virtual address in the parameters passed in by the sending interface is empty + */ + ZXDH_BAR_MSG_ERR_VIRTADDR_NULL, + ZXDH_BAR_MSG_ERR_REPLY, /* sync msg resp_error */ + ZXDH_BAR_MSG_ERR_MPF_NOT_SCANNED, + ZXDH_BAR_MSG_ERR_KERNEL_READY, + ZXDH_BAR_MSG_ERR_USR_RET_ERR, + ZXDH_BAR_MSG_ERR_ERR_PCIEID, + ZXDH_BAR_MSG_ERR_SOCKET, /* netlink socket err */ +}; + +enum ZXDH_BAR_MODULE_ID { + ZXDH_BAR_MODULE_DBG = 0, /* 0: debug */ + ZXDH_BAR_MODULE_TBL, /* 1: resource table */ + ZXDH_BAR_MODULE_MISX, /* 2: config msix */ + ZXDH_BAR_MODULE_SDA, /* 3: */ + ZXDH_BAR_MODULE_RDMA, /* 4: */ + ZXDH_BAR_MODULE_DEMO, /* 5: channel test */ + ZXDH_BAR_MODULE_SMMU, /* 6: */ + ZXDH_BAR_MODULE_MAC, /* 7: mac rx/tx stats */ + ZXDH_BAR_MODULE_VDPA, /* 8: vdpa live migration */ + ZXDH_BAR_MODULE_VQM, /* 9: vqm live migration */ + ZXDH_BAR_MODULE_NP, /* 10: vf msg callback np */ + ZXDH_BAR_MODULE_VPORT, /* 11: get vport */ + ZXDH_BAR_MODULE_BDF, /* 12: get bdf */ + ZXDH_BAR_MODULE_RISC_READY, /* 13: */ + ZXDH_BAR_MODULE_REVERSE, /* 14: byte stream reverse */ + ZXDH_BAR_MDOULE_NVME, /* 15: */ + ZXDH_BAR_MDOULE_NPSDK, /* 16: */ + ZXDH_BAR_MODULE_NP_TODO, /* 17: */ + ZXDH_MODULE_BAR_MSG_TO_PF, /* 18: */ + ZXDH_MODULE_BAR_MSG_TO_VF, /* 19: */ + + ZXDH_MODULE_FLASH = 32, + ZXDH_BAR_MODULE_OFFSET_GET = 33, + ZXDH_BAR_EVENT_OVS_WITH_VCB = 36, + + ZXDH_BAR_MSG_MODULE_NUM = 100, +}; + +enum ZXDH_RES_TBL_FILED { + ZXDH_TBL_FIELD_PCIEID = 0, + ZXDH_TBL_FIELD_BDF = 1, + ZXDH_TBL_FIELD_MSGCH = 2, + ZXDH_TBL_FIELD_DATACH = 3, + ZXDH_TBL_FIELD_VPORT = 4, + ZXDH_TBL_FIELD_PNLID = 5, + ZXDH_TBL_FIELD_PHYPORT = 6, + ZXDH_TBL_FIELD_SERDES_NUM = 7, + ZXDH_TBL_FIELD_NP_PORT = 8, + ZXDH_TBL_FIELD_SPEED = 9, + ZXDH_TBL_FIELD_HASHID = 10, + ZXDH_TBL_FIELD_NON, +}; + +enum ZXDH_TBL_MSG_TYPE { + ZXDH_TBL_TYPE_READ, + ZXDH_TBL_TYPE_WRITE, + ZXDH_TBL_TYPE_NON, +}; + +struct zxdh_msix_para { + uint16_t pcie_id; + uint16_t vector_risc; + uint16_t vector_pfvf; +
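/* MSI-X vector for messages coming from the MPF endpoint; zxdh_msg_chan_enable() fills it with ZXDH_MSIX_FROM_MPF. */ +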
uint16_t vector_mpf; + uint64_t virt_addr; + uint16_t driver_type; /* refer to DRIVER_TYPE */ +}; + +struct zxdh_msix_msg { + uint16_t pcie_id; + uint16_t vector_risc; + uint16_t vector_pfvf; + uint16_t vector_mpf; +}; + +struct zxdh_pci_bar_msg { + uint64_t virt_addr; /* bar addr */ + void *payload_addr; + uint16_t payload_len; + uint16_t emec; + uint16_t src; /* refer to BAR_DRIVER_TYPE */ + uint16_t dst; /* refer to BAR_DRIVER_TYPE */ + uint16_t module_id; + uint16_t src_pcieid; + uint16_t dst_pcieid; + uint16_t usr; +}; + +struct zxdh_bar_msix_reps { + uint16_t pcie_id; + uint16_t check; + uint16_t vport; + uint16_t rsv; +} __rte_packed; + +struct zxdh_bar_offset_reps { + uint16_t check; + uint16_t rsv; + uint32_t offset; + uint32_t length; +} __rte_packed; + +struct zxdh_bar_recv_msg { + uint8_t reps_ok; + uint16_t reps_len; + uint8_t rsv; + union { + struct zxdh_bar_msix_reps msix_reps; + struct zxdh_bar_offset_reps offset_reps; + } __rte_packed; +} __rte_packed; + +struct zxdh_msg_recviver_mem { + void *recv_buffer; /* first 4B is head, followed by payload */ + uint64_t buffer_len; +}; + +struct zxdh_bar_msg_header { + uint8_t valid : 1; /* used by __bar_chan_msg_valid_set/get */ + uint8_t sync : 1; + uint8_t emec : 1; /* emergency */ + uint8_t ack : 1; /* ack msg */ + uint8_t poll : 1; + uint8_t usr : 1; + uint8_t rsv; + uint16_t module_id; + uint16_t len; + uint16_t msg_id; + uint16_t src_pcieid; + uint16_t dst_pcieid; /* used in PF-->VF */ +}; + +typedef int (*zxdh_bar_chan_msg_recv_callback)(void *pay_load, uint16_t len, + void *reps_buffer, uint16_t *reps_len, void *dev); + +int zxdh_msg_chan_init(void); +int zxdh_bar_msg_chan_exit(void); +int zxdh_msg_chan_hwlock_init(struct rte_eth_dev *dev); + +int zxdh_msg_chan_enable(struct rte_eth_dev *dev); +int zxdh_bar_chan_sync_msg_send(struct zxdh_pci_bar_msg *in, + struct zxdh_msg_recviver_mem *result); + +int zxdh_bar_irq_recv(uint8_t src, uint8_t dst, uint64_t virt_addr, void *dev); + +#endif /* ZXDH_MSG_H */ diff --git a/drivers/net/zxdh/zxdh_pci.c b/drivers/net/zxdh/zxdh_pci.c new file mode 100644 index 0000000000..06d3f92b20 --- /dev/null +++ b/drivers/net/zxdh/zxdh_pci.c @@ -0,0 +1,420 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2024 ZTE Corporation + */ + +#include +#include + +#include +#include +#include +#include +#include + +#include "zxdh_ethdev.h" +#include "zxdh_pci.h" +#include "zxdh_logs.h" +#include "zxdh_queue.h" + +#define ZXDH_PMD_DEFAULT_GUEST_FEATURES \ + (1ULL << ZXDH_NET_F_MRG_RXBUF | \ + 1ULL << ZXDH_NET_F_STATUS | \ + 1ULL << ZXDH_NET_F_MQ | \ + 1ULL << ZXDH_F_ANY_LAYOUT | \ + 1ULL << ZXDH_F_VERSION_1 | \ + 1ULL << ZXDH_F_RING_PACKED | \ + 1ULL << ZXDH_F_IN_ORDER | \ + 1ULL << ZXDH_F_NOTIFICATION_DATA | \ + 1ULL << ZXDH_NET_F_MAC) + +static void +zxdh_read_dev_config(struct zxdh_hw *hw, size_t offset, + void *dst, int32_t length) +{ + int32_t i = 0; + uint8_t *p = NULL; + uint8_t old_gen = 0; + uint8_t new_gen = 0; + + do { + old_gen = rte_read8(&hw->common_cfg->config_generation); + + p = dst; + for (i = 0; i < length; i++) + *p++ = rte_read8((uint8_t *)hw->dev_cfg + offset + i); + + new_gen = rte_read8(&hw->common_cfg->config_generation); + } while (old_gen != new_gen); +} + +static void +zxdh_write_dev_config(struct zxdh_hw *hw, size_t offset, + const void *src, int32_t length) +{ + int32_t i = 0; + const uint8_t *p = src; + + for (i = 0; i < length; i++) + rte_write8((*p++), (((uint8_t *)hw->dev_cfg) + offset + i)); +} + +static uint8_t +zxdh_get_status(struct zxdh_hw *hw) +{ + return 
rte_read8(&hw->common_cfg->device_status); +} + +static void +zxdh_set_status(struct zxdh_hw *hw, uint8_t status) +{ + rte_write8(status, &hw->common_cfg->device_status); +} + +static uint64_t +zxdh_get_features(struct zxdh_hw *hw) +{ + uint32_t features_lo = 0; + uint32_t features_hi = 0; + + rte_write32(0, &hw->common_cfg->device_feature_select); + features_lo = rte_read32(&hw->common_cfg->device_feature); + + rte_write32(1, &hw->common_cfg->device_feature_select); + features_hi = rte_read32(&hw->common_cfg->device_feature); + + return ((uint64_t)features_hi << 32) | features_lo; +} + +static void +zxdh_set_features(struct zxdh_hw *hw, uint64_t features) +{ + rte_write32(0, &hw->common_cfg->guest_feature_select); + rte_write32(features & ((1ULL << 32) - 1), &hw->common_cfg->guest_feature); + rte_write32(1, &hw->common_cfg->guest_feature_select); + rte_write32(features >> 32, &hw->common_cfg->guest_feature); +} + +static uint16_t +zxdh_set_config_irq(struct zxdh_hw *hw, uint16_t vec) +{ + rte_write16(vec, &hw->common_cfg->msix_config); + return rte_read16(&hw->common_cfg->msix_config); +} + +static uint16_t +zxdh_set_queue_irq(struct zxdh_hw *hw, struct zxdh_virtqueue *vq, uint16_t vec) +{ + rte_write16(vq->vq_queue_index, &hw->common_cfg->queue_select); + rte_write16(vec, &hw->common_cfg->queue_msix_vector); + return rte_read16(&hw->common_cfg->queue_msix_vector); +} + +static uint8_t +zxdh_get_isr(struct zxdh_hw *hw) +{ + return rte_read8(hw->isr); +} + +static uint16_t +zxdh_get_queue_num(struct zxdh_hw *hw, uint16_t queue_id) +{ + rte_write16(queue_id, &hw->common_cfg->queue_select); + return rte_read16(&hw->common_cfg->queue_size); +} + +static void +zxdh_set_queue_num(struct zxdh_hw *hw, uint16_t queue_id, uint16_t vq_size) +{ + rte_write16(queue_id, &hw->common_cfg->queue_select); + rte_write16(vq_size, &hw->common_cfg->queue_size); +} + +static int32_t +check_vq_phys_addr_ok(struct zxdh_virtqueue *vq) +{ + if ((vq->vq_ring_mem + vq->vq_ring_size - 1) >> (ZXDH_PCI_QUEUE_ADDR_SHIFT + 32)) { + PMD_DRV_LOG(ERR, "vring address shouldn't be above 16TB!"); + return 0; + } + return 1; +} + +static inline void +io_write64_twopart(uint64_t val, uint32_t *lo, uint32_t *hi) +{ + rte_write32(val & ((1ULL << 32) - 1), lo); + rte_write32(val >> 32, hi); +} + +static int32_t +zxdh_setup_queue(struct zxdh_hw *hw, struct zxdh_virtqueue *vq) +{ + uint64_t desc_addr = 0; + uint64_t avail_addr = 0; + uint64_t used_addr = 0; + uint16_t notify_off = 0; + + if (!check_vq_phys_addr_ok(vq)) + return -1; + + desc_addr = vq->vq_ring_mem; + avail_addr = desc_addr + vq->vq_nentries * sizeof(struct zxdh_vring_desc); + if (vtpci_packed_queue(vq->hw)) { + used_addr = RTE_ALIGN_CEIL((avail_addr + + sizeof(struct zxdh_vring_packed_desc_event)), + ZXDH_PCI_VRING_ALIGN); + } else { + used_addr = RTE_ALIGN_CEIL(avail_addr + offsetof(struct zxdh_vring_avail, + ring[vq->vq_nentries]), ZXDH_PCI_VRING_ALIGN); + } + + rte_write16(vq->vq_queue_index, &hw->common_cfg->queue_select); + + io_write64_twopart(desc_addr, &hw->common_cfg->queue_desc_lo, + &hw->common_cfg->queue_desc_hi); + io_write64_twopart(avail_addr, &hw->common_cfg->queue_avail_lo, + &hw->common_cfg->queue_avail_hi); + io_write64_twopart(used_addr, &hw->common_cfg->queue_used_lo, + &hw->common_cfg->queue_used_hi); + + notify_off = rte_read16(&hw->common_cfg->queue_notify_off); /* default 0 */ + notify_off = 0; + vq->notify_addr = (void *)((uint8_t *)hw->notify_base + + notify_off * hw->notify_off_multiplier); + + rte_write16(1, &hw->common_cfg->queue_enable); 
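+ /* The descriptor ring and driver/device event area addresses are programmed into queue_desc/queue_avail/queue_used above; writing 1 to queue_enable makes the queue live. notify_off read from the device is overridden to 0, so all queues share the first notify region. */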
+ + return 0; +} + +static void +zxdh_del_queue(struct zxdh_hw *hw, struct zxdh_virtqueue *vq) +{ + rte_write16(vq->vq_queue_index, &hw->common_cfg->queue_select); + + io_write64_twopart(0, &hw->common_cfg->queue_desc_lo, + &hw->common_cfg->queue_desc_hi); + io_write64_twopart(0, &hw->common_cfg->queue_avail_lo, + &hw->common_cfg->queue_avail_hi); + io_write64_twopart(0, &hw->common_cfg->queue_used_lo, + &hw->common_cfg->queue_used_hi); + + rte_write16(0, &hw->common_cfg->queue_enable); +} + +const struct zxdh_pci_ops zxdh_dev_pci_ops = { + .read_dev_cfg = zxdh_read_dev_config, + .write_dev_cfg = zxdh_write_dev_config, + .get_status = zxdh_get_status, + .set_status = zxdh_set_status, + .get_features = zxdh_get_features, + .set_features = zxdh_set_features, + .set_queue_irq = zxdh_set_queue_irq, + .set_config_irq = zxdh_set_config_irq, + .get_isr = zxdh_get_isr, + .get_queue_num = zxdh_get_queue_num, + .set_queue_num = zxdh_set_queue_num, + .setup_queue = zxdh_setup_queue, + .del_queue = zxdh_del_queue, +}; + +uint8_t +zxdh_pci_isr(struct zxdh_hw *hw) +{ + return ZXDH_VTPCI_OPS(hw)->get_isr(hw); +} + +uint16_t +zxdh_pci_get_features(struct zxdh_hw *hw) +{ + return ZXDH_VTPCI_OPS(hw)->get_features(hw); +} + +void +zxdh_pci_reset(struct zxdh_hw *hw) +{ + PMD_DRV_LOG(INFO, "port %u device start reset, just wait...", hw->port_id); + uint32_t retry = 0; + + ZXDH_VTPCI_OPS(hw)->set_status(hw, ZXDH_CONFIG_STATUS_RESET); + /* Flush status write and wait device ready max 3 seconds. */ + while (ZXDH_VTPCI_OPS(hw)->get_status(hw) != ZXDH_CONFIG_STATUS_RESET) { + ++retry; + rte_delay_ms(1); + } + PMD_DRV_LOG(INFO, "port %u device reset %u ms done", hw->port_id, retry); +} + +void +zxdh_pci_reinit_complete(struct zxdh_hw *hw) +{ + zxdh_pci_set_status(hw, ZXDH_CONFIG_STATUS_DRIVER_OK); +} + +void +zxdh_pci_set_status(struct zxdh_hw *hw, uint8_t status) +{ + if (status != ZXDH_CONFIG_STATUS_RESET) + status |= ZXDH_VTPCI_OPS(hw)->get_status(hw); + + ZXDH_VTPCI_OPS(hw)->set_status(hw, status); +} + +static void +*get_cfg_addr(struct rte_pci_device *dev, struct zxdh_pci_cap *cap) +{ + uint8_t bar = cap->bar; + uint32_t length = cap->length; + uint32_t offset = cap->offset; + + if (bar >= PCI_MAX_RESOURCE) { + PMD_DRV_LOG(ERR, "invalid bar: %u", bar); + return NULL; + } + if (offset + length < offset) { + PMD_DRV_LOG(ERR, "offset(%u) + length(%u) overflows", offset, length); + return NULL; + } + if (offset + length > dev->mem_resource[bar].len) { + PMD_DRV_LOG(ERR, "invalid cap: overflows bar space"); + return NULL; + } + uint8_t *base = dev->mem_resource[bar].addr; + + if (base == NULL) { + PMD_DRV_LOG(ERR, "bar %u base addr is NULL", bar); + return NULL; + } + return base + offset; +} + +int32_t +zxdh_read_pci_caps(struct rte_pci_device *dev, struct zxdh_hw *hw) +{ + struct zxdh_pci_cap cap; + uint8_t pos = 0; + int32_t ret = 0; + + if (dev->mem_resource[0].addr == NULL) { + PMD_DRV_LOG(ERR, "bar0 base addr is NULL"); + return -1; + } + + hw->use_msix = zxdh_pci_msix_detect(dev); + + pos = rte_pci_find_capability(dev, RTE_PCI_CAP_ID_VNDR); + while (pos) { + ret = rte_pci_read_config(dev, &cap, sizeof(cap), pos); + if (ret != sizeof(cap)) { + PMD_DRV_LOG(ERR, "failed to read pci cap at pos: %x ret %d", pos, ret); + break; + } + if (cap.cap_vndr != RTE_PCI_CAP_ID_VNDR) { + PMD_DRV_LOG(DEBUG, "[%2x] skipping non VNDR cap id: %02x", + pos, cap.cap_vndr); + goto next; + } + PMD_DRV_LOG(DEBUG, "[%2x] cfg type: %u, bar: %u, offset: %04x, len: %u", + pos, cap.cfg_type, cap.bar, cap.offset, cap.length); + + switch 
(cap.cfg_type) { + case ZXDH_PCI_CAP_COMMON_CFG: + hw->common_cfg = get_cfg_addr(dev, &cap); + break; + case ZXDH_PCI_CAP_NOTIFY_CFG: { + ret = rte_pci_read_config(dev, &hw->notify_off_multiplier, + 4, pos + sizeof(cap)); + if (ret != 4) + PMD_DRV_LOG(ERR, + "failed to read notify_off_multiplier, ret %d", ret); + else + hw->notify_base = get_cfg_addr(dev, &cap); + break; + } + case ZXDH_PCI_CAP_DEVICE_CFG: + hw->dev_cfg = get_cfg_addr(dev, &cap); + break; + case ZXDH_PCI_CAP_ISR_CFG: + hw->isr = get_cfg_addr(dev, &cap); + break; + case ZXDH_PCI_CAP_PCI_CFG: { + hw->pcie_id = *(uint16_t *)&cap.padding[1]; + PMD_DRV_LOG(DEBUG, "get pcie id 0x%x", hw->pcie_id); + + if ((hw->pcie_id >> 11) & 0x1) /* PF */ { + PMD_DRV_LOG(DEBUG, "EP %u PF %u", + hw->pcie_id >> 12, (hw->pcie_id >> 8) & 0x7); + } else { /* VF */ + PMD_DRV_LOG(DEBUG, "EP %u PF %u VF %u", + hw->pcie_id >> 12, + (hw->pcie_id >> 8) & 0x7, + hw->pcie_id & 0xff); + } + break; + } + } +next: + pos = cap.cap_next; + } + if (hw->common_cfg == NULL || hw->notify_base == NULL || + hw->dev_cfg == NULL || hw->isr == NULL) { + PMD_DRV_LOG(ERR, "no zxdh pci device found."); + return -1; + } + return 0; +} + +void +zxdh_pci_read_dev_config(struct zxdh_hw *hw, size_t offset, void *dst, int32_t length) +{ + ZXDH_VTPCI_OPS(hw)->read_dev_cfg(hw, offset, dst, length); +} + +void +zxdh_get_pci_dev_config(struct zxdh_hw *hw) +{ + uint64_t guest_features = 0; + uint64_t nego_features = 0; + uint32_t max_queue_pairs = 0; + + hw->host_features = zxdh_pci_get_features(hw); + + guest_features = (uint64_t)ZXDH_PMD_DEFAULT_GUEST_FEATURES; + nego_features = guest_features & hw->host_features; + + hw->guest_features = nego_features; + + if (hw->guest_features & (1ULL << ZXDH_NET_F_MAC)) { + zxdh_pci_read_dev_config(hw, offsetof(struct zxdh_net_config, mac), + &hw->mac_addr, RTE_ETHER_ADDR_LEN); + } else { + rte_eth_random_addr(&hw->mac_addr[0]); + } + + zxdh_pci_read_dev_config(hw, offsetof(struct zxdh_net_config, max_virtqueue_pairs), + &max_queue_pairs, sizeof(max_queue_pairs)); + + if (max_queue_pairs == 0) + hw->max_queue_pairs = ZXDH_RX_QUEUES_MAX; + else + hw->max_queue_pairs = RTE_MIN(ZXDH_RX_QUEUES_MAX, max_queue_pairs); + PMD_DRV_LOG(DEBUG, "set max queue pairs %d", hw->max_queue_pairs); +} + +enum zxdh_msix_status zxdh_pci_msix_detect(struct rte_pci_device *dev) +{ + uint16_t flags = 0; + uint8_t pos = 0; + int16_t ret = 0; + + pos = rte_pci_find_capability(dev, RTE_PCI_CAP_ID_MSIX); + + if (pos > 0) { + ret = rte_pci_read_config(dev, &flags, 2, pos + RTE_PCI_MSIX_FLAGS); + if (ret == 2 && flags & RTE_PCI_MSIX_FLAGS_ENABLE) + return ZXDH_MSIX_ENABLED; + else + return ZXDH_MSIX_DISABLED; + } + return ZXDH_MSIX_NONE; +} diff --git a/drivers/net/zxdh/zxdh_pci.h b/drivers/net/zxdh/zxdh_pci.h new file mode 100644 index 0000000000..ed6fd89742 --- /dev/null +++ b/drivers/net/zxdh/zxdh_pci.h @@ -0,0 +1,169 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2024 ZTE Corporation + */ + +#ifndef ZXDH_PCI_H +#define ZXDH_PCI_H + +#include +#include + +#include + +#include "zxdh_ethdev.h" + +enum zxdh_msix_status { + ZXDH_MSIX_NONE = 0, + ZXDH_MSIX_DISABLED = 1, + ZXDH_MSIX_ENABLED = 2 +}; + +/* The bit of the ISR which indicates a device has an interrupt. */ +#define ZXDH_PCI_ISR_INTR 0x1 +/* The bit of the ISR which indicates a device configuration change. */ +#define ZXDH_PCI_ISR_CONFIG 0x2 +/* Vector value used to disable MSI for queue. 
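It is the value handed to the set_config_irq()/set_queue_irq() ops when no MSI-X vector should be mapped.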
*/ +#define ZXDH_MSI_NO_VECTOR 0x7F + +#define ZXDH_PCI_VRING_ALIGN 4096 + +#define ZXDH_NET_F_CSUM 0 /* Host handles pkts w/ partial csum */ +#define ZXDH_NET_F_GUEST_CSUM 1 /* Guest handles pkts w/ partial csum */ +#define ZXDH_NET_F_MTU 3 /* Initial MTU advice. */ +#define ZXDH_NET_F_MAC 5 /* Host has given MAC address. */ +#define ZXDH_NET_F_GUEST_TSO4 7 /* Guest can handle TSOv4 in. */ +#define ZXDH_NET_F_GUEST_TSO6 8 /* Guest can handle TSOv6 in. */ +#define ZXDH_NET_F_GUEST_ECN 9 /* Guest can handle TSO[6] w/ ECN in. */ +#define ZXDH_NET_F_GUEST_UFO 10 /* Guest can handle UFO in. */ + +#define ZXDH_NET_F_HOST_UFO 14 /* Host can handle UFO in. */ +#define ZXDH_NET_F_HOST_TSO4 11 /* Host can handle TSOv4 in. */ +#define ZXDH_NET_F_HOST_TSO6 12 /* Host can handle TSOv6 in. */ +#define ZXDH_NET_F_MRG_RXBUF 15 /* Host can merge receive buffers. */ +#define ZXDH_NET_F_STATUS 16 /* zxdh_net_config.status available */ +#define ZXDH_NET_F_MQ 22 /* Device supports Receive Flow Steering */ +#define ZXDH_F_ANY_LAYOUT 27 /* Can the device handle any descriptor layout */ +#define ZXDH_F_VERSION_1 32 +#define ZXDH_F_RING_PACKED 34 +#define ZXDH_F_IN_ORDER 35 +#define ZXDH_F_NOTIFICATION_DATA 38 + +#define ZXDH_PCI_CAP_COMMON_CFG 1 /* Common configuration */ +#define ZXDH_PCI_CAP_NOTIFY_CFG 2 /* Notifications */ +#define ZXDH_PCI_CAP_ISR_CFG 3 /* ISR Status */ +#define ZXDH_PCI_CAP_DEVICE_CFG 4 /* Device specific configuration */ +#define ZXDH_PCI_CAP_PCI_CFG 5 /* PCI configuration access */ + +/* Status byte for guest to report progress. */ +#define ZXDH_CONFIG_STATUS_RESET 0x00 +#define ZXDH_CONFIG_STATUS_ACK 0x01 +#define ZXDH_CONFIG_STATUS_DRIVER 0x02 +#define ZXDH_CONFIG_STATUS_DRIVER_OK 0x04 +#define ZXDH_CONFIG_STATUS_FEATURES_OK 0x08 +#define ZXDH_CONFIG_STATUS_DEV_NEED_RESET 0x40 +#define ZXDH_CONFIG_STATUS_FAILED 0x80 +#define ZXDH_PCI_QUEUE_ADDR_SHIFT 12 + +struct zxdh_net_config { + /* The config defining mac address (if ZXDH_NET_F_MAC) */ + uint8_t mac[RTE_ETHER_ADDR_LEN]; + /* See ZXDH_NET_F_STATUS and ZXDH_NET_S_* above */ + uint16_t status; + uint16_t max_virtqueue_pairs; + uint16_t mtu; + uint32_t speed; + uint8_t duplex; +} __rte_packed; + +/* This is the PCI capability header: */ +struct zxdh_pci_cap { + uint8_t cap_vndr; /* Generic PCI field: PCI_CAP_ID_VNDR */ + uint8_t cap_next; /* Generic PCI field: next ptr. */ + uint8_t cap_len; /* Generic PCI field: capability length */ + uint8_t cfg_type; /* Identifies the structure. */ + uint8_t bar; /* Where to find it. */ + uint8_t padding[3]; /* Pad to full dword. */ + uint32_t offset; /* Offset within bar. */ + uint32_t length; /* Length of the structure, in bytes. */ +}; + +/* Fields in ZXDH_PCI_CAP_COMMON_CFG: */ +struct zxdh_pci_common_cfg { + /* About the whole device. */ + uint32_t device_feature_select; /* read-write */ + uint32_t device_feature; /* read-only */ + uint32_t guest_feature_select; /* read-write */ + uint32_t guest_feature; /* read-write */ + uint16_t msix_config; /* read-write */ + uint16_t num_queues; /* read-only */ + uint8_t device_status; /* read-write */ + uint8_t config_generation; /* read-only */ + + /* About a specific virtqueue. */ + uint16_t queue_select; /* read-write */ + uint16_t queue_size; /* read-write, power of 2. 
*/ + uint16_t queue_msix_vector; /* read-write */ + uint16_t queue_enable; /* read-write */ + uint16_t queue_notify_off; /* read-only */ + uint32_t queue_desc_lo; /* read-write */ + uint32_t queue_desc_hi; /* read-write */ + uint32_t queue_avail_lo; /* read-write */ + uint32_t queue_avail_hi; /* read-write */ + uint32_t queue_used_lo; /* read-write */ + uint32_t queue_used_hi; /* read-write */ +}; + +static inline int32_t +vtpci_with_feature(struct zxdh_hw *hw, uint64_t bit) +{ + return (hw->guest_features & (1ULL << bit)) != 0; +} + +static inline int32_t +vtpci_packed_queue(struct zxdh_hw *hw) +{ + return vtpci_with_feature(hw, ZXDH_F_RING_PACKED); +} + +struct zxdh_pci_ops { + void (*read_dev_cfg)(struct zxdh_hw *hw, size_t offset, void *dst, int32_t len); + void (*write_dev_cfg)(struct zxdh_hw *hw, size_t offset, const void *src, int32_t len); + + uint8_t (*get_status)(struct zxdh_hw *hw); + void (*set_status)(struct zxdh_hw *hw, uint8_t status); + + uint64_t (*get_features)(struct zxdh_hw *hw); + void (*set_features)(struct zxdh_hw *hw, uint64_t features); + uint16_t (*set_queue_irq)(struct zxdh_hw *hw, struct zxdh_virtqueue *vq, uint16_t vec); + uint16_t (*set_config_irq)(struct zxdh_hw *hw, uint16_t vec); + uint8_t (*get_isr)(struct zxdh_hw *hw); + uint16_t (*get_queue_num)(struct zxdh_hw *hw, uint16_t queue_id); + void (*set_queue_num)(struct zxdh_hw *hw, uint16_t queue_id, uint16_t vq_size); + + int32_t (*setup_queue)(struct zxdh_hw *hw, struct zxdh_virtqueue *vq); + void (*del_queue)(struct zxdh_hw *hw, struct zxdh_virtqueue *vq); +}; + +struct zxdh_hw_internal { + const struct zxdh_pci_ops *zxdh_vtpci_ops; +}; + +#define ZXDH_VTPCI_OPS(hw) (zxdh_hw_internal[(hw)->port_id].zxdh_vtpci_ops) + +extern struct zxdh_hw_internal zxdh_hw_internal[RTE_MAX_ETHPORTS]; +extern const struct zxdh_pci_ops zxdh_dev_pci_ops; + +void zxdh_pci_reset(struct zxdh_hw *hw); +void zxdh_pci_read_dev_config(struct zxdh_hw *hw, size_t offset, + void *dst, int32_t length); + +int32_t zxdh_read_pci_caps(struct rte_pci_device *dev, struct zxdh_hw *hw); +void zxdh_get_pci_dev_config(struct zxdh_hw *hw); + +uint16_t zxdh_pci_get_features(struct zxdh_hw *hw); +enum zxdh_msix_status zxdh_pci_msix_detect(struct rte_pci_device *dev); +uint8_t zxdh_pci_isr(struct zxdh_hw *hw); +void zxdh_pci_reinit_complete(struct zxdh_hw *hw); +void zxdh_pci_set_status(struct zxdh_hw *hw, uint8_t status); + +#endif /* ZXDH_PCI_H */ diff --git a/drivers/net/zxdh/zxdh_queue.c b/drivers/net/zxdh/zxdh_queue.c new file mode 100644 index 0000000000..462a88b23c --- /dev/null +++ b/drivers/net/zxdh/zxdh_queue.c @@ -0,0 +1,127 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2024 ZTE Corporation + */ + +#include +#include +#include + +#include "zxdh_queue.h" +#include "zxdh_logs.h" +#include "zxdh_pci.h" +#include "zxdh_common.h" +#include "zxdh_msg.h" + +struct rte_mbuf * +zxdh_virtqueue_detach_unused(struct zxdh_virtqueue *vq) +{ + struct rte_mbuf *cookie = NULL; + int32_t idx = 0; + + if (vq == NULL) + return NULL; + + for (idx = 0; idx < vq->vq_nentries; idx++) { + cookie = vq->vq_descx[idx].cookie; + if (cookie != NULL) { + vq->vq_descx[idx].cookie = NULL; + return cookie; + } + } + return NULL; +} + +static int32_t +zxdh_release_channel(struct rte_eth_dev *dev) +{ + struct zxdh_hw *hw = dev->data->dev_private; + uint16_t nr_vq = hw->queue_num; + uint32_t var = 0; + uint32_t addr = 0; + uint32_t widx = 0; + uint32_t bidx = 0; + uint16_t pch = 0; + uint16_t lch = 0; + int32_t ret = 0; + + ret = zxdh_timedlock(hw, 1000); + 
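/* The channel bitmap at ZXDH_QUERES_SHARE_BASE is a shared resource, so it is only modified while holding the hardware lock taken here (1000 ms timeout). */ +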
if (ret) { + PMD_DRV_LOG(ERR, "Failed to acquire hw lock: timeout"); + return -1; + } + + for (lch = 0; lch < nr_vq; lch++) { + if (hw->channel_context[lch].valid == 0) { + PMD_DRV_LOG(DEBUG, "Logic channel %d does not need to be released", lch); + continue; + } + + pch = hw->channel_context[lch].ph_chno; + widx = pch / 32; + bidx = pch % 32; + + addr = ZXDH_QUERES_SHARE_BASE + (widx * sizeof(uint32_t)); + var = zxdh_read_bar_reg(dev, ZXDH_BAR0_INDEX, addr); + var &= ~(1 << bidx); + zxdh_write_bar_reg(dev, ZXDH_BAR0_INDEX, addr, var); + + hw->channel_context[lch].valid = 0; + hw->channel_context[lch].ph_chno = 0; + } + + zxdh_release_lock(hw); + + return 0; +} + +int32_t +zxdh_get_queue_type(uint16_t vtpci_queue_idx) +{ + if (vtpci_queue_idx % 2 == 0) + return ZXDH_VTNET_RQ; + else + return ZXDH_VTNET_TQ; +} + +int32_t +zxdh_free_queues(struct rte_eth_dev *dev) +{ + struct zxdh_hw *hw = dev->data->dev_private; + uint16_t nr_vq = hw->queue_num; + struct zxdh_virtqueue *vq = NULL; + int32_t queue_type = 0; + uint16_t i = 0; + + if (hw->vqs == NULL) + return 0; + + if (zxdh_release_channel(dev) < 0) { + PMD_DRV_LOG(ERR, "Failed to clear coi table"); + return -1; + } + + for (i = 0; i < nr_vq; i++) { + vq = hw->vqs[i]; + if (vq == NULL) + continue; + + ZXDH_VTPCI_OPS(hw)->del_queue(hw, vq); + queue_type = zxdh_get_queue_type(i); + if (queue_type == ZXDH_VTNET_RQ) { + rte_free(vq->sw_ring); + rte_memzone_free(vq->rxq.mz); + } else if (queue_type == ZXDH_VTNET_TQ) { + rte_memzone_free(vq->txq.mz); + rte_memzone_free(vq->txq.zxdh_net_hdr_mz); + } + + rte_free(vq); + hw->vqs[i] = NULL; + PMD_DRV_LOG(DEBUG, "Queue %d released successfully", i); + } + + rte_free(hw->vqs); + hw->vqs = NULL; + + return 0; +} diff --git a/drivers/net/zxdh/zxdh_queue.h b/drivers/net/zxdh/zxdh_queue.h new file mode 100644 index 0000000000..1088bf08fc --- /dev/null +++ b/drivers/net/zxdh/zxdh_queue.h @@ -0,0 +1,277 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2024 ZTE Corporation + */ + +#ifndef ZXDH_QUEUE_H +#define ZXDH_QUEUE_H + +#include + +#include + +#include "zxdh_ethdev.h" +#include "zxdh_rxtx.h" +#include "zxdh_pci.h" + +enum { ZXDH_VTNET_RQ = 0, ZXDH_VTNET_TQ = 1 }; + +#define ZXDH_VIRTQUEUE_MAX_NAME_SZ 32 +#define ZXDH_RQ_QUEUE_IDX 0 +#define ZXDH_TQ_QUEUE_IDX 1 +#define ZXDH_MAX_TX_INDIRECT 8 + +/* This marks a buffer as write-only (otherwise read-only). */ +#define ZXDH_VRING_DESC_F_WRITE 2 +/* This flag means the descriptor was made available by the driver */ +#define ZXDH_VRING_PACKED_DESC_F_AVAIL (1 << (7)) + +#define ZXDH_RING_EVENT_FLAGS_ENABLE 0x0 +#define ZXDH_RING_EVENT_FLAGS_DISABLE 0x1 +#define ZXDH_RING_EVENT_FLAGS_DESC 0x2 + +#define ZXDH_VQ_RING_DESC_CHAIN_END 32768 + +/* + * ring descriptors: 16 bytes. + * These can chain together via "next". + */ +struct zxdh_vring_desc { + uint64_t addr; /* Address (guest-physical). */ + uint32_t len; /* Length. */ + uint16_t flags; /* The flags as indicated above. */ + uint16_t next; /* We chain unused descriptors via this. */ +} __rte_packed; + +struct zxdh_vring_used_elem { + /* Index of start of used descriptor chain. */ + uint32_t id; + /* Total length of the descriptor chain which was written to.
*/ + uint32_t len; +}; + +struct zxdh_vring_used { + uint16_t flags; + uint16_t idx; + struct zxdh_vring_used_elem ring[]; +} __rte_packed; + +struct zxdh_vring_avail { + uint16_t flags; + uint16_t idx; + uint16_t ring[]; +} __rte_packed; + +struct zxdh_vring_packed_desc { + uint64_t addr; + uint32_t len; + uint16_t id; + uint16_t flags; +} __rte_packed; + +struct zxdh_vring_packed_desc_event { + uint16_t desc_event_off_wrap; + uint16_t desc_event_flags; +} __rte_packed; + +struct zxdh_vring_packed { + uint32_t num; + struct zxdh_vring_packed_desc *desc; + struct zxdh_vring_packed_desc_event *driver; + struct zxdh_vring_packed_desc_event *device; +} __rte_packed; + +struct zxdh_vq_desc_extra { + void *cookie; + uint16_t ndescs; + uint16_t next; +} __rte_packed; + +struct zxdh_virtqueue { + struct zxdh_hw *hw; /* < zxdh_hw structure pointer. */ + struct { + /* vring keeping descs and events */ + struct zxdh_vring_packed ring; + uint8_t used_wrap_counter; + uint8_t rsv; + uint16_t cached_flags; /* < cached flags for descs */ + uint16_t event_flags_shadow; + uint16_t rsv1; + } __rte_packed vq_packed; + uint16_t vq_used_cons_idx; /* < last consumed descriptor */ + uint16_t vq_nentries; /* < vring desc numbers */ + uint16_t vq_free_cnt; /* < num of desc available */ + uint16_t vq_avail_idx; /* < sync until needed */ + uint16_t vq_free_thresh; /* < free threshold */ + uint16_t rsv2; + + void *vq_ring_virt_mem; /* < linear address of vring */ + uint32_t vq_ring_size; + + union { + struct zxdh_virtnet_rx rxq; + struct zxdh_virtnet_tx txq; + }; + + /* + * physical address of vring, or virtual address + */ + rte_iova_t vq_ring_mem; + + /* + * Head of the free chain in the descriptor table. If + * there are no free descriptors, this will be set to + * VQ_RING_DESC_CHAIN_END. + */ + uint16_t vq_desc_head_idx; + uint16_t vq_desc_tail_idx; + uint16_t vq_queue_index; /* < PCI queue index */ + uint16_t offset; /* < relative offset to obtain addr in mbuf */ + uint16_t *notify_addr; + struct rte_mbuf **sw_ring; /* < RX software ring. 
*/ + struct zxdh_vq_desc_extra vq_descx[]; +} __rte_packed; + +struct zxdh_type_hdr { + uint8_t port; /* bit[0:1] 00-np 01-DRS 10-DTP */ + uint8_t pd_len; + uint8_t num_buffers; + uint8_t reserved; +} __rte_packed; /* 4B */ + +struct zxdh_pi_hdr { + uint8_t pi_len; + uint8_t pkt_type; + uint16_t vlan_id; + uint32_t ipv6_extend; + uint16_t l3_offset; + uint16_t l4_offset; + uint8_t phy_port; + uint8_t pkt_flag_hi8; + uint16_t pkt_flag_lw16; + union { + struct { + uint64_t sa_idx; + uint8_t reserved_8[8]; + } dl; + struct { + uint32_t lro_flag; + uint32_t lro_mss; + uint16_t err_code; + uint16_t pm_id; + uint16_t pkt_len; + uint8_t reserved[2]; + } ul; + }; +} __rte_packed; /* 32B */ + +struct zxdh_pd_hdr_dl { + uint32_t ol_flag; + uint8_t tag_idx; + uint8_t tag_data; + uint16_t dst_vfid; + uint32_t svlan_insert; + uint32_t cvlan_insert; +} __rte_packed; /* 16B */ + +struct zxdh_net_hdr_dl { + struct zxdh_type_hdr type_hdr; /* 4B */ + struct zxdh_pi_hdr pi_hdr; /* 32B */ + struct zxdh_pd_hdr_dl pd_hdr; /* 16B */ +} __rte_packed; + +struct zxdh_pd_hdr_ul { + uint32_t pkt_flag; + uint32_t rss_hash; + uint32_t fd; + uint32_t striped_vlan_tci; + uint8_t tag_idx; + uint8_t tag_data; + uint16_t src_vfid; + uint16_t pkt_type_out; + uint16_t pkt_type_in; +} __rte_packed; /* 24B */ + +struct zxdh_net_hdr_ul { + struct zxdh_type_hdr type_hdr; /* 4B */ + struct zxdh_pi_hdr pi_hdr; /* 32B */ + struct zxdh_pd_hdr_ul pd_hdr; /* 24B */ +} __rte_packed; /* 60B */ + +struct zxdh_tx_region { + struct zxdh_net_hdr_dl tx_hdr; + union { + struct zxdh_vring_desc tx_indir[ZXDH_MAX_TX_INDIRECT]; + struct zxdh_vring_packed_desc tx_packed_indir[ZXDH_MAX_TX_INDIRECT]; + } __rte_packed; +}; + +static inline size_t +vring_size(struct zxdh_hw *hw, uint32_t num, unsigned long align) +{ + size_t size; + + if (vtpci_packed_queue(hw)) { + size = num * sizeof(struct zxdh_vring_packed_desc); + size += sizeof(struct zxdh_vring_packed_desc_event); + size = RTE_ALIGN_CEIL(size, align); + size += sizeof(struct zxdh_vring_packed_desc_event); + return size; + } + + size = num * sizeof(struct zxdh_vring_desc); + size += sizeof(struct zxdh_vring_avail) + (num * sizeof(uint16_t)); + size = RTE_ALIGN_CEIL(size, align); + size += sizeof(struct zxdh_vring_used) + (num * sizeof(struct zxdh_vring_used_elem)); + return size; +} + +static inline void +vring_init_packed(struct zxdh_vring_packed *vr, uint8_t *p, + unsigned long align, uint32_t num) +{ + vr->num = num; + vr->desc = (struct zxdh_vring_packed_desc *)p; + vr->driver = (struct zxdh_vring_packed_desc_event *)(p + + vr->num * sizeof(struct zxdh_vring_packed_desc)); + vr->device = (struct zxdh_vring_packed_desc_event *)RTE_ALIGN_CEIL(((uintptr_t)vr->driver + + sizeof(struct zxdh_vring_packed_desc_event)), align); +} + +static inline void +vring_desc_init_packed(struct zxdh_virtqueue *vq, int32_t n) +{ + int32_t i = 0; + + for (i = 0; i < n - 1; i++) { + vq->vq_packed.ring.desc[i].id = i; + vq->vq_descx[i].next = i + 1; + } + vq->vq_packed.ring.desc[i].id = i; + vq->vq_descx[i].next = ZXDH_VQ_RING_DESC_CHAIN_END; +} + +static inline void +vring_desc_init_indirect_packed(struct zxdh_vring_packed_desc *dp, int32_t n) +{ + int32_t i = 0; + + for (i = 0; i < n; i++) { + dp[i].id = (uint16_t)i; + dp[i].flags = ZXDH_VRING_DESC_F_WRITE; + } +} + +static inline void +virtqueue_disable_intr(struct zxdh_virtqueue *vq) +{ + if (vq->vq_packed.event_flags_shadow != ZXDH_RING_EVENT_FLAGS_DISABLE) { + vq->vq_packed.event_flags_shadow = ZXDH_RING_EVENT_FLAGS_DISABLE; + 
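/* Publish the shadow value through the driver event suppression area so the device stops raising interrupts for this queue. */ +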
vq->vq_packed.ring.driver->desc_event_flags = vq->vq_packed.event_flags_shadow; + } +} + +struct rte_mbuf *zxdh_virtqueue_detach_unused(struct zxdh_virtqueue *vq); +int32_t zxdh_free_queues(struct rte_eth_dev *dev); +int32_t zxdh_get_queue_type(uint16_t vtpci_queue_idx); + +#endif /* ZXDH_QUEUE_H */ diff --git a/drivers/net/zxdh/zxdh_rxtx.h b/drivers/net/zxdh/zxdh_rxtx.h new file mode 100644 index 0000000000..de9353b223 --- /dev/null +++ b/drivers/net/zxdh/zxdh_rxtx.h @@ -0,0 +1,47 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2024 ZTE Corporation + */ + +#ifndef ZXDH_RXTX_H +#define ZXDH_RXTX_H + +#include + +#include +#include + +struct zxdh_virtnet_stats { + uint64_t packets; + uint64_t bytes; + uint64_t errors; + uint64_t multicast; + uint64_t broadcast; + uint64_t truncated_err; + uint64_t size_bins[8]; +}; + +struct zxdh_virtnet_rx { + struct zxdh_virtqueue *vq; + + /* dummy mbuf, for wraparound when processing RX ring. */ + struct rte_mbuf fake_mbuf; + + uint64_t mbuf_initializer; /* value to init mbufs. */ + struct rte_mempool *mpool; /* mempool for mbuf allocation */ + uint16_t queue_id; /* DPDK queue index. */ + uint16_t port_id; /* Device port identifier. */ + struct zxdh_virtnet_stats stats; + const struct rte_memzone *mz; /* mem zone to populate RX ring. */ +} __rte_packed; + +struct zxdh_virtnet_tx { + struct zxdh_virtqueue *vq; + const struct rte_memzone *zxdh_net_hdr_mz; /* memzone to populate hdr. */ + rte_iova_t zxdh_net_hdr_mem; /* hdr for each xmit packet */ + uint16_t queue_id; /* DPDK queue index. */ + uint16_t port_id; /* Device port identifier. */ + struct zxdh_virtnet_stats stats; + const struct rte_memzone *mz; /* mem zone to populate TX ring. */ +} __rte_packed; + +#endif /* ZXDH_RXTX_H */ diff --git a/lib/power/power_acpi_cpufreq.c b/drivers/power/acpi/acpi_cpufreq.c similarity index 95% rename from lib/power/power_acpi_cpufreq.c rename to drivers/power/acpi/acpi_cpufreq.c index ae809fbb60..81a5e3f6ea 100644 --- a/lib/power/power_acpi_cpufreq.c +++ b/drivers/power/acpi/acpi_cpufreq.c @@ -10,7 +10,7 @@ #include #include -#include "power_acpi_cpufreq.h" +#include "acpi_cpufreq.h" #include "power_common.h" #define STR_SIZE 1024 @@ -587,3 +587,23 @@ int power_acpi_get_capabilities(unsigned int lcore_id, return 0; } + +static struct rte_power_cpufreq_ops acpi_ops = { + .name = "acpi", + .init = power_acpi_cpufreq_init, + .exit = power_acpi_cpufreq_exit, + .check_env_support = power_acpi_cpufreq_check_supported, + .get_avail_freqs = power_acpi_cpufreq_freqs, + .get_freq = power_acpi_cpufreq_get_freq, + .set_freq = power_acpi_cpufreq_set_freq, + .freq_down = power_acpi_cpufreq_freq_down, + .freq_up = power_acpi_cpufreq_freq_up, + .freq_max = power_acpi_cpufreq_freq_max, + .freq_min = power_acpi_cpufreq_freq_min, + .turbo_status = power_acpi_turbo_status, + .enable_turbo = power_acpi_enable_turbo, + .disable_turbo = power_acpi_disable_turbo, + .get_caps = power_acpi_get_capabilities +}; + +RTE_POWER_REGISTER_CPUFREQ_OPS(acpi_ops); diff --git a/lib/power/power_acpi_cpufreq.h b/drivers/power/acpi/acpi_cpufreq.h similarity index 97% rename from lib/power/power_acpi_cpufreq.h rename to drivers/power/acpi/acpi_cpufreq.h index 682fd9278c..01727182ae 100644 --- a/lib/power/power_acpi_cpufreq.h +++ b/drivers/power/acpi/acpi_cpufreq.h @@ -2,15 +2,15 @@ * Copyright(c) 2010-2014 Intel Corporation */ -#ifndef _POWER_ACPI_CPUFREQ_H -#define _POWER_ACPI_CPUFREQ_H +#ifndef ACPI_CPUFREQ_H +#define ACPI_CPUFREQ_H /** * @file - * RTE Power Management via userspace ACPI 
cpufreq + * Power Management via userspace ACPI cpufreq */ -#include "rte_power.h" +#include "power_cpufreq.h" /** * Check if ACPI power management is supported. @@ -214,4 +214,4 @@ int power_acpi_disable_turbo(unsigned int lcore_id); int power_acpi_get_capabilities(unsigned int lcore_id, struct rte_power_core_capabilities *caps); -#endif +#endif /* ACPI_CPUFREQ_H */ diff --git a/drivers/power/acpi/meson.build b/drivers/power/acpi/meson.build new file mode 100644 index 0000000000..f5afc893ce --- /dev/null +++ b/drivers/power/acpi/meson.build @@ -0,0 +1,10 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2024 Advanced Micro Devices, Inc. + +if not is_linux + build = false + reason = 'only supported on Linux' +endif +sources = files('acpi_cpufreq.c') + +deps += ['power'] diff --git a/lib/power/power_amd_pstate_cpufreq.c b/drivers/power/amd_pstate/amd_pstate_cpufreq.c similarity index 95% rename from lib/power/power_amd_pstate_cpufreq.c rename to drivers/power/amd_pstate/amd_pstate_cpufreq.c index 2b728eca18..95495bff7d 100644 --- a/lib/power/power_amd_pstate_cpufreq.c +++ b/drivers/power/amd_pstate/amd_pstate_cpufreq.c @@ -1,7 +1,7 @@ /* SPDX-License-Identifier: BSD-3-Clause * Copyright(c) 2010-2021 Intel Corporation * Copyright(c) 2021 Arm Limited - * Copyright(c) 2023 Amd Limited + * Copyright(c) 2024 Advanced Micro Devices, Inc. */ #include @@ -9,7 +9,7 @@ #include #include -#include "power_amd_pstate_cpufreq.h" +#include "amd_pstate_cpufreq.h" #include "power_common.h" /* macros used for rounding frequency to nearest 1000 */ @@ -710,3 +710,23 @@ power_amd_pstate_get_capabilities(unsigned int lcore_id, return 0; } + +static struct rte_power_cpufreq_ops amd_pstate_ops = { + .name = "amd-pstate", + .init = power_amd_pstate_cpufreq_init, + .exit = power_amd_pstate_cpufreq_exit, + .check_env_support = power_amd_pstate_cpufreq_check_supported, + .get_avail_freqs = power_amd_pstate_cpufreq_freqs, + .get_freq = power_amd_pstate_cpufreq_get_freq, + .set_freq = power_amd_pstate_cpufreq_set_freq, + .freq_down = power_amd_pstate_cpufreq_freq_down, + .freq_up = power_amd_pstate_cpufreq_freq_up, + .freq_max = power_amd_pstate_cpufreq_freq_max, + .freq_min = power_amd_pstate_cpufreq_freq_min, + .turbo_status = power_amd_pstate_turbo_status, + .enable_turbo = power_amd_pstate_enable_turbo, + .disable_turbo = power_amd_pstate_disable_turbo, + .get_caps = power_amd_pstate_get_capabilities +}; + +RTE_POWER_REGISTER_CPUFREQ_OPS(amd_pstate_ops); diff --git a/lib/power/power_amd_pstate_cpufreq.h b/drivers/power/amd_pstate/amd_pstate_cpufreq.h similarity index 95% rename from lib/power/power_amd_pstate_cpufreq.h rename to drivers/power/amd_pstate/amd_pstate_cpufreq.h index b02f9f98e4..9de2c2b39d 100644 --- a/lib/power/power_amd_pstate_cpufreq.h +++ b/drivers/power/amd_pstate/amd_pstate_cpufreq.h @@ -1,18 +1,18 @@ /* SPDX-License-Identifier: BSD-3-Clause * Copyright(c) 2010-2021 Intel Corporation * Copyright(c) 2021 Arm Limited - * Copyright(c) 2023 Amd Limited + * Copyright(c) 2024 Advanced Micro Devices, Inc. */ -#ifndef _POWER_AMD_PSTATE_CPUFREQ_H -#define _POWER_AMD_PSTATE_CPUFREQ_H +#ifndef AMD_PSTATE_CPUFREQ_H +#define AMD_PSTATE_CPUFREQ_H /** * @file - * RTE Power Management via userspace AMD pstate cpufreq + * Power Management via userspace AMD pstate cpufreq */ -#include "rte_power.h" +#include "power_cpufreq.h" /** * Check if amd p-state power management is supported. 
@@ -216,4 +216,4 @@ int power_amd_pstate_disable_turbo(unsigned int lcore_id); int power_amd_pstate_get_capabilities(unsigned int lcore_id, struct rte_power_core_capabilities *caps); -#endif /* _POWER_AMD_PSTATET_CPUFREQ_H */ +#endif /* AMD_PSTATE_CPUFREQ_H */ diff --git a/drivers/power/amd_pstate/meson.build b/drivers/power/amd_pstate/meson.build new file mode 100644 index 0000000000..acaf20b388 --- /dev/null +++ b/drivers/power/amd_pstate/meson.build @@ -0,0 +1,10 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2024 Advanced Micro Devices, Inc. + +if not is_linux + build = false + reason = 'only supported on Linux' +endif +sources = files('amd_pstate_cpufreq.c') + +deps += ['power'] diff --git a/drivers/power/amd_uncore/amd_uncore.c b/drivers/power/amd_uncore/amd_uncore.c new file mode 100644 index 0000000000..c3e95cdc08 --- /dev/null +++ b/drivers/power/amd_uncore/amd_uncore.c @@ -0,0 +1,329 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2024 Advanced Micro Devices, Inc. + */ + +#include +#include +#include + +#include + +#include "amd_uncore.h" +#include "power_common.h" +#include "e_smi/e_smi.h" + +#define MAX_NUMA_DIE 8 + +struct __rte_cache_aligned uncore_power_info { + unsigned int die; /* Core die id */ + unsigned int pkg; /* Package id */ + uint32_t freqs[RTE_MAX_UNCORE_FREQS]; /* Frequency array */ + uint32_t nb_freqs; /* Number of available freqs */ + uint32_t curr_idx; /* Freq index in freqs array */ + uint32_t max_freq; /* System max uncore freq */ + uint32_t min_freq; /* System min uncore freq */ +}; + +static struct uncore_power_info uncore_info[RTE_MAX_NUMA_NODES][MAX_NUMA_DIE]; +static int esmi_initialized; +static unsigned int hsmp_proto_ver; + +static int +set_uncore_freq_internal(struct uncore_power_info *ui, uint32_t idx) +{ + int ret; + + if (idx >= RTE_MAX_UNCORE_FREQS || idx >= ui->nb_freqs) { + POWER_LOG(DEBUG, "Invalid uncore frequency index %u, which " + "should be less than %u", idx, ui->nb_freqs); + return -1; + } + + ret = esmi_apb_disable(ui->pkg, idx); + if (ret != ESMI_SUCCESS) { + POWER_LOG(ERR, "DF P-state '%u' set failed for pkg %02u", + idx, ui->pkg); + return -1; + } + + POWER_DEBUG_LOG("DF P-state '%u' to be set for pkg %02u die %02u", + idx, ui->pkg, ui->die); + + /* write the minimum value first if the target freq is less than current max */ + ui->curr_idx = idx; + + return 0; +} + +static int +power_init_for_setting_uncore_freq(struct uncore_power_info *ui) +{ + switch (hsmp_proto_ver) { + case HSMP_PROTO_VER5: + ui->max_freq = 1800000; /* Hz */ + ui->min_freq = 1200000; /* Hz */ + break; + case HSMP_PROTO_VER2: + default: + ui->max_freq = 1600000; /* Hz */ + ui->min_freq = 1200000; /* Hz */ + } + + return 0; +} + +/* + * Get the available uncore frequencies of the specific die. + */ +static int +power_get_available_uncore_freqs(struct uncore_power_info *ui) +{ + ui->nb_freqs = 3; + if (ui->nb_freqs >= RTE_MAX_UNCORE_FREQS) { + POWER_LOG(ERR, "Too many available uncore frequencies: %d", + ui->nb_freqs); + return -1; + } + + /* Generate the uncore freq bucket array. 
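The supported DF P-state frequencies are fixed for a given HSMP protocol version, so the bucket values are hard-coded below.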
*/ + switch (hsmp_proto_ver) { + case HSMP_PROTO_VER5: + ui->freqs[0] = 1800000; + ui->freqs[1] = 1440000; + ui->freqs[2] = 1200000; + break; + case HSMP_PROTO_VER2: + default: + ui->freqs[0] = 1600000; + ui->freqs[1] = 1333000; + ui->freqs[2] = 1200000; + } + + POWER_DEBUG_LOG("%d frequency(s) of pkg %02u die %02u are available", + ui->num_uncore_freqs, ui->pkg, ui->die); + + return 0; +} + +static int +check_pkg_die_values(unsigned int pkg, unsigned int die) +{ + unsigned int max_pkgs, max_dies; + max_pkgs = power_amd_uncore_get_num_pkgs(); + if (max_pkgs == 0) + return -1; + if (pkg >= max_pkgs) { + POWER_LOG(DEBUG, "Package number %02u can not exceed %u", + pkg, max_pkgs); + return -1; + } + + max_dies = power_amd_uncore_get_num_dies(pkg); + if (max_dies == 0) + return -1; + if (die >= max_dies) { + POWER_LOG(DEBUG, "Die number %02u can not exceed %u", + die, max_dies); + return -1; + } + + return 0; +} + +static void +power_amd_uncore_esmi_init(void) +{ + if (esmi_init() == ESMI_SUCCESS) { + if (esmi_hsmp_proto_ver_get(&hsmp_proto_ver) == + ESMI_SUCCESS) + esmi_initialized = 1; + } +} + +int +power_amd_uncore_init(unsigned int pkg, unsigned int die) +{ + struct uncore_power_info *ui; + int ret; + + if (!esmi_initialized) { + ret = esmi_init(); + if (ret != ESMI_SUCCESS) { + POWER_LOG(DEBUG, "ESMI Not initialized, drivers not found"); + return -1; + } + ret = esmi_hsmp_proto_ver_get(&hsmp_proto_ver); + if (ret != ESMI_SUCCESS) { + POWER_LOG(DEBUG, "HSMP Proto Version Get failed with " + "error %s", esmi_get_err_msg(ret)); + esmi_exit(); + return -1; + } + esmi_initialized = 1; + } + + ret = check_pkg_die_values(pkg, die); + if (ret < 0) + return -1; + + ui = &uncore_info[pkg][die]; + ui->die = die; + ui->pkg = pkg; + + /* Init for setting uncore die frequency */ + if (power_init_for_setting_uncore_freq(ui) < 0) { + POWER_LOG(DEBUG, "Cannot init for setting uncore frequency for " + "pkg %02u die %02u", pkg, die); + return -1; + } + + /* Get the available frequencies */ + if (power_get_available_uncore_freqs(ui) < 0) { + POWER_LOG(DEBUG, "Cannot get available uncore frequencies of " + "pkg %02u die %02u", pkg, die); + return -1; + } + + return 0; +} + +int +power_amd_uncore_exit(unsigned int pkg, unsigned int die) +{ + struct uncore_power_info *ui; + + int ret = check_pkg_die_values(pkg, die); + if (ret < 0) + return -1; + + ui = &uncore_info[pkg][die]; + ui->nb_freqs = 0; + + if (esmi_initialized) { + esmi_exit(); + esmi_initialized = 0; + } + + return 0; +} + +uint32_t +power_get_amd_uncore_freq(unsigned int pkg, unsigned int die) +{ + int ret = check_pkg_die_values(pkg, die); + if (ret < 0) + return -1; + + return uncore_info[pkg][die].curr_idx; +} + +int +power_set_amd_uncore_freq(unsigned int pkg, unsigned int die, uint32_t index) +{ + int ret = check_pkg_die_values(pkg, die); + if (ret < 0) + return -1; + + return set_uncore_freq_internal(&(uncore_info[pkg][die]), index); +} + +int +power_amd_uncore_freq_max(unsigned int pkg, unsigned int die) +{ + int ret = check_pkg_die_values(pkg, die); + if (ret < 0) + return -1; + + return set_uncore_freq_internal(&(uncore_info[pkg][die]), 0); +} + + +int +power_amd_uncore_freq_min(unsigned int pkg, unsigned int die) +{ + int ret = check_pkg_die_values(pkg, die); + if (ret < 0) + return -1; + + struct uncore_power_info *ui = &uncore_info[pkg][die]; + + return set_uncore_freq_internal(&(uncore_info[pkg][die]), ui->nb_freqs - 1); +} + +int +power_amd_uncore_freqs(unsigned int pkg, unsigned int die, uint32_t *freqs, uint32_t num) +{ + struct 
uncore_power_info *ui; + + int ret = check_pkg_die_values(pkg, die); + if (ret < 0) + return -1; + + if (freqs == NULL) { + POWER_LOG(ERR, "NULL buffer supplied"); + return 0; + } + + ui = &uncore_info[pkg][die]; + if (num < ui->nb_freqs) { + POWER_LOG(ERR, "Buffer size is not enough"); + return 0; + } + rte_memcpy(freqs, ui->freqs, ui->nb_freqs * sizeof(uint32_t)); + + return ui->nb_freqs; +} + +int +power_amd_uncore_get_num_freqs(unsigned int pkg, unsigned int die) +{ + int ret = check_pkg_die_values(pkg, die); + if (ret < 0) + return -1; + + return uncore_info[pkg][die].nb_freqs; +} + +unsigned int +power_amd_uncore_get_num_pkgs(void) +{ + uint32_t num_pkgs = 0; + int ret; + + if (esmi_initialized) { + ret = esmi_number_of_sockets_get(&num_pkgs); + if (ret != ESMI_SUCCESS) { + POWER_LOG(ERR, "Failed to get number of sockets"); + num_pkgs = 0; + } + } + return num_pkgs; +} + +unsigned int +power_amd_uncore_get_num_dies(unsigned int pkg) +{ + if (pkg >= power_amd_uncore_get_num_pkgs()) { + POWER_LOG(ERR, "Invalid package ID"); + return 0; + } + + return 1; +} + +static struct rte_power_uncore_ops amd_uncore_ops = { + .name = "amd-hsmp", + .cb = power_amd_uncore_esmi_init, + .init = power_amd_uncore_init, + .exit = power_amd_uncore_exit, + .get_avail_freqs = power_amd_uncore_freqs, + .get_num_pkgs = power_amd_uncore_get_num_pkgs, + .get_num_dies = power_amd_uncore_get_num_dies, + .get_num_freqs = power_amd_uncore_get_num_freqs, + .get_freq = power_get_amd_uncore_freq, + .set_freq = power_set_amd_uncore_freq, + .freq_max = power_amd_uncore_freq_max, + .freq_min = power_amd_uncore_freq_min, +}; + +RTE_POWER_REGISTER_UNCORE_OPS(amd_uncore_ops); diff --git a/drivers/power/amd_uncore/amd_uncore.h b/drivers/power/amd_uncore/amd_uncore.h new file mode 100644 index 0000000000..9431c6b85e --- /dev/null +++ b/drivers/power/amd_uncore/amd_uncore.h @@ -0,0 +1,217 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2024 Advanced Micro Devices, Inc. + */ + +#ifndef POWER_AMD_UNCORE_H +#define POWER_AMD_UNCORE_H + +/** + * @file + * AMD Uncore Frequency Management + */ + +#include "power_uncore_ops.h" + +/** + * Initialize uncore frequency management for specific die on a package. + * It will get the available frequencies and prepare to set new die frequencies. + * + * This function should NOT be called in the fast path. + * + * @param pkg + * Package number. + * Each physical CPU in a system is referred to as a package. + * @param die + * Die number. + * Each package can have several dies connected together via the uncore mesh. + * + * @return + * - 0 on success. + * - Negative on error. + */ +int +power_amd_uncore_init(unsigned int pkg, unsigned int die); + +/** + * Exit uncore frequency management on a specific die on a package. + * It will restore uncore min and* max values to previous values + * before initialization of API. + * + * This function should NOT be called in the fast path. + * + * @param pkg + * Package number. + * Each physical CPU in a system is referred to as a package. + * @param die + * Die number. + * Each package can have several dies connected together via the uncore mesh. + * + * @return + * - 0 on success. + * - Negative on error. + */ +int +power_amd_uncore_exit(unsigned int pkg, unsigned int die); + +/** + * Return the current index of available frequencies of a specific die on a package. + * It should be protected outside of this function for threadsafe. + * + * This function should NOT be called in the fast path. + * + * @param pkg + * Package number. 
+ * Each physical CPU in a system is referred to as a package. + * @param die + * Die number. + * Each package can have several dies connected together via the uncore mesh. + * + * @return + * The current index of available frequencies. + * If error, it will return 'RTE_POWER_INVALID_FREQ_INDEX = (~0)'. + */ +uint32_t +power_get_amd_uncore_freq(unsigned int pkg, unsigned int die); + +/** + * Set minimum and maximum uncore frequency for specified die on a package + * to specified index value. + * It should be protected outside of this function for threadsafe. + * + * This function should NOT be called in the fast path. + * + * @param pkg + * Package number. + * Each physical CPU in a system is referred to as a package. + * @param die + * Die number. + * Each package can have several dies connected together via the uncore mesh. + * @param index + * The index of available frequencies. + * + * @return + * - 1 on success with frequency changed. + * - 0 on success without frequency changed. + * - Negative on error. + */ +int +power_set_amd_uncore_freq(unsigned int pkg, unsigned int die, uint32_t index); + +/** + * Set minimum and maximum uncore frequency for specified die on a package + * to maximum value according to the available frequencies. + * It should be protected outside of this function for threadsafe. + * + * This function should NOT be called in the fast path. + * + * @param pkg + * Package number. + * Each physical CPU in a system is referred to as a package. + * @param die + * Die number. + * Each package can have several dies connected together via the uncore mesh. + * + * @return + * - 1 on success with frequency changed. + * - 0 on success without frequency changed. + * - Negative on error. + */ +int +power_amd_uncore_freq_max(unsigned int pkg, unsigned int die); + +/** + * Set minimum and maximum uncore frequency for specified die on a package + * to minimum value according to the available frequencies. + * It should be protected outside of this function for threadsafe. + * + * This function should NOT be called in the fast path. + * + * @param pkg + * Package number. + * Each physical CPU in a system is referred to as a package. + * @param die + * Die number. + * Each package can have several dies connected together via the uncore mesh. + * + * @return + * - 1 on success with frequency changed. + * - 0 on success without frequency changed. + * - Negative on error. + */ +int +power_amd_uncore_freq_min(unsigned int pkg, unsigned int die); + +/** + * Return the list of available frequencies in the index array. + * + * This function should NOT be called in the fast path. + * + * @param pkg + * Package number. + * Each physical CPU in a system is referred to as a package. + * @param die + * Die number. + * Each package can have several dies connected together via the uncore mesh. + * @param freqs + * The buffer array to save the frequencies. + * @param num + * The number of frequencies to get. + * + * @return + * - The number of available index's in frequency array. + * - Negative on error. + */ +int +power_amd_uncore_freqs(unsigned int pkg, unsigned int die, + unsigned int *freqs, unsigned int num); + +/** + * Return the list length of available frequencies in the index array. + * + * This function should NOT be called in the fast path. + * + * @param pkg + * Package number. + * Each physical CPU in a system is referred to as a package. + * @param die + * Die number. + * Each package can have several dies connected together via the uncore mesh. 
+ * + * @return + * - The number of available index's in frequency array. + * - Negative on error. + */ +int +power_amd_uncore_get_num_freqs(unsigned int pkg, unsigned int die); + +/** + * Return the number of packages (CPUs) on a system + * by parsing the uncore sysfs directory. + * + * This function should NOT be called in the fast path. + * + * @return + * - Zero on error. + * - Number of package on system on success. + */ +unsigned int +power_amd_uncore_get_num_pkgs(void); + +/** + * Return the number of dies for packages (CPUs) specified + * from parsing the uncore sysfs directory. + * + * This function should NOT be called in the fast path. + * + * @param pkg + * Package number. + * Each physical CPU in a system is referred to as a package. + * + * @return + * - Zero on error. + * - Number of dies for package on success. + */ +unsigned int +power_amd_uncore_get_num_dies(unsigned int pkg); + +#endif /* POWER_AMD_UNCORE_H */ diff --git a/drivers/power/amd_uncore/meson.build b/drivers/power/amd_uncore/meson.build new file mode 100644 index 0000000000..8cbab47b01 --- /dev/null +++ b/drivers/power/amd_uncore/meson.build @@ -0,0 +1,20 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2024 Advanced Micro Devices, Inc. + +if not is_linux + build = false + reason = 'only supported on Linux' + subdir_done() +endif + +ESMI_header = '#include' +lib = cc.find_library('e_smi64', required: false) +if not lib.found() + build = false + reason = 'missing dependency, "libe_smi"' +else + ext_deps += lib +endif + +sources = files('amd_uncore.c') +deps += ['power'] diff --git a/lib/power/power_cppc_cpufreq.c b/drivers/power/cppc/cppc_cpufreq.c similarity index 95% rename from lib/power/power_cppc_cpufreq.c rename to drivers/power/cppc/cppc_cpufreq.c index cc9305bdfe..3cd4165c83 100644 --- a/lib/power/power_cppc_cpufreq.c +++ b/drivers/power/cppc/cppc_cpufreq.c @@ -8,7 +8,7 @@ #include #include -#include "power_cppc_cpufreq.h" +#include "cppc_cpufreq.h" #include "power_common.h" /* macros used for rounding frequency to nearest 100000 */ @@ -695,3 +695,23 @@ power_cppc_get_capabilities(unsigned int lcore_id, return 0; } + +static struct rte_power_cpufreq_ops cppc_ops = { + .name = "cppc", + .init = power_cppc_cpufreq_init, + .exit = power_cppc_cpufreq_exit, + .check_env_support = power_cppc_cpufreq_check_supported, + .get_avail_freqs = power_cppc_cpufreq_freqs, + .get_freq = power_cppc_cpufreq_get_freq, + .set_freq = power_cppc_cpufreq_set_freq, + .freq_down = power_cppc_cpufreq_freq_down, + .freq_up = power_cppc_cpufreq_freq_up, + .freq_max = power_cppc_cpufreq_freq_max, + .freq_min = power_cppc_cpufreq_freq_min, + .turbo_status = power_cppc_turbo_status, + .enable_turbo = power_cppc_enable_turbo, + .disable_turbo = power_cppc_disable_turbo, + .get_caps = power_cppc_get_capabilities +}; + +RTE_POWER_REGISTER_CPUFREQ_OPS(cppc_ops); diff --git a/lib/power/power_cppc_cpufreq.h b/drivers/power/cppc/cppc_cpufreq.h similarity index 96% rename from lib/power/power_cppc_cpufreq.h rename to drivers/power/cppc/cppc_cpufreq.h index f4121b237e..810ddff2d7 100644 --- a/lib/power/power_cppc_cpufreq.h +++ b/drivers/power/cppc/cppc_cpufreq.h @@ -3,15 +3,15 @@ * Copyright(c) 2021 Arm Limited */ -#ifndef _POWER_CPPC_CPUFREQ_H -#define _POWER_CPPC_CPUFREQ_H +#ifndef CPPC_CPUFREQ_H +#define CPPC_CPUFREQ_H /** * @file - * RTE Power Management via userspace CPPC cpufreq + * Power Management via userspace CPPC cpufreq */ -#include "rte_power.h" +#include "power_cpufreq.h" /** * Check if CPPC power management is 
supported. @@ -215,4 +215,4 @@ int power_cppc_disable_turbo(unsigned int lcore_id); int power_cppc_get_capabilities(unsigned int lcore_id, struct rte_power_core_capabilities *caps); -#endif /* _POWER_CPPC_CPUFREQ_H */ +#endif /* CPPC_CPUFREQ_H */ diff --git a/drivers/power/cppc/meson.build b/drivers/power/cppc/meson.build new file mode 100644 index 0000000000..f1948cd424 --- /dev/null +++ b/drivers/power/cppc/meson.build @@ -0,0 +1,10 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2024 Advanced Micro Devices, Inc. + +if not is_linux + build = false + reason = 'only supported on Linux' +endif +sources = files('cppc_cpufreq.c') + +deps += ['power'] diff --git a/lib/power/power_pstate_cpufreq.c b/drivers/power/intel_pstate/intel_pstate_cpufreq.c similarity index 96% rename from lib/power/power_pstate_cpufreq.c rename to drivers/power/intel_pstate/intel_pstate_cpufreq.c index 4755909466..eba50b2874 100644 --- a/lib/power/power_pstate_cpufreq.c +++ b/drivers/power/intel_pstate/intel_pstate_cpufreq.c @@ -15,7 +15,7 @@ #include #include "rte_power_pmd_mgmt.h" -#include "power_pstate_cpufreq.h" +#include "intel_pstate_cpufreq.h" #include "power_common.h" /* macros used for rounding frequency to nearest 100000 */ @@ -898,3 +898,23 @@ int power_pstate_get_capabilities(unsigned int lcore_id, return 0; } + +static struct rte_power_cpufreq_ops pstate_ops = { + .name = "intel-pstate", + .init = power_pstate_cpufreq_init, + .exit = power_pstate_cpufreq_exit, + .check_env_support = power_pstate_cpufreq_check_supported, + .get_avail_freqs = power_pstate_cpufreq_freqs, + .get_freq = power_pstate_cpufreq_get_freq, + .set_freq = power_pstate_cpufreq_set_freq, + .freq_down = power_pstate_cpufreq_freq_down, + .freq_up = power_pstate_cpufreq_freq_up, + .freq_max = power_pstate_cpufreq_freq_max, + .freq_min = power_pstate_cpufreq_freq_min, + .turbo_status = power_pstate_turbo_status, + .enable_turbo = power_pstate_enable_turbo, + .disable_turbo = power_pstate_disable_turbo, + .get_caps = power_pstate_get_capabilities +}; + +RTE_POWER_REGISTER_CPUFREQ_OPS(pstate_ops); diff --git a/lib/power/power_pstate_cpufreq.h b/drivers/power/intel_pstate/intel_pstate_cpufreq.h similarity index 96% rename from lib/power/power_pstate_cpufreq.h rename to drivers/power/intel_pstate/intel_pstate_cpufreq.h index 7bf64a518c..6a52ae8528 100644 --- a/lib/power/power_pstate_cpufreq.h +++ b/drivers/power/intel_pstate/intel_pstate_cpufreq.h @@ -2,15 +2,15 @@ * Copyright(c) 2018 Intel Corporation */ -#ifndef _POWER_PSTATE_CPUFREQ_H -#define _POWER_PSTATE_CPUFREQ_H +#ifndef INTEL_PSTATE_CPUFREQ_H +#define INTEL_PSTATE_CPUFREQ_H /** * @file - * RTE Power Management via Intel Pstate driver + * Power Management via Intel Pstate driver */ -#include "rte_power.h" +#include "power_cpufreq.h" /** * Check if pstate power management is supported. @@ -214,4 +214,4 @@ int power_pstate_disable_turbo(unsigned int lcore_id); int power_pstate_get_capabilities(unsigned int lcore_id, struct rte_power_core_capabilities *caps); -#endif +#endif /* INTEL_PSTATE_CPUFREQ_H */ diff --git a/drivers/power/intel_pstate/meson.build b/drivers/power/intel_pstate/meson.build new file mode 100644 index 0000000000..c5132809b1 --- /dev/null +++ b/drivers/power/intel_pstate/meson.build @@ -0,0 +1,10 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2024 Advanced Micro Devices, Inc. 
+ +if not is_linux + build = false + reason = 'only supported on Linux' +endif +sources = files('intel_pstate_cpufreq.c') + +deps += ['power'] diff --git a/lib/power/power_intel_uncore.c b/drivers/power/intel_uncore/intel_uncore.c similarity index 95% rename from lib/power/power_intel_uncore.c rename to drivers/power/intel_uncore/intel_uncore.c index 4eb9c5900a..804ad5d755 100644 --- a/lib/power/power_intel_uncore.c +++ b/drivers/power/intel_uncore/intel_uncore.c @@ -8,7 +8,7 @@ #include -#include "power_intel_uncore.h" +#include "intel_uncore.h" #include "power_common.h" #define MAX_NUMA_DIE 8 @@ -475,3 +475,19 @@ power_intel_uncore_get_num_dies(unsigned int pkg) return count; } + +static struct rte_power_uncore_ops intel_uncore_ops = { + .name = "intel-uncore", + .init = power_intel_uncore_init, + .exit = power_intel_uncore_exit, + .get_avail_freqs = power_intel_uncore_freqs, + .get_num_pkgs = power_intel_uncore_get_num_pkgs, + .get_num_dies = power_intel_uncore_get_num_dies, + .get_num_freqs = power_intel_uncore_get_num_freqs, + .get_freq = power_get_intel_uncore_freq, + .set_freq = power_set_intel_uncore_freq, + .freq_max = power_intel_uncore_freq_max, + .freq_min = power_intel_uncore_freq_min, +}; + +RTE_POWER_REGISTER_UNCORE_OPS(intel_uncore_ops); diff --git a/lib/power/power_intel_uncore.h b/drivers/power/intel_uncore/intel_uncore.h similarity index 95% rename from lib/power/power_intel_uncore.h rename to drivers/power/intel_uncore/intel_uncore.h index 20a3ba8ebe..58abc4c988 100644 --- a/lib/power/power_intel_uncore.h +++ b/drivers/power/intel_uncore/intel_uncore.h @@ -2,20 +2,15 @@ * Copyright(c) 2022 Intel Corporation */ -#ifndef POWER_INTEL_UNCORE_H -#define POWER_INTEL_UNCORE_H +#ifndef INTEL_UNCORE_H +#define INTEL_UNCORE_H /** * @file - * RTE Intel Uncore Frequency Management + * Intel Uncore Frequency Management */ -#include "rte_power.h" -#include "rte_power_uncore.h" - -#ifdef __cplusplus -extern "C" { -#endif +#include "power_uncore_ops.h" /** * Initialize uncore frequency management for specific die on a package. @@ -219,8 +214,4 @@ power_intel_uncore_get_num_pkgs(void); unsigned int power_intel_uncore_get_num_dies(unsigned int pkg); -#ifdef __cplusplus -} -#endif - -#endif /* POWER_INTEL_UNCORE_H */ +#endif /* INTEL_UNCORE_H */ diff --git a/drivers/power/intel_uncore/meson.build b/drivers/power/intel_uncore/meson.build new file mode 100644 index 0000000000..876df8ad14 --- /dev/null +++ b/drivers/power/intel_uncore/meson.build @@ -0,0 +1,6 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2017 Intel Corporation +# Copyright(c) 2024 Advanced Micro Devices, Inc. 
+ +sources = files('intel_uncore.c') +deps += ['power'] diff --git a/lib/power/guest_channel.c b/drivers/power/kvm_vm/guest_channel.c similarity index 99% rename from lib/power/guest_channel.c rename to drivers/power/kvm_vm/guest_channel.c index bc3f55b6bf..35cd4cfe6f 100644 --- a/lib/power/guest_channel.c +++ b/drivers/power/kvm_vm/guest_channel.c @@ -13,7 +13,7 @@ #include -#include +#include #include "guest_channel.h" diff --git a/lib/power/guest_channel.h b/drivers/power/kvm_vm/guest_channel.h similarity index 95% rename from lib/power/guest_channel.h rename to drivers/power/kvm_vm/guest_channel.h index 409fa67b74..b3f581496a 100644 --- a/lib/power/guest_channel.h +++ b/drivers/power/kvm_vm/guest_channel.h @@ -1,8 +1,9 @@ /* SPDX-License-Identifier: BSD-3-Clause * Copyright(c) 2010-2014 Intel Corporation */ -#ifndef _GUEST_CHANNEL_H -#define _GUEST_CHANNEL_H + +#ifndef GUEST_CHANNEL_H +#define GUEST_CHANNEL_H /** * Check if any Virtio-Serial VM end-points exist in path. @@ -81,4 +82,4 @@ int power_guest_channel_read_msg(void *pkt, size_t pkt_len, unsigned int lcore_id); -#endif +#endif /* GUEST_CHANNEL_H */ diff --git a/lib/power/power_kvm_vm.c b/drivers/power/kvm_vm/kvm_vm.c similarity index 82% rename from lib/power/power_kvm_vm.c rename to drivers/power/kvm_vm/kvm_vm.c index f15be8fac5..5754a441cd 100644 --- a/lib/power/power_kvm_vm.c +++ b/drivers/power/kvm_vm/kvm_vm.c @@ -9,7 +9,7 @@ #include "rte_power_guest_channel.h" #include "guest_channel.h" #include "power_common.h" -#include "power_kvm_vm.h" +#include "kvm_vm.h" #define FD_PATH "/dev/virtio-ports/virtio.serial.port.poweragent" @@ -137,3 +137,23 @@ int power_kvm_vm_get_capabilities(__rte_unused unsigned int lcore_id, POWER_LOG(ERR, "rte_power_get_capabilities is not implemented for Virtual Machine Power Management"); return -ENOTSUP; } + +static struct rte_power_cpufreq_ops kvm_vm_ops = { + .name = "kvm-vm", + .init = power_kvm_vm_init, + .exit = power_kvm_vm_exit, + .check_env_support = power_kvm_vm_check_supported, + .get_avail_freqs = power_kvm_vm_freqs, + .get_freq = power_kvm_vm_get_freq, + .set_freq = power_kvm_vm_set_freq, + .freq_down = power_kvm_vm_freq_down, + .freq_up = power_kvm_vm_freq_up, + .freq_max = power_kvm_vm_freq_max, + .freq_min = power_kvm_vm_freq_min, + .turbo_status = power_kvm_vm_turbo_status, + .enable_turbo = power_kvm_vm_enable_turbo, + .disable_turbo = power_kvm_vm_disable_turbo, + .get_caps = power_kvm_vm_get_capabilities +}; + +RTE_POWER_REGISTER_CPUFREQ_OPS(kvm_vm_ops); diff --git a/lib/power/power_kvm_vm.h b/drivers/power/kvm_vm/kvm_vm.h similarity index 97% rename from lib/power/power_kvm_vm.h rename to drivers/power/kvm_vm/kvm_vm.h index 303fcc041b..eaccdeea4b 100644 --- a/lib/power/power_kvm_vm.h +++ b/drivers/power/kvm_vm/kvm_vm.h @@ -2,15 +2,15 @@ * Copyright(c) 2010-2014 Intel Corporation */ -#ifndef _POWER_KVM_VM_H -#define _POWER_KVM_VM_H +#ifndef KVM_VM_H +#define KVM_VM_H /** * @file - * RTE Power Management KVM VM + * Power Management KVM VM */ -#include "rte_power.h" +#include "power_cpufreq.h" /** * Check if KVM power management is supported. 
@@ -196,4 +196,4 @@ int power_kvm_vm_disable_turbo(unsigned int lcore_id); int power_kvm_vm_get_capabilities(unsigned int lcore_id, struct rte_power_core_capabilities *caps); -#endif +#endif /* KVM_VM_H */ diff --git a/drivers/power/kvm_vm/meson.build b/drivers/power/kvm_vm/meson.build new file mode 100644 index 0000000000..e921c012e9 --- /dev/null +++ b/drivers/power/kvm_vm/meson.build @@ -0,0 +1,15 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2024 Advanced Micro Devices, Inc. + +if not is_linux + build = false + reason = 'only supported on Linux' + subdir_done() +endif +sources = files( + 'guest_channel.c', + 'kvm_vm.c', +) +headers = files('rte_power_guest_channel.h') + +deps += ['power'] diff --git a/lib/power/rte_power_guest_channel.h b/drivers/power/kvm_vm/rte_power_guest_channel.h similarity index 100% rename from lib/power/rte_power_guest_channel.h rename to drivers/power/kvm_vm/rte_power_guest_channel.h diff --git a/drivers/power/kvm_vm/version.map b/drivers/power/kvm_vm/version.map new file mode 100644 index 0000000000..ffa676624b --- /dev/null +++ b/drivers/power/kvm_vm/version.map @@ -0,0 +1,8 @@ +DPDK_25 { + global: + + rte_power_guest_channel_receive_msg; + rte_power_guest_channel_send_msg; + + local: *; +}; diff --git a/drivers/power/meson.build b/drivers/power/meson.build new file mode 100644 index 0000000000..0a703bce38 --- /dev/null +++ b/drivers/power/meson.build @@ -0,0 +1,14 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2024 Advanced Micro Devices, Inc. + +drivers = [ + 'acpi', + 'amd_pstate', + 'amd_uncore', + 'cppc', + 'intel_pstate', + 'intel_uncore', + 'kvm_vm', +] + +std_deps = ['power'] diff --git a/drivers/raw/cnxk_rvu_lf/cnxk_rvu_lf.c b/drivers/raw/cnxk_rvu_lf/cnxk_rvu_lf.c new file mode 100644 index 0000000000..54c58a8552 --- /dev/null +++ b/drivers/raw/cnxk_rvu_lf/cnxk_rvu_lf.c @@ -0,0 +1,262 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(C) 2024 Marvell. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "cnxk_rvu_lf.h" +#include "cnxk_rvu_lf_driver.h" + +int +rte_pmd_rvu_lf_msg_id_range_set(uint8_t dev_id, uint16_t from, uint16_t to) +{ + struct rte_rawdev *rawdev = rte_rawdev_pmd_get_dev(dev_id); + struct roc_rvu_lf *roc_rvu_lf; + + if (rawdev == NULL) + return -EINVAL; + + roc_rvu_lf = (struct roc_rvu_lf *)rawdev->dev_private; + + return roc_rvu_lf_msg_id_range_set(roc_rvu_lf, from, to); +} + +int +rte_pmd_rvu_lf_msg_process(uint8_t dev_id, uint16_t vf, uint16_t msg_id, + void *req, uint16_t req_len, void *rsp, uint16_t rsp_len) +{ + struct rte_rawdev *rawdev = rte_rawdev_pmd_get_dev(dev_id); + struct roc_rvu_lf *roc_rvu_lf; + + if (rawdev == NULL) + return -EINVAL; + + roc_rvu_lf = (struct roc_rvu_lf *)rawdev->dev_private; + + return roc_rvu_lf_msg_process(roc_rvu_lf, vf, msg_id, req, req_len, rsp, rsp_len); +} + +int +rte_pmd_rvu_lf_msg_handler_register(uint8_t dev_id, rte_pmd_rvu_lf_msg_handler_cb_fn cb) +{ + struct rte_rawdev *rawdev = rte_rawdev_pmd_get_dev(dev_id); + struct roc_rvu_lf *roc_rvu_lf; + + if (rawdev == NULL) + return -EINVAL; + + roc_rvu_lf = (struct roc_rvu_lf *)rawdev->dev_private; + + return roc_rvu_lf_msg_handler_register(roc_rvu_lf, (roc_rvu_lf_msg_handler_cb_fn)cb); +} + +int +rte_pmd_rvu_lf_msg_handler_unregister(uint8_t dev_id) +{ + struct rte_rawdev *rawdev = rte_rawdev_pmd_get_dev(dev_id); + struct roc_rvu_lf *roc_rvu_lf; + + if (rawdev == NULL) + return -EINVAL; + + roc_rvu_lf = (struct roc_rvu_lf *)rawdev->dev_private; + + return roc_rvu_lf_msg_handler_unregister(roc_rvu_lf); +} + +int +rte_pmd_rvu_lf_irq_register(uint8_t dev_id, unsigned int irq, + rte_pmd_rvu_lf_intr_callback_fn cb, void *data) +{ + struct rte_rawdev *rawdev = rte_rawdev_pmd_get_dev(dev_id); + struct roc_rvu_lf *roc_rvu_lf; + + if (rawdev == NULL) + return -EINVAL; + + roc_rvu_lf = (struct roc_rvu_lf *)rawdev->dev_private; + + return roc_rvu_lf_irq_register(roc_rvu_lf, irq, (roc_rvu_lf_intr_cb_fn)cb, data); +} + +int +rte_pmd_rvu_lf_irq_unregister(uint8_t dev_id, unsigned int irq, + rte_pmd_rvu_lf_intr_callback_fn cb, void *data) +{ + struct rte_rawdev *rawdev = rte_rawdev_pmd_get_dev(dev_id); + struct roc_rvu_lf *roc_rvu_lf; + + if (rawdev == NULL) + return -EINVAL; + + roc_rvu_lf = (struct roc_rvu_lf *)rawdev->dev_private; + + return roc_rvu_lf_irq_unregister(roc_rvu_lf, irq, (roc_rvu_lf_intr_cb_fn)cb, data); +} + +int +rte_pmd_rvu_lf_bar_get(uint8_t dev_id, uint8_t bar_num, size_t *va, size_t *mask) +{ + struct roc_rvu_lf *roc_rvu_lf; + struct rte_rawdev *rawdev; + + rawdev = rte_rawdev_pmd_get_dev(dev_id); + if (rawdev == NULL) + return -EINVAL; + + roc_rvu_lf = (struct roc_rvu_lf *)rawdev->dev_private; + + if (bar_num > PCI_MAX_RESOURCE || + (roc_rvu_lf->pci_dev->mem_resource[bar_num].addr == NULL)) { + *va = 0; + *mask = 0; + return -ENOTSUP; + } + *va = (size_t)(roc_rvu_lf->pci_dev->mem_resource[bar_num].addr); + *mask = (size_t)(roc_rvu_lf->pci_dev->mem_resource[bar_num].len - 1); + + return 0; +} + +uint16_t +rte_pmd_rvu_lf_npa_pf_func_get(void) +{ + return roc_npa_pf_func_get(); +} + +uint16_t +rte_pmd_rvu_lf_sso_pf_func_get(void) +{ + return roc_sso_pf_func_get(); +} + +uint16_t +rte_pmd_rvu_lf_pf_func_get(uint8_t dev_id) +{ + struct roc_rvu_lf *roc_rvu_lf; + struct rte_rawdev *rawdev; + + rawdev = rte_rawdev_pmd_get_dev(dev_id); + if (rawdev == NULL) + return 0; + + roc_rvu_lf = (struct roc_rvu_lf *)rawdev->dev_private; + + return roc_rvu_lf_pf_func_get(roc_rvu_lf); 
+} + +static const struct rte_rawdev_ops rvu_lf_rawdev_ops = { + .dev_selftest = rvu_lf_rawdev_selftest, +}; + +static void +rvu_lf_rawdev_get_name(char *name, struct rte_pci_device *pci_dev) +{ + snprintf(name, RTE_RAWDEV_NAME_MAX_LEN, "RVU LF:%02x:%02x.%x", + pci_dev->addr.bus, pci_dev->addr.devid, + pci_dev->addr.function); +} + +static int +rvu_lf_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev) +{ + char name[RTE_RAWDEV_NAME_MAX_LEN]; + struct rte_rawdev *rvu_lf_rawdev; + struct roc_rvu_lf *roc_rvu_lf; + int ret; + + RTE_SET_USED(pci_drv); + + if (rte_eal_process_type() != RTE_PROC_PRIMARY) + return 0; + + if (!pci_dev->mem_resource[2].addr) { + CNXK_RVU_LF_LOG(ERR, "BARs have invalid values: BAR0 %p, BAR2 %p", + pci_dev->mem_resource[2].addr, pci_dev->mem_resource[4].addr); + return -ENODEV; + } + + ret = roc_plt_init(); + if (ret) + return ret; + + rvu_lf_rawdev_get_name(name, pci_dev); + rvu_lf_rawdev = rte_rawdev_pmd_allocate(name, sizeof(*roc_rvu_lf), + rte_socket_id()); + if (rvu_lf_rawdev == NULL) { + CNXK_RVU_LF_LOG(ERR, "Failed to allocate rawdev"); + return -ENOMEM; + } + + rvu_lf_rawdev->dev_ops = &rvu_lf_rawdev_ops; + rvu_lf_rawdev->device = &pci_dev->device; + rvu_lf_rawdev->driver_name = pci_dev->driver->driver.name; + + roc_rvu_lf = (struct roc_rvu_lf *)rvu_lf_rawdev->dev_private; + roc_rvu_lf->pci_dev = pci_dev; + + ret = roc_rvu_lf_dev_init(roc_rvu_lf); + if (ret) { + rte_rawdev_pmd_release(rvu_lf_rawdev); + return ret; + } + + return 0; +} + +static int +rvu_lf_remove(struct rte_pci_device *pci_dev) +{ + char name[RTE_RAWDEV_NAME_MAX_LEN]; + struct roc_rvu_lf *roc_rvu_lf; + struct rte_rawdev *rawdev; + + if (rte_eal_process_type() != RTE_PROC_PRIMARY) + return 0; + + if (pci_dev == NULL) { + CNXK_RVU_LF_LOG(ERR, "invalid pci_dev"); + return -EINVAL; + } + + rvu_lf_rawdev_get_name(name, pci_dev); + rawdev = rte_rawdev_pmd_get_named_dev(name); + if (rawdev == NULL) { + CNXK_RVU_LF_LOG(ERR, "invalid device name (%s)", name); + return -EINVAL; + } + + roc_rvu_lf = (struct roc_rvu_lf *)rawdev->dev_private; + roc_rvu_lf_dev_fini(roc_rvu_lf); + + return rte_rawdev_pmd_release(rawdev); +} + +static const struct rte_pci_id pci_rvu_lf_map[] = { + CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CNF20KA, PCI_DEVID_CNXK_RVU_BPHY_PF), + CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CNF20KA, PCI_DEVID_CNXK_RVU_BPHY_VF), + { + .vendor_id = 0, + }, +}; + +static struct rte_pci_driver cnxk_rvu_lf_rawdev_pmd = { + .id_table = pci_rvu_lf_map, + .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_NEED_IOVA_AS_VA, + .probe = rvu_lf_probe, + .remove = rvu_lf_remove, +}; + +RTE_PMD_REGISTER_PCI(rvu_lf_rawdev_pci_driver, cnxk_rvu_lf_rawdev_pmd); +RTE_PMD_REGISTER_PCI_TABLE(rvu_lf_rawdev_pci_driver, pci_rvu_lf_map); +RTE_PMD_REGISTER_KMOD_DEP(rvu_lf_rawdev_pci_driver, "vfio-pci"); +RTE_LOG_REGISTER_SUFFIX(cnxk_logtype_rvu_lf, rvu_lf, INFO); diff --git a/drivers/raw/cnxk_rvu_lf/cnxk_rvu_lf.h b/drivers/raw/cnxk_rvu_lf/cnxk_rvu_lf.h new file mode 100644 index 0000000000..e64643dcee --- /dev/null +++ b/drivers/raw/cnxk_rvu_lf/cnxk_rvu_lf.h @@ -0,0 +1,26 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(C) 2024 Marvell. + */ + +#ifndef _CNXK_RVU_LF_H_ +#define _CNXK_RVU_LF_H_ + +#include + +#include + +/** + * @file cnxk_rvu_lf.h + * + * Marvell RVU LF raw PMD specific internal structures + * + */ + +extern int cnxk_logtype_rvu_lf; +#define RTE_LOGTYPE_CNXK_RVU_LF cnxk_logtype_rvu_lf +#define CNXK_RVU_LF_LOG(level, ...) 
\ + RTE_LOG_LINE_PREFIX(level, CNXK_RVU_LF, "%s(): ", __func__, __VA_ARGS__) + +int rvu_lf_rawdev_selftest(uint16_t dev_id); + +#endif /* _CNXK_RVU_LF_H_ */ diff --git a/drivers/raw/cnxk_rvu_lf/cnxk_rvu_lf_driver.h b/drivers/raw/cnxk_rvu_lf/cnxk_rvu_lf_driver.h new file mode 100644 index 0000000000..28410a74cd --- /dev/null +++ b/drivers/raw/cnxk_rvu_lf/cnxk_rvu_lf_driver.h @@ -0,0 +1,218 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(C) 2024 Marvell. + */ + +#ifndef _CNXK_RVU_LF_DRIVER_H_ +#define _CNXK_RVU_LF_DRIVER_H_ + +/** + * @file cnxk_rvu_lf_driver.h + * + * Marvell RVU LF raw PMD specific structures and interface + * + * This API allows an out-of-tree driver to manage an RVU LF device. + * It enables operations such as sending/receiving mailbox messages, + * registering and handling interrupts, etc. + */ + +#include + +#include +#include + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * Obtain NPA PF func + * + * @return + * Returns NPA pf_func on success, 0 in case of invalid pf_func. + */ +__rte_internal +uint16_t rte_pmd_rvu_lf_npa_pf_func_get(void); + +/** + * Obtain SSO PF func + * + * @return + * Returns SSO pf_func on success, 0 in case of invalid pf_func. + */ +__rte_internal +uint16_t rte_pmd_rvu_lf_sso_pf_func_get(void); + +/** + * Obtain RVU LF device PF func + * + * @param dev_id + * device id of RVU LF device + * + * @return + * Returns RVU LF pf_func on success, 0 in case of invalid pf_func. + */ +__rte_internal +uint16_t rte_pmd_rvu_lf_pf_func_get(uint8_t dev_id); + +/** + * Signature of callback function called when an interrupt is received on RVU LF device. + * + * @param cb_arg + * pointer to the information received on an interrupt + */ +typedef void (*rte_pmd_rvu_lf_intr_callback_fn)(void *cb_arg); + +/** + * Register interrupt callback + * + * Registers an interrupt callback to be executed when interrupt is raised. + * + * @param dev_id + * device id of RVU LF device + * @param irq + * interrupt number for which interrupt will be raised + * @param cb + * callback function to be executed + * @param cb_arg + * argument to be passed to callback function + * + * @return 0 on success, negative value otherwise + */ +__rte_internal +int rte_pmd_rvu_lf_irq_register(uint8_t dev_id, unsigned int irq, + rte_pmd_rvu_lf_intr_callback_fn cb, void *cb_arg); + +/** + * Unregister interrupt callback + * + * @param dev_id + * device id of RVU LF device + * @param irq + * interrupt number + * @param cb + * callback function registered + * @param cb_arg + * argument to be passed to callback function + * + * @return 0 on success, negative value otherwise + */ +__rte_internal +int rte_pmd_rvu_lf_irq_unregister(uint8_t dev_id, unsigned int irq, + rte_pmd_rvu_lf_intr_callback_fn cb, void *cb_arg); + +/** + * Signature of callback function called when a message process handler is called + * on RVU LF device. + * + * @param vf + * VF number(0 to N) from which message is received (ignored in case of PF) + * @param msg_id + * message id + * @param req + * pointer to message request + * @param req_len + * length of message request + * @param[out] rsp + * pointer to message response + * @param[out] rsp_len + * length of message response + * + * @return 0 when response is set, negative value otherwise + */ +typedef int (*rte_pmd_rvu_lf_msg_handler_cb_fn)(uint16_t vf, uint16_t msg_id, + void *req, uint16_t req_len, + void **rsp, uint16_t *rsp_len); + +/** + * Register message handler callback + * + * Registers message handler callback to be executed when the message is received from peer. 
+ * + * @param dev_id + * device id of RVU LF device + * @param cb + * callback function to be executed + * + * @return 0 on success, negative value otherwise + */ +__rte_internal +int rte_pmd_rvu_lf_msg_handler_register(uint8_t dev_id, rte_pmd_rvu_lf_msg_handler_cb_fn cb); + +/** + * Unregister message handler callback + * + * @param dev_id + * device id of RVU LF device + * + * @return 0 on success, negative value otherwise + */ +__rte_internal +int rte_pmd_rvu_lf_msg_handler_unregister(uint8_t dev_id); + +/** + * Set RVU mailbox message id range. + * + * @param dev_id + * device id of RVU LF device + * @param from + * starting message id for RVU mailbox (> 0x1FF) + * @param to + * last message id for RVU mailbox (< 0xFFFF) + * + * @return 0 on success, -EINVAL for invalid range + */ +__rte_internal +int rte_pmd_rvu_lf_msg_id_range_set(uint8_t dev_id, uint16_t from, uint16_t to); + +/** + * Process a RVU mailbox message. + * + * Message request and response to be sent/received, + * need to be allocated/deallocated by application + * before/after processing the message. + * + * @param dev_id + * device id of RVU LF device + * @param vf + * VF number(0 to N) in case of PF->VF message. 0 is valid as VF0. + * (For VF->PF message, this field is ignored) + * @param msg_id + * message id + * @param req + * pointer to message request data to be sent + * @param req_len + * length of request data + * @param rsp + * pointer to message response expected to be received, NULL if no response + * @param rsp_len + * length of message response expected, 0 if no response + * + * @return 0 on success, negative value otherwise + */ +__rte_internal +int rte_pmd_rvu_lf_msg_process(uint8_t dev_id, uint16_t vf, uint16_t msg_id, + void *req, uint16_t req_len, void *rsp, uint16_t rsp_len); + +/** + * Get BAR addresses for the RVU LF device. + * + * @param dev_id + * device id of RVU LF device + * @param bar_num + * BAR number for which address is required + * @param[out] va + * Virtual address of the BAR. 0 if not mapped + * @param[out] mask + * BAR address mask, 0 if not mapped + * + * @return + * Returns 0 on success, negative error code otherwise + */ +__rte_internal +int rte_pmd_rvu_lf_bar_get(uint8_t dev_id, uint8_t bar_num, size_t *va, size_t *mask); + +#ifdef __cplusplus +} +#endif + +#endif /* _CNXK_RVU_LF_DRIVER_H_ */ diff --git a/drivers/raw/cnxk_rvu_lf/cnxk_rvu_lf_selftest.c b/drivers/raw/cnxk_rvu_lf/cnxk_rvu_lf_selftest.c new file mode 100644 index 0000000000..e33973f24b --- /dev/null +++ b/drivers/raw/cnxk_rvu_lf/cnxk_rvu_lf_selftest.c @@ -0,0 +1,166 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(C) 2024 Marvell. 
+ */ + +#include +#include + +#include +#include +#include +#include + +#include +#include + +#include "cnxk_rvu_lf.h" +#include "cnxk_rvu_lf_driver.h" + +#define PF 0 +#define VF 0 +#define RSP_LEN 64 +#define REQ_LEN 64 +#define MSG_ID_FROM 0x3000 +#define MSG_ID_TO 0x4000 +#define MAX_BAR 6 + +static int +msg_process_notify_cb(uint16_t vf, uint16_t msg_id, + void *req, uint16_t req_len, void **rsp, uint16_t *rsp_len) +{ + uint8_t *resp; + int i; + + printf("\nReceived message(0x%x) from VF0x%x\n", msg_id, vf); + rte_hexdump(stdout, "req_data received", req, req_len); + + resp = malloc(RSP_LEN); + if (resp == NULL) + return -ENOMEM; + for (i = 0; i < RSP_LEN; i++) + resp[i] = 0xB0; + *rsp = resp; + *rsp_len = RSP_LEN; + rte_hexdump(stdout, "rsp_data_filled", *rsp, RSP_LEN); + + return 0; +} + +int +rvu_lf_rawdev_selftest(uint16_t dev_id) +{ + char *dev_name = rte_rawdevs[dev_id].name; + uint8_t req[REQ_LEN] = {0}; + uint8_t rsp[RSP_LEN] = {0}; + size_t bar_mask = 0; + size_t bar_va = 0; + unsigned int i, j; + uint16_t pf_func; + char *token[2]; + int func, ret; + + token[0] = strtok_r(dev_name, ".", &dev_name); + token[1] = strtok_r(dev_name, ".", &dev_name); + func = atoi(token[1]); + + ret = rte_rawdev_start(dev_id); + if (ret) + return ret; + + pf_func = rte_pmd_rvu_lf_npa_pf_func_get(); + if (pf_func == 0) + CNXK_RVU_LF_LOG(WARNING, "NPA pf_func is invalid"); + + pf_func = rte_pmd_rvu_lf_sso_pf_func_get(); + if (pf_func == 0) + CNXK_RVU_LF_LOG(WARNING, "SSO pf_func is invalid"); + + pf_func = rte_pmd_rvu_lf_pf_func_get(dev_id); + if (pf_func == 0) + CNXK_RVU_LF_LOG(WARNING, "RVU-LF pf_func is invalid"); + + for (i = 0; i < MAX_BAR; i++) { + if (!rte_pmd_rvu_lf_bar_get(dev_id, i, &bar_va, &bar_mask)) + printf("\n BAR[%d]: addr: 0x%" PRIx64 ", mask: 0x%" PRIx64 "\n", + i, bar_va, bar_mask); + } + + ret = rte_pmd_rvu_lf_msg_id_range_set(dev_id, MSG_ID_FROM, MSG_ID_TO); + if (ret) { + CNXK_RVU_LF_LOG(ERR, "RVU message ID range invalid"); + goto out; + } + + ret = rte_pmd_rvu_lf_msg_handler_register(dev_id, msg_process_notify_cb); + if (ret) { + CNXK_RVU_LF_LOG(ERR, "RVU message handler register failed, ret: %d", ret); + goto out; + } + + if (func == 0) { + j = 50; + printf("\n"); + while (j--) { + /* PF will wait for RVU message callbacks to be called */ + rte_delay_ms(1000); + printf("PF waiting for VF messages for %d sec.\r", j); + } + /* PF will send the messages and receive responses. */ + for (i = 0; i < REQ_LEN; i++) + req[i] = 0xC0; + /* + * Range is set as between MSG_ID_FROM and MSG_ID_TO. + * Messages sent with this id will be serviced by VF.. + */ + ret = rte_pmd_rvu_lf_msg_process(dev_id, + VF /* Send to VF0 */, + MSG_ID_FROM + 0x2, + req, REQ_LEN, rsp, RSP_LEN); + if (ret) { + CNXK_RVU_LF_LOG(ERR, "rvu lf PF->VF message send failed"); + goto unregister; + } + CNXK_RVU_LF_LOG(INFO, "RVU PF->VF message processed"); + rte_hexdump(stdout, "rsp_data received", rsp, RSP_LEN); + j = 50; + printf("\n"); + while (j--) { + rte_delay_ms(1000); + printf("PF waiting for VF to exit for %d sec.\r", j); + } + + } else { + /* VF will send the messages and receive responses. */ + for (i = 0; i < REQ_LEN; i++) + req[i] = 0xA0; + /* + * Range is set as between MSG_ID_FROM and MSG_ID_TO + * Messages sent with this id will be serviced by PF and will + * not be forwarded to AF. 
+ */ + ret = rte_pmd_rvu_lf_msg_process(dev_id, + PF /* Send to PF */, + MSG_ID_FROM + 0x1, + req, REQ_LEN, rsp, RSP_LEN); + if (ret) { + CNXK_RVU_LF_LOG(ERR, "rvu lf VF->PF message send failed"); + goto unregister; + } + CNXK_RVU_LF_LOG(INFO, "RVU VF->PF message processed"); + rte_hexdump(stdout, "rsp_data received", rsp, RSP_LEN); + j = 50; + printf("\n"); + while (j--) { + rte_delay_ms(1000); + printf("VF waiting for PF to send msg for %d sec.\r", j); + } + } +unregister: + rte_pmd_rvu_lf_msg_handler_unregister(dev_id); +out: + rte_rawdev_stop(dev_id); + + return ret; +} + + diff --git a/drivers/raw/cnxk_rvu_lf/meson.build b/drivers/raw/cnxk_rvu_lf/meson.build new file mode 100644 index 0000000000..c960989bb7 --- /dev/null +++ b/drivers/raw/cnxk_rvu_lf/meson.build @@ -0,0 +1,11 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(C) 2024 Marvell. +# + +deps += ['bus_pci', 'common_cnxk', 'rawdev'] +sources = files( + 'cnxk_rvu_lf.c', + 'cnxk_rvu_lf_selftest.c', +) +driver_sdk_headers += files('cnxk_rvu_lf_driver.h') +require_iova_in_mbuf = false diff --git a/drivers/raw/cnxk_rvu_lf/version.map b/drivers/raw/cnxk_rvu_lf/version.map new file mode 100644 index 0000000000..012f05d1e4 --- /dev/null +++ b/drivers/raw/cnxk_rvu_lf/version.map @@ -0,0 +1,16 @@ +INTERNAL { + global: + + rte_pmd_rvu_lf_bar_get; + rte_pmd_rvu_lf_irq_register; + rte_pmd_rvu_lf_irq_unregister; + rte_pmd_rvu_lf_msg_handler_register; + rte_pmd_rvu_lf_msg_handler_unregister; + rte_pmd_rvu_lf_msg_id_range_set; + rte_pmd_rvu_lf_msg_process; + rte_pmd_rvu_lf_npa_pf_func_get; + rte_pmd_rvu_lf_pf_func_get; + rte_pmd_rvu_lf_sso_pf_func_get; + + local: *; +}; diff --git a/drivers/raw/dpaa2_cmdif/dpaa2_cmdif.c b/drivers/raw/dpaa2_cmdif/dpaa2_cmdif.c index de8c024abb..34a3c4f6af 100644 --- a/drivers/raw/dpaa2_cmdif/dpaa2_cmdif.c +++ b/drivers/raw/dpaa2_cmdif/dpaa2_cmdif.c @@ -1,5 +1,5 @@ /* SPDX-License-Identifier: BSD-3-Clause - * Copyright 2018-2019 NXP + * Copyright 2018-2019, 2024 NXP */ #include @@ -142,7 +142,7 @@ dpaa2_cmdif_dequeue_bufs(struct rte_rawdev *dev, cmdif_rcv_cnxt = (struct rte_dpaa2_cmdif_context *)(context); rxq = &(cidev->rx_queue[cmdif_rcv_cnxt->priority]); - dq_storage = rxq->q_storage->dq_storage[0]; + dq_storage = rxq->q_storage[0]->dq_storage[0]; qbman_pull_desc_clear(&pulldesc); qbman_pull_desc_set_fq(&pulldesc, rxq->fqid); diff --git a/drivers/raw/meson.build b/drivers/raw/meson.build index 05cad143fe..54221643d5 100644 --- a/drivers/raw/meson.build +++ b/drivers/raw/meson.build @@ -8,6 +8,7 @@ endif drivers = [ 'cnxk_bphy', 'cnxk_gpio', + 'cnxk_rvu_lf', 'dpaa2_cmdif', 'ifpga', 'ntb', diff --git a/drivers/vdpa/nfp/nfp_vdpa.c b/drivers/vdpa/nfp/nfp_vdpa.c index a32ed2b193..7f2f21ec6c 100644 --- a/drivers/vdpa/nfp/nfp_vdpa.c +++ b/drivers/vdpa/nfp/nfp_vdpa.c @@ -134,7 +134,7 @@ nfp_vdpa_vfio_setup(struct nfp_vdpa_dev *device) if (device->vfio_group_fd < 0) goto container_destroy; - DRV_VDPA_LOG(DEBUG, "container_fd=%d, group_fd=%d,", + DRV_VDPA_LOG(DEBUG, "The container_fd=%d, group_fd=%d.", device->vfio_container_fd, device->vfio_group_fd); ret = rte_pci_map_device(pci_dev); @@ -178,7 +178,7 @@ nfp_vdpa_dma_do_unmap(struct rte_vhost_memory *mem, region->size); if (ret < 0) { /* Here should not return, even error happened. */ - DRV_VDPA_LOG(ERR, "DMA unmap failed. Times: %u", i); + DRV_VDPA_LOG(ERR, "DMA unmap failed. 
Times: %u.", i); } } @@ -225,7 +225,7 @@ nfp_vdpa_dma_map(struct nfp_vdpa_dev *device, } vfio_container_fd = device->vfio_container_fd; - DRV_VDPA_LOG(DEBUG, "vfio_container_fd %d", vfio_container_fd); + DRV_VDPA_LOG(DEBUG, "The vfio_container_fd %d.", vfio_container_fd); if (do_map) ret = nfp_vdpa_dma_do_map(mem, mem->nregions, vfio_container_fd); @@ -533,7 +533,7 @@ nfp_vdpa_enable_vfio_intr(struct nfp_vdpa_dev *device, for (i = 0; i < nr_vring; i += 2) { fd = eventfd(0, EFD_NONBLOCK | EFD_CLOEXEC); if (fd < 0) { - DRV_VDPA_LOG(ERR, "Can't setup eventfd"); + DRV_VDPA_LOG(ERR, "Can't setup eventfd."); return -EINVAL; } @@ -587,7 +587,7 @@ nfp_vdpa_read_kickfd(int kickfd) if (errno != EINTR && errno != EWOULDBLOCK && errno != EAGAIN) { - DRV_VDPA_LOG(ERR, "Error reading kickfd"); + DRV_VDPA_LOG(ERR, "Error reading kickfd."); break; } } @@ -609,7 +609,7 @@ nfp_vdpa_notify_epoll_ctl(uint32_t queue_num, ev.data.u64 = qid | (uint64_t)vring.kickfd << 32; ret = epoll_ctl(device->epoll_fd, EPOLL_CTL_ADD, vring.kickfd, &ev); if (ret < 0) { - DRV_VDPA_LOG(ERR, "Epoll add error for queue %d", qid); + DRV_VDPA_LOG(ERR, "Epoll add error for queue %d.", qid); return ret; } } @@ -633,7 +633,7 @@ nfp_vdpa_notify_epoll_wait(uint32_t queue_num, if (errno == EINTR) continue; - DRV_VDPA_LOG(ERR, "Epoll wait fail"); + DRV_VDPA_LOG(ERR, "Epoll wait fail."); return -EACCES; } @@ -659,7 +659,7 @@ nfp_vdpa_notify_relay(void *arg) epoll_fd = epoll_create(NFP_VDPA_MAX_QUEUES * 2); if (epoll_fd < 0) { - DRV_VDPA_LOG(ERR, "failed to create epoll instance."); + DRV_VDPA_LOG(ERR, "Failed to create epoll instance."); return 1; } @@ -794,7 +794,7 @@ nfp_vdpa_vring_epoll_ctl(uint32_t queue_num, ev.data.u64 = qid << 1 | (uint64_t)vring.kickfd << 32; ret = epoll_ctl(device->epoll_fd, EPOLL_CTL_ADD, vring.kickfd, &ev); if (ret < 0) { - DRV_VDPA_LOG(ERR, "Epoll add error for queue %u", qid); + DRV_VDPA_LOG(ERR, "Epoll add error for queue %u.", qid); return ret; } } @@ -808,7 +808,7 @@ nfp_vdpa_vring_epoll_ctl(uint32_t queue_num, ret = epoll_ctl(device->epoll_fd, EPOLL_CTL_ADD, device->intr_fd[qid], &ev); if (ret < 0) { - DRV_VDPA_LOG(ERR, "Epoll add error for queue %u", qid); + DRV_VDPA_LOG(ERR, "Epoll add error for queue %u.", qid); return ret; } @@ -834,7 +834,7 @@ nfp_vdpa_vring_epoll_wait(uint32_t queue_num, if (errno == EINTR) continue; - DRV_VDPA_LOG(ERR, "Epoll wait fail"); + DRV_VDPA_LOG(ERR, "Epoll wait fail."); return -EACCES; } @@ -966,7 +966,7 @@ nfp_vdpa_dev_config(int vid) vdev = rte_vhost_get_vdpa_device(vid); node = nfp_vdpa_find_node_by_vdev(vdev); if (node == NULL) { - DRV_VDPA_LOG(ERR, "Invalid vDPA device: %p", vdev); + DRV_VDPA_LOG(ERR, "Invalid vDPA device: %p.", vdev); return -ENODEV; } @@ -993,7 +993,7 @@ nfp_vdpa_dev_close(int vid) vdev = rte_vhost_get_vdpa_device(vid); node = nfp_vdpa_find_node_by_vdev(vdev); if (node == NULL) { - DRV_VDPA_LOG(ERR, "Invalid vDPA device: %p", vdev); + DRV_VDPA_LOG(ERR, "Invalid vDPA device: %p.", vdev); return -ENODEV; } @@ -1032,7 +1032,7 @@ nfp_vdpa_get_vfio_group_fd(int vid) vdev = rte_vhost_get_vdpa_device(vid); node = nfp_vdpa_find_node_by_vdev(vdev); if (node == NULL) { - DRV_VDPA_LOG(ERR, "Invalid vDPA device: %p", vdev); + DRV_VDPA_LOG(ERR, "Invalid vDPA device: %p.", vdev); return -ENODEV; } @@ -1048,7 +1048,7 @@ nfp_vdpa_get_vfio_device_fd(int vid) vdev = rte_vhost_get_vdpa_device(vid); node = nfp_vdpa_find_node_by_vdev(vdev); if (node == NULL) { - DRV_VDPA_LOG(ERR, "Invalid vDPA device: %p", vdev); + DRV_VDPA_LOG(ERR, "Invalid vDPA device: %p.", vdev); 
return -ENODEV; } @@ -1099,7 +1099,7 @@ nfp_vdpa_get_queue_num(struct rte_vdpa_device *vdev, node = nfp_vdpa_find_node_by_vdev(vdev); if (node == NULL) { - DRV_VDPA_LOG(ERR, "Invalid vDPA device: %p", vdev); + DRV_VDPA_LOG(ERR, "Invalid vDPA device: %p.", vdev); return -ENODEV; } @@ -1147,12 +1147,12 @@ nfp_vdpa_set_features(int32_t vid) struct rte_vdpa_device *vdev; struct nfp_vdpa_dev_node *node; - DRV_VDPA_LOG(DEBUG, "Start vid=%d", vid); + DRV_VDPA_LOG(DEBUG, "Start vid=%d.", vid); vdev = rte_vhost_get_vdpa_device(vid); node = nfp_vdpa_find_node_by_vdev(vdev); if (node == NULL) { - DRV_VDPA_LOG(ERR, "Invalid vDPA device: %p", vdev); + DRV_VDPA_LOG(ERR, "Invalid vDPA device: %p.", vdev); return -ENODEV; } @@ -1165,7 +1165,7 @@ nfp_vdpa_set_features(int32_t vid) if (device->hw.sw_lm) { ret = nfp_vdpa_sw_fallback(device); if (ret != 0) { - DRV_VDPA_LOG(ERR, "Software fallback start failed"); + DRV_VDPA_LOG(ERR, "Software fallback start failed."); return -1; } } @@ -1178,7 +1178,7 @@ nfp_vdpa_set_vring_state(int vid, int vring, int state) { - DRV_VDPA_LOG(DEBUG, "Start vid=%d, vring=%d, state=%d", vid, vring, state); + DRV_VDPA_LOG(DEBUG, "Start vid=%d, vring=%d, state=%d.", vid, vring, state); return 0; } @@ -1227,7 +1227,7 @@ nfp_vdpa_pci_probe(struct rte_pci_device *pci_dev) device->vdev = rte_vdpa_register_device(&pci_dev->device, &nfp_vdpa_ops); if (device->vdev == NULL) { - DRV_VDPA_LOG(ERR, "Failed to register device %s", pci_dev->name); + DRV_VDPA_LOG(ERR, "Failed to register device %s.", pci_dev->name); goto vfio_teardown; } @@ -1263,7 +1263,7 @@ nfp_vdpa_pci_remove(struct rte_pci_device *pci_dev) node = nfp_vdpa_find_node_by_pdev(pci_dev); if (node == NULL) { - DRV_VDPA_LOG(ERR, "Invalid device: %s", pci_dev->name); + DRV_VDPA_LOG(ERR, "Invalid device: %s.", pci_dev->name); return -ENODEV; } diff --git a/drivers/vdpa/nfp/nfp_vdpa_core.c b/drivers/vdpa/nfp/nfp_vdpa_core.c index 70aeb4a3ac..b3076104a0 100644 --- a/drivers/vdpa/nfp/nfp_vdpa_core.c +++ b/drivers/vdpa/nfp/nfp_vdpa_core.c @@ -64,7 +64,7 @@ nfp_vdpa_hw_init(struct nfp_vdpa_hw *vdpa_hw, hw = &vdpa_hw->super; hw->ctrl_bar = pci_dev->mem_resource[0].addr; if (hw->ctrl_bar == NULL) { - DRV_CORE_LOG(ERR, "hw->ctrl_bar is NULL. BAR0 not configured."); + DRV_CORE_LOG(ERR, "The hw->ctrl_bar is NULL. 
BAR0 not configured."); return -ENODEV; } @@ -80,7 +80,7 @@ nfp_vdpa_hw_init(struct nfp_vdpa_hw *vdpa_hw, notify_base += NFP_VDPA_NOTIFY_ADDR_INTERVAL; vdpa_hw->notify_region = queue; - DRV_CORE_LOG(DEBUG, "notify_addr[%d] at %p, notify_addr[%d] at %p", + DRV_CORE_LOG(DEBUG, "The notify_addr[%d] at %p, notify_addr[%d] at %p.", idx, vdpa_hw->notify_addr[idx], idx + 1, vdpa_hw->notify_addr[idx + 1]); } diff --git a/examples/distributor/main.c b/examples/distributor/main.c index ddbc387c20..ea44939fba 100644 --- a/examples/distributor/main.c +++ b/examples/distributor/main.c @@ -17,7 +17,7 @@ #include #include #include -#include +#include #define RX_RING_SIZE 1024 #define TX_RING_SIZE 1024 diff --git a/examples/ethtool/lib/rte_ethtool.c b/examples/ethtool/lib/rte_ethtool.c index edc28d5c63..0b88a27e7d 100644 --- a/examples/ethtool/lib/rte_ethtool.c +++ b/examples/ethtool/lib/rte_ethtool.c @@ -53,10 +53,8 @@ rte_ethtool_get_drvinfo(uint16_t port_id, struct ethtool_drvinfo *drvinfo) sizeof(drvinfo->bus_info)); memset(®_info, 0, sizeof(reg_info)); - rte_eth_dev_get_reg_info(port_id, ®_info); - n = reg_info.length; - if (n > 0) - drvinfo->regdump_len = n; + if (rte_eth_dev_get_reg_info(port_id, ®_info) == 0) + drvinfo->regdump_len = reg_info.length; else drvinfo->regdump_len = 0; diff --git a/examples/ipsec-secgw/meson.build b/examples/ipsec-secgw/meson.build index ccdaef1c4d..023d9cf039 100644 --- a/examples/ipsec-secgw/meson.build +++ b/examples/ipsec-secgw/meson.build @@ -23,3 +23,9 @@ sources = files( 'sp4.c', 'sp6.c', ) +app_cflags = ['-Wno-address-of-packed-member'] +foreach flag:app_cflags + if cc.has_argument(flag) + cflags += flag + endif +endforeach diff --git a/examples/l3fwd-graph/main.c b/examples/l3fwd-graph/main.c index 7fabd5b8d5..92cdaa1ebe 100644 --- a/examples/l3fwd-graph/main.c +++ b/examples/l3fwd-graph/main.c @@ -1081,7 +1081,10 @@ main(int argc, char **argv) printf("Creating queues: nb_rxq=%d nb_txq=%u... ", nb_rx_queue, n_tx_queue); - rte_eth_dev_info_get(portid, &dev_info); + ret = rte_eth_dev_info_get(portid, &dev_info); + if (ret != 0) + rte_exit(EXIT_FAILURE, + "Unable to get info for port %u\n", portid); ret = config_port_max_pkt_len(&local_port_conf, &dev_info); if (ret != 0) @@ -1213,7 +1216,12 @@ main(int argc, char **argv) printf("rxq=%d,%d,%d ", portid, queueid, socketid); fflush(stdout); - rte_eth_dev_info_get(portid, &dev_info); + ret = rte_eth_dev_info_get(portid, &dev_info); + if (ret < 0) + rte_exit(EXIT_FAILURE, + "rte_eth_dev_info_get: err=%d, port=%u\n", + ret, portid); + rxq_conf = dev_info.default_rxconf; rxq_conf.offloads = port_conf.rxmode.offloads; if (!per_port_pool) diff --git a/examples/l3fwd-power/main.c b/examples/l3fwd-power/main.c index 2bb6b092c3..ae8b55924e 100644 --- a/examples/l3fwd-power/main.c +++ b/examples/l3fwd-power/main.c @@ -41,12 +41,13 @@ #include #include #include -#include +#include #include #include #include #include #include +#include #include "perf_core.h" #include "main.h" @@ -265,6 +266,9 @@ static uint32_t pause_duration = 1; static uint32_t scale_freq_min; static uint32_t scale_freq_max; +static int cpu_resume_latency = -1; +static int resume_latency_bk[RTE_MAX_LCORE]; + static struct rte_mempool * pktmbuf_pool[NB_SOCKETS]; @@ -440,8 +444,7 @@ power_timer_cb(__rte_unused struct rte_timer *tim, * check whether need to scale down frequency a step if it sleep a lot. 
*/ if (sleep_time_ratio >= SCALING_DOWN_TIME_RATIO_THRESHOLD) { - if (rte_power_freq_down) - rte_power_freq_down(lcore_id); + rte_power_freq_down(lcore_id); } else if ( (unsigned)(stats[lcore_id].nb_rx_processed / stats[lcore_id].nb_iteration_looped) < MAX_PKT_BURST) { @@ -449,8 +452,7 @@ power_timer_cb(__rte_unused struct rte_timer *tim, * scale down a step if average packet per iteration less * than expectation. */ - if (rte_power_freq_down) - rte_power_freq_down(lcore_id); + rte_power_freq_down(lcore_id); } /** @@ -1344,11 +1346,9 @@ main_legacy_loop(__rte_unused void *dummy) } if (lcore_scaleup_hint == FREQ_HIGHEST) { - if (rte_power_freq_max) - rte_power_freq_max(lcore_id); + rte_power_freq_max(lcore_id); } else if (lcore_scaleup_hint == FREQ_HIGHER) { - if (rte_power_freq_up) - rte_power_freq_up(lcore_id); + rte_power_freq_up(lcore_id); } } else { /** @@ -1501,6 +1501,8 @@ print_usage(const char *prgname) " -U: set min/max frequency for uncore to maximum value\n" " -i (frequency index): set min/max frequency for uncore to specified frequency index\n" " --config (port,queue,lcore): rx queues configuration\n" + " --cpu-resume-latency LATENCY: set CPU resume latency to control C-state selection," + " 0 : just allow to enter C0-state\n" " --high-perf-cores CORELIST: list of high performance cores\n" " --perf-config: similar as config, cores specified as indices" " for bins containing high or regular performance cores\n" @@ -1524,8 +1526,12 @@ print_usage(const char *prgname) prgname); } +/* + * Caller must give the right upper limit so as to ensure receiver variable + * doesn't overflow. + */ static int -parse_int(const char *opt) +parse_uint(const char *opt, uint32_t max, uint32_t *res) { char *end = NULL; unsigned long val; @@ -1535,23 +1541,15 @@ parse_int(const char *opt) if ((opt[0] == '\0') || (end == NULL) || (*end != '\0')) return -1; - return val; -} - -static int parse_max_pkt_len(const char *pktlen) -{ - char *end = NULL; - unsigned long len; - - /* parse decimal string */ - len = strtoul(pktlen, &end, 10); - if ((pktlen[0] == '\0') || (end == NULL) || (*end != '\0')) + if (val > max) { + RTE_LOG(ERR, L3FWD_POWER, "%s parameter shouldn't exceed %u.\n", + opt, max); return -1; + } - if (len == 0) - return -1; + *res = val; - return len; + return 0; } static int @@ -1743,6 +1741,7 @@ parse_pmd_mgmt_config(const char *name) #define CMD_LINE_OPT_PAUSE_DURATION "pause-duration" #define CMD_LINE_OPT_SCALE_FREQ_MIN "scale-freq-min" #define CMD_LINE_OPT_SCALE_FREQ_MAX "scale-freq-max" +#define CMD_LINE_OPT_CPU_RESUME_LATENCY "cpu-resume-latency" /* Parse the argument given in the command line of the application */ static int @@ -1757,6 +1756,7 @@ parse_args(int argc, char **argv) {"perf-config", 1, 0, 0}, {"high-perf-cores", 1, 0, 0}, {"no-numa", 0, 0, 0}, + {CMD_LINE_OPT_CPU_RESUME_LATENCY, 1, 0, 0}, {CMD_LINE_OPT_MAX_PKT_LEN, 1, 0, 0}, {CMD_LINE_OPT_PARSE_PTYPE, 0, 0, 0}, {CMD_LINE_OPT_LEGACY, 0, 0, 0}, @@ -1898,8 +1898,9 @@ parse_args(int argc, char **argv) if (!strncmp(lgopts[option_index].name, CMD_LINE_OPT_MAX_PKT_LEN, sizeof(CMD_LINE_OPT_MAX_PKT_LEN))) { + if (parse_uint(optarg, UINT32_MAX, &max_pkt_len) != 0) + return -1; printf("Custom frame size is configured\n"); - max_pkt_len = parse_max_pkt_len(optarg); } if (!strncmp(lgopts[option_index].name, @@ -1912,29 +1913,42 @@ parse_args(int argc, char **argv) if (!strncmp(lgopts[option_index].name, CMD_LINE_OPT_MAX_EMPTY_POLLS, sizeof(CMD_LINE_OPT_MAX_EMPTY_POLLS))) { + if (parse_uint(optarg, UINT32_MAX, &max_empty_polls) != 0) + 
return -1; printf("Maximum empty polls configured\n"); - max_empty_polls = parse_int(optarg); } if (!strncmp(lgopts[option_index].name, CMD_LINE_OPT_PAUSE_DURATION, sizeof(CMD_LINE_OPT_PAUSE_DURATION))) { + if (parse_uint(optarg, UINT32_MAX, &pause_duration) != 0) + return -1; printf("Pause duration configured\n"); - pause_duration = parse_int(optarg); } if (!strncmp(lgopts[option_index].name, CMD_LINE_OPT_SCALE_FREQ_MIN, sizeof(CMD_LINE_OPT_SCALE_FREQ_MIN))) { + if (parse_uint(optarg, UINT32_MAX, &scale_freq_min) != 0) + return -1; printf("Scaling frequency minimum configured\n"); - scale_freq_min = parse_int(optarg); } if (!strncmp(lgopts[option_index].name, CMD_LINE_OPT_SCALE_FREQ_MAX, sizeof(CMD_LINE_OPT_SCALE_FREQ_MAX))) { + if (parse_uint(optarg, UINT32_MAX, &scale_freq_max) != 0) + return -1; printf("Scaling frequency maximum configured\n"); - scale_freq_max = parse_int(optarg); + } + + if (!strncmp(lgopts[option_index].name, + CMD_LINE_OPT_CPU_RESUME_LATENCY, + sizeof(CMD_LINE_OPT_CPU_RESUME_LATENCY))) { + if (parse_uint(optarg, INT_MAX, + (uint32_t *)&cpu_resume_latency) != 0) + return -1; + printf("PM QoS configured\n"); } break; @@ -2260,6 +2274,35 @@ init_power_library(void) return -1; } } + + if (cpu_resume_latency != -1) { + RTE_LCORE_FOREACH(lcore_id) { + /* Back old CPU resume latency. */ + ret = rte_power_qos_get_cpu_resume_latency(lcore_id); + if (ret < 0) { + RTE_LOG(ERR, L3FWD_POWER, + "Failed to get cpu resume latency on lcore-%u, ret=%d.\n", + lcore_id, ret); + } + resume_latency_bk[lcore_id] = ret; + + /* + * Set the cpu resume latency of the worker lcore based + * on user's request. If set strict latency (0), just + * allow the CPU to enter the shallowest idle state to + * improve performance. + */ + ret = rte_power_qos_set_cpu_resume_latency(lcore_id, + cpu_resume_latency); + if (ret != 0) { + RTE_LOG(ERR, L3FWD_POWER, + "Failed to set cpu resume latency on lcore-%u, ret=%d.\n", + lcore_id, ret); + return ret; + } + } + } + return ret; } @@ -2299,6 +2342,15 @@ deinit_power_library(void) } } } + + if (cpu_resume_latency != -1) { + RTE_LCORE_FOREACH(lcore_id) { + /* Restore the original value. */ + rte_power_qos_set_cpu_resume_latency(lcore_id, + resume_latency_bk[lcore_id]); + } + } + return ret; } diff --git a/examples/l3fwd-power/perf_core.c b/examples/l3fwd-power/perf_core.c index 6c0f7ea213..1b5419119a 100644 --- a/examples/l3fwd-power/perf_core.c +++ b/examples/l3fwd-power/perf_core.c @@ -10,7 +10,7 @@ #include #include #include -#include +#include #include #include "perf_core.h" diff --git a/examples/l3fwd/l3fwd_acl.c b/examples/l3fwd/l3fwd_acl.c index b635011ef7..c30ba07c1a 100644 --- a/examples/l3fwd/l3fwd_acl.c +++ b/examples/l3fwd/l3fwd_acl.c @@ -993,11 +993,15 @@ dump_denied_pkt(const struct rte_mbuf *pkt, uint32_t res) #endif } -static inline void +/* + * run packets through ACL classify. 
+ * returns number of packets to be dropped (hops[i] == BAD_PORT) + */ +static inline uint32_t acl_process_pkts(struct rte_mbuf *pkts[MAX_PKT_BURST], uint16_t hops[MAX_PKT_BURST], uint32_t num, int32_t socketid) { - uint32_t i, n4, n6, res; + uint32_t i, k, n4, n6, res; struct acl_search_t acl_search; /* split packets burst depending on packet type (IPv4/IPv6) */ @@ -1020,6 +1024,7 @@ acl_process_pkts(struct rte_mbuf *pkts[MAX_PKT_BURST], /* combine lookup results back, into one array of next hops */ n4 = 0; n6 = 0; + k = 0; for (i = 0; i != num; i++) { switch (acl_search.types[i]) { case TYPE_IPV4: @@ -1034,21 +1039,33 @@ acl_process_pkts(struct rte_mbuf *pkts[MAX_PKT_BURST], if (likely((res & ACL_DENY_SIGNATURE) == 0 && res != 0)) hops[i] = res - FWD_PORT_SHIFT; else { + /* bad or denied by ACL rule packets */ hops[i] = BAD_PORT; dump_denied_pkt(pkts[i], res); + k++; } } + + return k; } +/* + * send_packets_multi() can't deal properly with hops[i] == BAD_PORT + * (it assumes input hops[] contain only valid port numbers), + * so it is ok to use it only when there are no denied packets. + */ static inline void acl_send_packets(struct lcore_conf *qconf, struct rte_mbuf *pkts[], - uint16_t hops[], uint32_t num) + uint16_t hops[], uint32_t num, uint32_t nb_drop) { #if defined ACL_SEND_MULTI - send_packets_multi(qconf, pkts, hops, num); + if (nb_drop == 0) + send_packets_multi(qconf, pkts, hops, num); + else #else - send_packets_single(qconf, pkts, hops, num); + RTE_SET_USED(nb_drop); #endif + send_packets_single(qconf, pkts, hops, num); } /* main processing loop */ @@ -1056,10 +1073,10 @@ int acl_main_loop(__rte_unused void *dummy) { struct rte_mbuf *pkts_burst[MAX_PKT_BURST]; - uint16_t hops[MAX_PKT_BURST]; + uint16_t hops[SENDM_PORT_OVERHEAD(MAX_PKT_BURST)]; unsigned int lcore_id; uint64_t prev_tsc, diff_tsc, cur_tsc; - int i, nb_rx; + int i, nb_drop, nb_rx; uint16_t portid; uint16_t queueid; struct lcore_conf *qconf; @@ -1122,10 +1139,10 @@ acl_main_loop(__rte_unused void *dummy) pkts_burst, MAX_PKT_BURST); if (nb_rx > 0) { - acl_process_pkts(pkts_burst, hops, nb_rx, - socketid); + nb_drop = acl_process_pkts(pkts_burst, hops, + nb_rx, socketid); acl_send_packets(qconf, pkts_burst, hops, - nb_rx); + nb_rx, nb_drop); } } } diff --git a/examples/l3fwd/l3fwd_altivec.h b/examples/l3fwd/l3fwd_altivec.h index e45e138e59..b91a6b5587 100644 --- a/examples/l3fwd/l3fwd_altivec.h +++ b/examples/l3fwd/l3fwd_altivec.h @@ -11,6 +11,9 @@ #include "altivec/port_group.h" #include "l3fwd_common.h" +#undef SENDM_PORT_OVERHEAD +#define SENDM_PORT_OVERHEAD(x) ((x) + 2 * FWDSTEP) + /* * Update source and destination MAC addresses in the ethernet header. * Perform RFC1812 checks and updates for IPV4 packets. @@ -117,7 +120,8 @@ process_packet(struct rte_mbuf *pkt, uint16_t *dst_port) */ static __rte_always_inline void send_packets_multi(struct lcore_conf *qconf, struct rte_mbuf **pkts_burst, - uint16_t dst_port[MAX_PKT_BURST], int nb_rx) + uint16_t dst_port[SENDM_PORT_OVERHEAD(MAX_PKT_BURST)], + int nb_rx) { int32_t k; int j = 0; diff --git a/examples/l3fwd/l3fwd_common.h b/examples/l3fwd/l3fwd_common.h index 224b1c08e8..d94e5f1357 100644 --- a/examples/l3fwd/l3fwd_common.h +++ b/examples/l3fwd/l3fwd_common.h @@ -18,6 +18,13 @@ /* Minimum value of IPV4 total length (20B) in network byte order. 
*/ #define IPV4_MIN_LEN_BE (sizeof(struct rte_ipv4_hdr) << 8) +/* + * send_packets_multi() needs a few spare slots in the destination port array: + * due to its implementation, the array has to be allocated bigger than + * the actual maximum number of elements it will hold. + */ +#define SENDM_PORT_OVERHEAD(x) (x) + /* * From http://www.rfc-editor.org/rfc/rfc1812.txt section 5.2.2: * - The IP version number must be 4. diff --git a/examples/l3fwd/l3fwd_em_hlm.h b/examples/l3fwd/l3fwd_em_hlm.h index 31cda9ddc1..c1d819997a 100644 --- a/examples/l3fwd/l3fwd_em_hlm.h +++ b/examples/l3fwd/l3fwd_em_hlm.h @@ -249,7 +249,7 @@ static inline void l3fwd_em_send_packets(int nb_rx, struct rte_mbuf **pkts_burst, uint16_t portid, struct lcore_conf *qconf) { - uint16_t dst_port[MAX_PKT_BURST]; + uint16_t dst_port[SENDM_PORT_OVERHEAD(MAX_PKT_BURST)]; l3fwd_em_process_packets(nb_rx, pkts_burst, dst_port, portid, qconf, 0); send_packets_multi(qconf, pkts_burst, dst_port, nb_rx); diff --git a/examples/l3fwd/l3fwd_em_sequential.h b/examples/l3fwd/l3fwd_em_sequential.h index 067f23889a..3a40b2e434 100644 --- a/examples/l3fwd/l3fwd_em_sequential.h +++ b/examples/l3fwd/l3fwd_em_sequential.h @@ -79,7 +79,7 @@ l3fwd_em_send_packets(int nb_rx, struct rte_mbuf **pkts_burst, uint16_t portid, struct lcore_conf *qconf) { int32_t i, j; - uint16_t dst_port[MAX_PKT_BURST]; + uint16_t dst_port[SENDM_PORT_OVERHEAD(MAX_PKT_BURST)]; if (nb_rx > 0) { rte_prefetch0(rte_pktmbuf_mtod(pkts_burst[0], diff --git a/examples/l3fwd/l3fwd_fib.c b/examples/l3fwd/l3fwd_fib.c index a0eef05a5d..82bc775a4c 100644 --- a/examples/l3fwd/l3fwd_fib.c +++ b/examples/l3fwd/l3fwd_fib.c @@ -121,7 +121,7 @@ fib_send_packets(int nb_rx, struct rte_mbuf **pkts_burst, { uint32_t ipv4_arr[nb_rx]; struct rte_ipv6_addr ipv6_arr[nb_rx]; - uint16_t hops[nb_rx]; + uint16_t hops[SENDM_PORT_OVERHEAD(nb_rx)]; uint64_t hopsv4[nb_rx], hopsv6[nb_rx]; uint8_t type_arr[nb_rx]; uint32_t ipv4_cnt = 0, ipv6_cnt = 0; @@ -672,8 +672,12 @@ setup_fib(const int socketid) enabled_port_mask) == 0) continue; - rte_eth_dev_info_get(route_base_v4[i].if_out, - &dev_info); + ret = rte_eth_dev_info_get(route_base_v4[i].if_out, &dev_info); + if (ret < 0) + rte_exit(EXIT_FAILURE, + "Unable to get device info for port %u\n", + route_base_v4[i].if_out); + ret = rte_fib_add(ipv4_l3fwd_fib_lookup_struct[socketid], route_base_v4[i].ip, route_base_v4[i].depth, @@ -726,8 +730,12 @@ setup_fib(const int socketid) enabled_port_mask) == 0) continue; - rte_eth_dev_info_get(route_base_v6[i].if_out, - &dev_info); + ret = rte_eth_dev_info_get(route_base_v6[i].if_out, &dev_info); + if (ret < 0) + rte_exit(EXIT_FAILURE, + "Unable to get device info for port %u\n", + route_base_v6[i].if_out); + ret = rte_fib6_add(ipv6_l3fwd_fib_lookup_struct[socketid], &route_base_v6[i].ip6, route_base_v6[i].depth, diff --git a/examples/l3fwd/l3fwd_lpm.c b/examples/l3fwd/l3fwd_lpm.c index fc4f5878fc..14596975a5 100644 --- a/examples/l3fwd/l3fwd_lpm.c +++ b/examples/l3fwd/l3fwd_lpm.c @@ -588,8 +588,11 @@ setup_lpm(const int socketid) enabled_port_mask) == 0) continue; - rte_eth_dev_info_get(route_base_v4[i].if_out, - &dev_info); + ret = rte_eth_dev_info_get(route_base_v4[i].if_out, &dev_info); + if (ret < 0) + rte_exit(EXIT_FAILURE, "Unable to get device info for port %u\n", + route_base_v4[i].if_out); + ret = rte_lpm_add(ipv4_l3fwd_lpm_lookup_struct[socketid], route_base_v4[i].ip, route_base_v4[i].depth, @@ -632,8 +635,11 @@ setup_lpm(const int socketid) enabled_port_mask) == 0) continue; - rte_eth_dev_info_get(route_base_v6[i].if_out, - &dev_info); + ret =
rte_eth_dev_info_get(route_base_v6[i].if_out, &dev_info); + if (ret < 0) + rte_exit(EXIT_FAILURE, "Unable to get device info for port %u\n", + route_base_v6[i].if_out); + ret = rte_lpm6_add(ipv6_l3fwd_lpm_lookup_struct[socketid], &route_base_v6[i].ip6, route_base_v6[i].depth, diff --git a/examples/l3fwd/l3fwd_lpm_altivec.h b/examples/l3fwd/l3fwd_lpm_altivec.h index adb82f1478..91aad5c313 100644 --- a/examples/l3fwd/l3fwd_lpm_altivec.h +++ b/examples/l3fwd/l3fwd_lpm_altivec.h @@ -145,7 +145,7 @@ static inline void l3fwd_lpm_send_packets(int nb_rx, struct rte_mbuf **pkts_burst, uint8_t portid, struct lcore_conf *qconf) { - uint16_t dst_port[MAX_PKT_BURST]; + uint16_t dst_port[SENDM_PORT_OVERHEAD(MAX_PKT_BURST)]; l3fwd_lpm_process_packets(nb_rx, pkts_burst, portid, dst_port, qconf, 0); diff --git a/examples/l3fwd/l3fwd_lpm_neon.h b/examples/l3fwd/l3fwd_lpm_neon.h index 2a68c4c15e..3c1f827424 100644 --- a/examples/l3fwd/l3fwd_lpm_neon.h +++ b/examples/l3fwd/l3fwd_lpm_neon.h @@ -171,7 +171,7 @@ static inline void l3fwd_lpm_send_packets(int nb_rx, struct rte_mbuf **pkts_burst, uint16_t portid, struct lcore_conf *qconf) { - uint16_t dst_port[MAX_PKT_BURST]; + uint16_t dst_port[SENDM_PORT_OVERHEAD(MAX_PKT_BURST)]; l3fwd_lpm_process_packets(nb_rx, pkts_burst, portid, dst_port, qconf, 0); diff --git a/examples/l3fwd/l3fwd_lpm_sse.h b/examples/l3fwd/l3fwd_lpm_sse.h index db15030320..50f1abbd8a 100644 --- a/examples/l3fwd/l3fwd_lpm_sse.h +++ b/examples/l3fwd/l3fwd_lpm_sse.h @@ -129,7 +129,7 @@ static inline void l3fwd_lpm_send_packets(int nb_rx, struct rte_mbuf **pkts_burst, uint16_t portid, struct lcore_conf *qconf) { - uint16_t dst_port[MAX_PKT_BURST]; + uint16_t dst_port[SENDM_PORT_OVERHEAD(MAX_PKT_BURST)]; l3fwd_lpm_process_packets(nb_rx, pkts_burst, portid, dst_port, qconf, 0); diff --git a/examples/l3fwd/l3fwd_neon.h b/examples/l3fwd/l3fwd_neon.h index 40807d5965..bc2bab8265 100644 --- a/examples/l3fwd/l3fwd_neon.h +++ b/examples/l3fwd/l3fwd_neon.h @@ -10,6 +10,9 @@ #include "neon/port_group.h" #include "l3fwd_common.h" +#undef SENDM_PORT_OVERHEAD +#define SENDM_PORT_OVERHEAD(x) ((x) + 2 * FWDSTEP) + /* * Update source and destination MAC addresses in the ethernet header. * Perform RFC1812 checks and updates for IPV4 packets. @@ -92,7 +95,8 @@ process_packet(struct rte_mbuf *pkt, uint16_t *dst_port) */ static __rte_always_inline void send_packets_multi(struct lcore_conf *qconf, struct rte_mbuf **pkts_burst, - uint16_t dst_port[MAX_PKT_BURST], int nb_rx) + uint16_t dst_port[SENDM_PORT_OVERHEAD(MAX_PKT_BURST)], + int nb_rx) { int32_t k; int j = 0; diff --git a/examples/l3fwd/l3fwd_sse.h b/examples/l3fwd/l3fwd_sse.h index 083729cdef..6236b7873c 100644 --- a/examples/l3fwd/l3fwd_sse.h +++ b/examples/l3fwd/l3fwd_sse.h @@ -10,6 +10,9 @@ #include "sse/port_group.h" #include "l3fwd_common.h" +#undef SENDM_PORT_OVERHEAD +#define SENDM_PORT_OVERHEAD(x) ((x) + 2 * FWDSTEP) + /* * Update source and destination MAC addresses in the ethernet header. * Perform RFC1812 checks and updates for IPV4 packets. 
@@ -91,7 +94,8 @@ process_packet(struct rte_mbuf *pkt, uint16_t *dst_port) */ static __rte_always_inline void send_packets_multi(struct lcore_conf *qconf, struct rte_mbuf **pkts_burst, - uint16_t dst_port[MAX_PKT_BURST], int nb_rx) + uint16_t dst_port[SENDM_PORT_OVERHEAD(MAX_PKT_BURST)], + int nb_rx) { int32_t k; int j = 0; diff --git a/examples/ntb/ntb_fwd.c b/examples/ntb/ntb_fwd.c index 56c7672392..37d60208e3 100644 --- a/examples/ntb/ntb_fwd.c +++ b/examples/ntb/ntb_fwd.c @@ -1285,7 +1285,10 @@ main(int argc, char **argv) eth_port_id = rte_eth_find_next(0); if (eth_port_id < RTE_MAX_ETHPORTS) { - rte_eth_dev_info_get(eth_port_id, &ethdev_info); + ret = rte_eth_dev_info_get(eth_port_id, &ethdev_info); + if (ret) + rte_exit(EXIT_FAILURE, "Can't get info for port %u\n", eth_port_id); + eth_pconf.rx_adv_conf.rss_conf.rss_hf &= ethdev_info.flow_type_rss_offloads; ret = rte_eth_dev_configure(eth_port_id, num_queues, diff --git a/examples/pipeline/cli.c b/examples/pipeline/cli.c index 015717cb39..215b4061d5 100644 --- a/examples/pipeline/cli.c +++ b/examples/pipeline/cli.c @@ -390,14 +390,15 @@ ethdev_show(uint16_t port_id, char **out, size_t *out_size) uint32_t length; uint16_t mtu = 0; - if (!rte_eth_dev_is_valid_port(port_id)) + if (rte_eth_dev_info_get(port_id, &info) != 0) + return; + + if (rte_eth_link_get(port_id, &link) != 0) return; rte_eth_dev_get_name_by_port(port_id, name); - rte_eth_dev_info_get(port_id, &info); rte_eth_stats_get(port_id, &stats); rte_eth_macaddr_get(port_id, &addr); - rte_eth_link_get(port_id, &link); rte_eth_dev_get_mtu(port_id, &mtu); snprintf(*out, *out_size, diff --git a/examples/ptpclient/ptpclient.c b/examples/ptpclient/ptpclient.c index afb61bba51..23fa487081 100644 --- a/examples/ptpclient/ptpclient.c +++ b/examples/ptpclient/ptpclient.c @@ -46,6 +46,35 @@ static volatile bool force_quit; #define KERNEL_TIME_ADJUST_LIMIT 20000 #define PTP_PROTOCOL 0x88F7 +#define KP 0.7 +#define KI 0.3 +#define FREQ_EST_MARGIN 0.001 + +enum servo_state { + SERVO_UNLOCKED, + SERVO_JUMP, + SERVO_LOCKED, +}; + +struct pi_servo { + double offset[2]; + double local[2]; + double drift; + double last_freq; + int count; + + double max_frequency; + double step_threshold; + double first_step_threshold; + int first_update; +}; + +enum controller_mode { + MODE_NONE, + MODE_PI, + MAX_ALL +} mode = MODE_NONE; + struct rte_mempool *mbuf_pool; uint32_t ptp_enabled_port_mask; uint8_t ptp_enabled_port_nb; @@ -119,14 +148,14 @@ struct ptp_message { } __rte_packed; }; -struct ptpv2_data_slave_ordinary { +struct ptpv2_time_receiver_ordinary { struct rte_mbuf *m; struct timespec tstamp1; struct timespec tstamp2; struct timespec tstamp3; struct timespec tstamp4; struct clock_id client_clock_id; - struct clock_id master_clock_id; + struct clock_id transmitter_clock_id; struct timeval new_adj; int64_t delta; uint16_t portid; @@ -135,9 +164,12 @@ struct ptpv2_data_slave_ordinary { uint8_t ptpset; uint8_t kernel_time_set; uint16_t current_ptp_port; + int64_t master_offset; + int64_t path_delay; + struct pi_servo *servo; }; -static struct ptpv2_data_slave_ordinary ptp_data; +static struct ptpv2_time_receiver_ordinary ptp_data; static inline uint64_t timespec64_to_ns(const struct timespec *ts) { @@ -262,67 +294,88 @@ port_init(uint16_t port, struct rte_mempool *mbuf_pool) return retval; } + /* + * If the clock servo controller is enabled, the PMD must support + * adjustment of the clock frequency.
+ */ + if (mode != MODE_NONE) { + retval = rte_eth_timesync_adjust_freq(port, 0); + if (retval == -ENOTSUP) { + printf("The servo controller cannot work on devices that" + " do not support frequency adjustment.\n"); + return retval; + } + } + return 0; } static void -print_clock_info(struct ptpv2_data_slave_ordinary *ptp_data) +print_clock_info(struct ptpv2_time_receiver_ordinary *ptp_data) { int64_t nsec; struct timespec net_time, sys_time; - printf("Master Clock id: %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x", - ptp_data->master_clock_id.id[0], - ptp_data->master_clock_id.id[1], - ptp_data->master_clock_id.id[2], - ptp_data->master_clock_id.id[3], - ptp_data->master_clock_id.id[4], - ptp_data->master_clock_id.id[5], - ptp_data->master_clock_id.id[6], - ptp_data->master_clock_id.id[7]); - - printf("\nT2 - Slave Clock. %lds %ldns", + printf("time transmitter clock id: %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x", + ptp_data->transmitter_clock_id.id[0], + ptp_data->transmitter_clock_id.id[1], + ptp_data->transmitter_clock_id.id[2], + ptp_data->transmitter_clock_id.id[3], + ptp_data->transmitter_clock_id.id[4], + ptp_data->transmitter_clock_id.id[5], + ptp_data->transmitter_clock_id.id[6], + ptp_data->transmitter_clock_id.id[7]); + + printf("\nT2 - time receiver clock. %lds %ldns", (ptp_data->tstamp2.tv_sec), (ptp_data->tstamp2.tv_nsec)); - printf("\nT1 - Master Clock. %lds %ldns ", + printf("\nT1 - time transmitter clock. %lds %ldns ", ptp_data->tstamp1.tv_sec, (ptp_data->tstamp1.tv_nsec)); - printf("\nT3 - Slave Clock. %lds %ldns", + printf("\nT3 - time receiver clock. %lds %ldns", ptp_data->tstamp3.tv_sec, (ptp_data->tstamp3.tv_nsec)); - printf("\nT4 - Master Clock. %lds %ldns ", + printf("\nT4 - time transmitter clock. %lds %ldns\n", ptp_data->tstamp4.tv_sec, (ptp_data->tstamp4.tv_nsec)); - printf("\nDelta between master and slave clocks:%"PRId64"ns\n", + if (mode == MODE_NONE) { + printf("\nDelta between transmitter and receiver clocks:%"PRId64"ns\n", ptp_data->delta); - clock_gettime(CLOCK_REALTIME, &sys_time); - rte_eth_timesync_read_time(ptp_data->current_ptp_port, &net_time); + clock_gettime(CLOCK_REALTIME, &sys_time); + rte_eth_timesync_read_time(ptp_data->current_ptp_port, + &net_time); - time_t ts = net_time.tv_sec; + time_t ts = net_time.tv_sec; - printf("\n\nComparison between Linux kernel Time and PTP:"); + printf("\n\nComparison between Linux kernel Time and PTP:"); - printf("\nCurrent PTP Time: %.24s %.9ld ns", + printf("\nCurrent PTP Time: %.24s %.9ld ns", ctime(&ts), net_time.tv_nsec); - nsec = (int64_t)timespec64_to_ns(&net_time) - + nsec = (int64_t)timespec64_to_ns(&net_time) - (int64_t)timespec64_to_ns(&sys_time); - ptp_data->new_adj = ns_to_timeval(nsec); + ptp_data->new_adj = ns_to_timeval(nsec); - gettimeofday(&ptp_data->new_adj, NULL); + gettimeofday(&ptp_data->new_adj, NULL); - time_t tp = ptp_data->new_adj.tv_sec; + time_t tp = ptp_data->new_adj.tv_sec; - printf("\nCurrent SYS Time: %.24s %.6ld ns", - ctime(&tp), ptp_data->new_adj.tv_usec); + printf("\nCurrent SYS Time: %.24s %.6ld ns", + ctime(&tp), ptp_data->new_adj.tv_usec); - printf("\nDelta between PTP and Linux Kernel time:%"PRId64"ns\n", - nsec); + printf("\nDelta between PTP and Linux Kernel time:%"PRId64"ns\n", + nsec); + } + + if (mode == MODE_PI) { + printf("path delay: %"PRId64"ns\n", ptp_data->path_delay); + printf("time transmitter offset: %"PRId64"ns\n", ptp_data->master_offset); + } printf("[Ctrl+C to quit]\n"); @@ -331,7 +384,7 @@ print_clock_info(struct ptpv2_data_slave_ordinary *ptp_data) } static 
int64_t -delta_eval(struct ptpv2_data_slave_ordinary *ptp_data) +delta_eval(struct ptpv2_time_receiver_ordinary *ptp_data) { int64_t delta; uint64_t t1 = 0; @@ -353,7 +406,7 @@ delta_eval(struct ptpv2_data_slave_ordinary *ptp_data) * Parse the PTP SYNC message. */ static void -parse_sync(struct ptpv2_data_slave_ordinary *ptp_data, uint16_t rx_tstamp_idx) +parse_sync(struct ptpv2_time_receiver_ordinary *ptp_data, uint16_t rx_tstamp_idx) { struct ptp_header *ptp_hdr; @@ -362,7 +415,7 @@ parse_sync(struct ptpv2_data_slave_ordinary *ptp_data, uint16_t rx_tstamp_idx) ptp_data->seqID_SYNC = rte_be_to_cpu_16(ptp_hdr->seq_id); if (ptp_data->ptpset == 0) { - rte_memcpy(&ptp_data->master_clock_id, + rte_memcpy(&ptp_data->transmitter_clock_id, &ptp_hdr->source_port_id.clock_id, sizeof(struct clock_id)); ptp_data->ptpset = 1; @@ -383,7 +436,7 @@ parse_sync(struct ptpv2_data_slave_ordinary *ptp_data, uint16_t rx_tstamp_idx) * Parse the PTP FOLLOWUP message and send DELAY_REQ to the main clock. */ static void -parse_fup(struct ptpv2_data_slave_ordinary *ptp_data) +parse_fup(struct ptpv2_time_receiver_ordinary *ptp_data) { struct rte_ether_hdr *eth_hdr; struct rte_ether_addr eth_addr; @@ -402,7 +455,7 @@ parse_fup(struct ptpv2_data_slave_ordinary *ptp_data) eth_hdr = rte_pktmbuf_mtod(m, struct rte_ether_hdr *); ptp_hdr = rte_pktmbuf_mtod_offset(m, struct ptp_header *, sizeof(struct rte_ether_hdr)); - if (memcmp(&ptp_data->master_clock_id, + if (memcmp(&ptp_data->transmitter_clock_id, &ptp_hdr->source_port_id.clock_id, sizeof(struct clock_id)) != 0) return; @@ -529,11 +582,154 @@ update_kernel_time(void) } +static void +clock_path_delay(struct ptpv2_time_receiver_ordinary *ptp_data) +{ + uint64_t t1_ns, t2_ns, t3_ns, t4_ns; + int64_t pd, diff; + + t1_ns = timespec64_to_ns(&ptp_data->tstamp1); + t2_ns = timespec64_to_ns(&ptp_data->tstamp2); + t3_ns = timespec64_to_ns(&ptp_data->tstamp3); + t4_ns = timespec64_to_ns(&ptp_data->tstamp4); + + pd = (t2_ns - t3_ns) + (t4_ns - t1_ns); + diff = t3_ns - t2_ns; + if (diff <= INT32_MAX && diff >= INT32_MIN) + ptp_data->path_delay = pd / 2; + else + ptp_data->path_delay = 0; +} + +static double +pi_sample(struct pi_servo *s, int64_t offset, double local_ts, + enum servo_state *state) +{ + double ki_term, ppb = s->last_freq; + double freq_est_interval, localdiff; + + switch (s->count) { + case 0: + s->offset[0] = offset; + s->local[0] = local_ts; + *state = SERVO_UNLOCKED; + s->count = 1; + break; + case 1: + s->offset[1] = offset; + s->local[1] = local_ts; + + /* Make sure the first sample is older than the second. */ + if (s->local[0] >= s->local[1]) { + *state = SERVO_UNLOCKED; + s->count = 0; + break; + } + + /* Wait long enough before estimating the frequency offset. */ + localdiff = (s->local[1] - s->local[0]) / 1e9; + localdiff += localdiff * FREQ_EST_MARGIN; + freq_est_interval = 0.016 / KI; + if (freq_est_interval > 1000.0) + freq_est_interval = 1000.0; + + if (localdiff < freq_est_interval) { + *state = SERVO_UNLOCKED; + break; + } + + /* Adjust drift by the measured frequency offset. 
*/ + s->drift += (1e9 - s->drift) * (s->offset[1] - s->offset[0]) / + (s->local[1] - s->local[0]); + + if (s->drift < -s->max_frequency) + s->drift = -s->max_frequency; + else if (s->drift > s->max_frequency) + s->drift = s->max_frequency; + + if ((s->first_update && + s->first_step_threshold && + s->first_step_threshold < llabs(offset)) || + (s->step_threshold && + s->step_threshold < llabs(offset))) + *state = SERVO_JUMP; + else + *state = SERVO_LOCKED; + + ppb = s->drift; + s->count = 2; + break; + case 2: + /* + * reset the clock servo when offset is greater than the max + * offset value. Note that the clock jump will be performed in + * step 1, so it is not necessary to have clock jump + * immediately. This allows re-calculating drift as in initial + * clock startup. + */ + if (s->step_threshold && + s->step_threshold < llabs(offset)) { + *state = SERVO_UNLOCKED; + s->count = 0; + break; + } + + ki_term = KI * offset; + ppb = KP * offset + s->drift + ki_term; + if (ppb < -s->max_frequency) + ppb = -s->max_frequency; + else if (ppb > s->max_frequency) + ppb = s->max_frequency; + else + s->drift += ki_term; + + *state = SERVO_LOCKED; + break; + } + + s->last_freq = ppb; + return ppb; +} + +static void +ptp_adjust_servo(struct ptpv2_time_receiver_ordinary *ptp_data) +{ + uint64_t t1_ns, t2_ns; + double adj_freq; + enum servo_state state = SERVO_UNLOCKED; + + t1_ns = timespec64_to_ns(&ptp_data->tstamp1); + t2_ns = timespec64_to_ns(&ptp_data->tstamp2); + ptp_data->master_offset = t2_ns - t1_ns - ptp_data->path_delay; + if (!ptp_data->path_delay) + return; + + adj_freq = pi_sample(ptp_data->servo, ptp_data->master_offset, t2_ns, + &state); + + switch (state) { + case SERVO_UNLOCKED: + break; + case SERVO_JUMP: + ptp_data->servo->first_update = 0; + rte_eth_timesync_adjust_freq(ptp_data->portid, + -(long)(adj_freq * 65.536)); + rte_eth_timesync_adjust_time(ptp_data->portid, + -ptp_data->master_offset); + break; + case SERVO_LOCKED: + ptp_data->servo->first_update = 0; + rte_eth_timesync_adjust_freq(ptp_data->portid, + -(long)(adj_freq * 65.536)); + break; + } +} + /* * Parse the DELAY_RESP message. */ static void -parse_drsp(struct ptpv2_data_slave_ordinary *ptp_data) +parse_drsp(struct ptpv2_time_receiver_ordinary *ptp_data) { struct rte_mbuf *m = ptp_data->m; struct ptp_message *ptp_msg; @@ -553,11 +749,16 @@ parse_drsp(struct ptpv2_data_slave_ordinary *ptp_data) ((uint64_t)ntohl(rx_tstamp->sec_lsb)) | (((uint64_t)ntohs(rx_tstamp->sec_msb)) << 32); - /* Evaluate the delta for adjustment. */ - ptp_data->delta = delta_eval(ptp_data); + if (mode == MODE_PI) { + clock_path_delay(ptp_data); + ptp_adjust_servo(ptp_data); + } else { + /* Evaluate the delta for adjustment. */ + ptp_data->delta = delta_eval(ptp_data); - rte_eth_timesync_adjust_time(ptp_data->portid, - ptp_data->delta); + rte_eth_timesync_adjust_time(ptp_data->portid, + ptp_data->delta); + } ptp_data->current_ptp_port = ptp_data->portid; @@ -571,7 +772,7 @@ parse_drsp(struct ptpv2_data_slave_ordinary *ptp_data) } } -/* This function processes PTP packets, implementing slave PTP IEEE1588 L2 +/* This function processes PTP packets, implementing time receiver PTP IEEE1588 L2 * functionality. 
*/ @@ -652,7 +853,9 @@ print_usage(const char *prgname) printf("%s [EAL options] -- -p PORTMASK -T VALUE\n" " -T VALUE: 0 - Disable, 1 - Enable Linux Clock" " Synchronization (0 default)\n" - " -p PORTMASK: hexadecimal bitmask of ports to configure\n", + " -p PORTMASK: hexadecimal bitmask of ports to configure\n" + " -c CONTROLLER: 0 - Not used, 1 - PI. The servo used to" + " synchronize the local clock. (0 default)\n", prgname); } @@ -688,6 +891,36 @@ parse_ptp_kernel(const char *param) return 1; } +static int +parse_ptp_servo_mode(const char *param) +{ + char *end = NULL; + unsigned long pm; + + /* Parse the decimal string. */ + pm = strtoul(param, &end, 10); + + if ((param[0] == '\0') || (end == NULL) || (*end != '\0')) + return -1; + + return pm; +} + +static void +servo_init(struct pi_servo *servo) +{ + memset(servo, 0x00, sizeof(*servo)); + + servo->drift = 100000000; + servo->last_freq = 100000000; + servo->count = 0; + + servo->max_frequency = 100000000; + servo->step_threshold = 0.1 * NSEC_PER_SEC; + servo->first_step_threshold = 0.00002 * NSEC_PER_SEC; + servo->first_update = 1; +} + /* Parse the commandline arguments. */ static int ptp_parse_args(int argc, char **argv) @@ -700,7 +933,7 @@ ptp_parse_args(int argc, char **argv) argvopt = argv; - while ((opt = getopt_long(argc, argvopt, "p:T:", + while ((opt = getopt_long(argc, argvopt, "p:T:c:", lgopts, &option_index)) != EOF) { switch (opt) { @@ -724,6 +957,17 @@ ptp_parse_args(int argc, char **argv) ptp_data.kernel_time_set = ret; break; + case 'c': + ret = parse_ptp_servo_mode(optarg); + if (ret == 0) { + mode = MODE_NONE; + } else if (ret == 1) { + mode = MODE_PI; + } else { + print_usage(prgname); + return -1; + } + break; default: print_usage(prgname); @@ -763,7 +1007,7 @@ main(int argc, char *argv[]) rte_exit(EXIT_FAILURE, "Error with EAL initialization\n"); /* >8 End of initialization of EAL. */ - memset(&ptp_data, '\0', sizeof(struct ptpv2_data_slave_ordinary)); + memset(&ptp_data, 0, sizeof(struct ptpv2_time_receiver_ordinary)); /* Parse specific arguments. 8< */ argc -= ret; @@ -778,6 +1022,14 @@ main(int argc, char *argv[]) rte_exit(EXIT_FAILURE, "Error with PTP initialization\n"); /* >8 End of parsing specific arguments. */ + if (mode == MODE_PI) { + ptp_data.servo = malloc(sizeof(*(ptp_data.servo))); + if (!ptp_data.servo) + rte_exit(EXIT_FAILURE, "no memory for servo\n"); + + servo_init(ptp_data.servo); + } + /* Check that there is an even number of ports to send/receive on.
*/ nb_ports = rte_eth_dev_count_avail(); @@ -831,6 +1083,9 @@ main(int argc, char *argv[]) rte_eth_dev_close(portid); } + if (mode == MODE_PI) + free(ptp_data.servo); + /* clean up the EAL */ rte_eal_cleanup(); diff --git a/examples/qos_sched/init.c b/examples/qos_sched/init.c index 32964fd57e..ace7279c67 100644 --- a/examples/qos_sched/init.c +++ b/examples/qos_sched/init.c @@ -323,6 +323,7 @@ int app_init(void) uint32_t i; char ring_name[MAX_NAME_LEN]; char pool_name[MAX_NAME_LEN]; + int ret; if (rte_eth_dev_count_avail() == 0) rte_exit(EXIT_FAILURE, "No Ethernet port - bye\n"); @@ -368,12 +369,21 @@ int app_init(void) app_init_port(qos_conf[i].tx_port, qos_conf[i].mbuf_pool); memset(&link, 0, sizeof(link)); - rte_eth_link_get(qos_conf[i].tx_port, &link); + ret = rte_eth_link_get(qos_conf[i].tx_port, &link); + if (ret < 0) + rte_exit(EXIT_FAILURE, + "rte_eth_link_get: err=%d, port=%u: %s\n", + ret, qos_conf[i].tx_port, rte_strerror(-ret)); if (link.link_status == 0) printf("Waiting for link on port %u\n", qos_conf[i].tx_port); + while (link.link_status == 0 && retry_count--) { rte_delay_ms(retry_delay); - rte_eth_link_get(qos_conf[i].tx_port, &link); + ret = rte_eth_link_get(qos_conf[i].tx_port, &link); + if (ret < 0) + rte_exit(EXIT_FAILURE, + "rte_eth_link_get: err=%d, port=%u: %s\n", + ret, qos_conf[i].tx_port, rte_strerror(-ret)); } qos_conf[i].sched_port = app_init_sched_port(qos_conf[i].tx_port, socket); diff --git a/examples/vm_power_manager/channel_monitor.c b/examples/vm_power_manager/channel_monitor.c index f21556e27d..800f733a26 100644 --- a/examples/vm_power_manager/channel_monitor.c +++ b/examples/vm_power_manager/channel_monitor.c @@ -31,7 +31,8 @@ #ifdef RTE_NET_I40E #include #endif -#include +#include +#include #include #include "channel_monitor.h" diff --git a/examples/vm_power_manager/channel_monitor.h b/examples/vm_power_manager/channel_monitor.h index ab69524af5..fff6348ca4 100644 --- a/examples/vm_power_manager/channel_monitor.h +++ b/examples/vm_power_manager/channel_monitor.h @@ -5,7 +5,8 @@ #ifndef CHANNEL_MONITOR_H_ #define CHANNEL_MONITOR_H_ -#include +#include +#include #include "channel_manager.h" diff --git a/examples/vm_power_manager/guest_cli/main.c b/examples/vm_power_manager/guest_cli/main.c index 9da50020ac..40d4cb6d49 100644 --- a/examples/vm_power_manager/guest_cli/main.c +++ b/examples/vm_power_manager/guest_cli/main.c @@ -9,7 +9,8 @@ #include #include -#include +#include +#include #include #include #include diff --git a/examples/vm_power_manager/guest_cli/meson.build b/examples/vm_power_manager/guest_cli/meson.build index a69f809e3b..00bc32526d 100644 --- a/examples/vm_power_manager/guest_cli/meson.build +++ b/examples/vm_power_manager/guest_cli/meson.build @@ -6,7 +6,7 @@ # To build this example as a standalone application with an already-installed # DPDK instance, use 'make' -deps += ['power'] +deps += ['power', 'power_kvm_vm'] sources = files( 'main.c', diff --git a/examples/vm_power_manager/guest_cli/vm_power_cli_guest.c b/examples/vm_power_manager/guest_cli/vm_power_cli_guest.c index 5eddb47847..14d1f3dd95 100644 --- a/examples/vm_power_manager/guest_cli/vm_power_cli_guest.c +++ b/examples/vm_power_manager/guest_cli/vm_power_cli_guest.c @@ -18,7 +18,8 @@ #include #include -#include +#include +#include #include "vm_power_cli_guest.h" diff --git a/examples/vm_power_manager/meson.build b/examples/vm_power_manager/meson.build index b866d8fd54..dcf23198eb 100644 --- a/examples/vm_power_manager/meson.build +++ b/examples/vm_power_manager/meson.build @@ -6,7 +6,7 @@
# To build this example as a standalone application with an already-installed # DPDK instance, use 'make' -deps += ['power'] +deps += ['power', 'power_kvm_vm'] if dpdk_conf.has('RTE_NET_BNXT') deps += ['net_bnxt'] diff --git a/examples/vm_power_manager/power_manager.c b/examples/vm_power_manager/power_manager.c index 0355a7f4bc..522c713ff4 100644 --- a/examples/vm_power_manager/power_manager.c +++ b/examples/vm_power_manager/power_manager.c @@ -15,7 +15,7 @@ #include #include -#include +#include #include #include "channel_manager.h" diff --git a/kernel/linux/uapi/.gitignore b/kernel/linux/uapi/.gitignore new file mode 100644 index 0000000000..558ba597d6 --- /dev/null +++ b/kernel/linux/uapi/.gitignore @@ -0,0 +1,4 @@ +** +!**/ +!**/*.h +!version diff --git a/kernel/linux/uapi/linux/vduse.h b/kernel/linux/uapi/linux/vduse.h new file mode 100644 index 0000000000..11bd48c72c --- /dev/null +++ b/kernel/linux/uapi/linux/vduse.h @@ -0,0 +1,353 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +#ifndef _UAPI_VDUSE_H_ +#define _UAPI_VDUSE_H_ + +#include + +#define VDUSE_BASE 0x81 + +/* The ioctls for control device (/dev/vduse/control) */ + +#define VDUSE_API_VERSION 0 + +/* + * Get the version of VDUSE API that kernel supported (VDUSE_API_VERSION). + * This is used for future extension. + */ +#define VDUSE_GET_API_VERSION _IOR(VDUSE_BASE, 0x00, __u64) + +/* Set the version of VDUSE API that userspace supported. */ +#define VDUSE_SET_API_VERSION _IOW(VDUSE_BASE, 0x01, __u64) + +/** + * struct vduse_dev_config - basic configuration of a VDUSE device + * @name: VDUSE device name, needs to be NUL terminated + * @vendor_id: virtio vendor id + * @device_id: virtio device id + * @features: virtio features + * @vq_num: the number of virtqueues + * @vq_align: the allocation alignment of virtqueue's metadata + * @reserved: for future use, needs to be initialized to zero + * @config_size: the size of the configuration space + * @config: the buffer of the configuration space + * + * Structure used by VDUSE_CREATE_DEV ioctl to create VDUSE device. + */ +struct vduse_dev_config { +#define VDUSE_NAME_MAX 256 + char name[VDUSE_NAME_MAX]; + __u32 vendor_id; + __u32 device_id; + __u64 features; + __u32 vq_num; + __u32 vq_align; + __u32 reserved[13]; + __u32 config_size; + __u8 config[]; +}; + +/* Create a VDUSE device which is represented by a char device (/dev/vduse/$NAME) */ +#define VDUSE_CREATE_DEV _IOW(VDUSE_BASE, 0x02, struct vduse_dev_config) + +/* + * Destroy a VDUSE device. Make sure there are no more references + * to the char device (/dev/vduse/$NAME). + */ +#define VDUSE_DESTROY_DEV _IOW(VDUSE_BASE, 0x03, char[VDUSE_NAME_MAX]) + +/* The ioctls for VDUSE device (/dev/vduse/$NAME) */ + +/** + * struct vduse_iotlb_entry - entry of IOTLB to describe one IOVA region [start, last] + * @offset: the mmap offset on returned file descriptor + * @start: start of the IOVA region + * @last: last of the IOVA region + * @perm: access permission of the IOVA region + * + * Structure used by VDUSE_IOTLB_GET_FD ioctl to find an overlapped IOVA region. + */ +struct vduse_iotlb_entry { + __u64 offset; + __u64 start; + __u64 last; +#define VDUSE_ACCESS_RO 0x1 +#define VDUSE_ACCESS_WO 0x2 +#define VDUSE_ACCESS_RW 0x3 + __u8 perm; +}; + +/* + * Find the first IOVA region that overlaps with the range [start, last] + * and return the corresponding file descriptor. Return -EINVAL means the + * IOVA region doesn't exist. Caller should set start and last fields. 
+ */ +#define VDUSE_IOTLB_GET_FD _IOWR(VDUSE_BASE, 0x10, struct vduse_iotlb_entry) + +/* + * Get the negotiated virtio features. It's a subset of the features in + * struct vduse_dev_config which can be accepted by virtio driver. It's + * only valid after FEATURES_OK status bit is set. + */ +#define VDUSE_DEV_GET_FEATURES _IOR(VDUSE_BASE, 0x11, __u64) + +/** + * struct vduse_config_data - data used to update configuration space + * @offset: the offset from the beginning of configuration space + * @length: the length to write to configuration space + * @buffer: the buffer used to write from + * + * Structure used by VDUSE_DEV_SET_CONFIG ioctl to update device + * configuration space. + */ +struct vduse_config_data { + __u32 offset; + __u32 length; + __u8 buffer[]; +}; + +/* Set device configuration space */ +#define VDUSE_DEV_SET_CONFIG _IOW(VDUSE_BASE, 0x12, struct vduse_config_data) + +/* + * Inject a config interrupt. It's usually used to notify virtio driver + * that device configuration space has changed. + */ +#define VDUSE_DEV_INJECT_CONFIG_IRQ _IO(VDUSE_BASE, 0x13) + +/** + * struct vduse_vq_config - basic configuration of a virtqueue + * @index: virtqueue index + * @max_size: the max size of virtqueue + * @reserved: for future use, needs to be initialized to zero + * + * Structure used by VDUSE_VQ_SETUP ioctl to setup a virtqueue. + */ +struct vduse_vq_config { + __u32 index; + __u16 max_size; + __u16 reserved[13]; +}; + +/* + * Setup the specified virtqueue. Make sure all virtqueues have been + * configured before the device is attached to vDPA bus. + */ +#define VDUSE_VQ_SETUP _IOW(VDUSE_BASE, 0x14, struct vduse_vq_config) + +/** + * struct vduse_vq_state_split - split virtqueue state + * @avail_index: available index + */ +struct vduse_vq_state_split { + __u16 avail_index; +}; + +/** + * struct vduse_vq_state_packed - packed virtqueue state + * @last_avail_counter: last driver ring wrap counter observed by device + * @last_avail_idx: device available index + * @last_used_counter: device ring wrap counter + * @last_used_idx: used index + */ +struct vduse_vq_state_packed { + __u16 last_avail_counter; + __u16 last_avail_idx; + __u16 last_used_counter; + __u16 last_used_idx; +}; + +/** + * struct vduse_vq_info - information of a virtqueue + * @index: virtqueue index + * @num: the size of virtqueue + * @desc_addr: address of desc area + * @driver_addr: address of driver area + * @device_addr: address of device area + * @split: split virtqueue state + * @packed: packed virtqueue state + * @ready: ready status of virtqueue + * + * Structure used by VDUSE_VQ_GET_INFO ioctl to get virtqueue's information. + */ +struct vduse_vq_info { + __u32 index; + __u32 num; + __u64 desc_addr; + __u64 driver_addr; + __u64 device_addr; + union { + struct vduse_vq_state_split split; + struct vduse_vq_state_packed packed; + }; + __u8 ready; +}; + +/* Get the specified virtqueue's information. Caller should set index field. */ +#define VDUSE_VQ_GET_INFO _IOWR(VDUSE_BASE, 0x15, struct vduse_vq_info) + +/** + * struct vduse_vq_eventfd - eventfd configuration for a virtqueue + * @index: virtqueue index + * @fd: eventfd, -1 means de-assigning the eventfd + * + * Structure used by VDUSE_VQ_SETUP_KICKFD ioctl to setup kick eventfd. + */ +struct vduse_vq_eventfd { + __u32 index; +#define VDUSE_EVENTFD_DEASSIGN -1 + int fd; +}; + +/* + * Setup kick eventfd for specified virtqueue. The kick eventfd is used + * by VDUSE kernel module to notify userspace to consume the avail vring. 
+ */ +#define VDUSE_VQ_SETUP_KICKFD _IOW(VDUSE_BASE, 0x16, struct vduse_vq_eventfd) + +/* + * Inject an interrupt for specific virtqueue. It's used to notify virtio driver + * to consume the used vring. + */ +#define VDUSE_VQ_INJECT_IRQ _IOW(VDUSE_BASE, 0x17, __u32) + +/** + * struct vduse_iova_umem - userspace memory configuration for one IOVA region + * @uaddr: start address of userspace memory, it must be aligned to page size + * @iova: start of the IOVA region + * @size: size of the IOVA region + * @reserved: for future use, needs to be initialized to zero + * + * Structure used by VDUSE_IOTLB_REG_UMEM and VDUSE_IOTLB_DEREG_UMEM + * ioctls to register/de-register userspace memory for IOVA regions + */ +struct vduse_iova_umem { + __u64 uaddr; + __u64 iova; + __u64 size; + __u64 reserved[3]; +}; + +/* Register userspace memory for IOVA regions */ +#define VDUSE_IOTLB_REG_UMEM _IOW(VDUSE_BASE, 0x18, struct vduse_iova_umem) + +/* De-register the userspace memory. Caller should set iova and size field. */ +#define VDUSE_IOTLB_DEREG_UMEM _IOW(VDUSE_BASE, 0x19, struct vduse_iova_umem) + +/** + * struct vduse_iova_info - information of one IOVA region + * @start: start of the IOVA region + * @last: last of the IOVA region + * @capability: capability of the IOVA regsion + * @reserved: for future use, needs to be initialized to zero + * + * Structure used by VDUSE_IOTLB_GET_INFO ioctl to get information of + * one IOVA region. + */ +struct vduse_iova_info { + __u64 start; + __u64 last; +#define VDUSE_IOVA_CAP_UMEM (1 << 0) + __u64 capability; + __u64 reserved[3]; +}; + +/* + * Find the first IOVA region that overlaps with the range [start, last] + * and return some information on it. Caller should set start and last fields. + */ +#define VDUSE_IOTLB_GET_INFO _IOWR(VDUSE_BASE, 0x1a, struct vduse_iova_info) + +/* The control messages definition for read(2)/write(2) on /dev/vduse/$NAME */ + +/** + * enum vduse_req_type - request type + * @VDUSE_GET_VQ_STATE: get the state for specified virtqueue from userspace + * @VDUSE_SET_STATUS: set the device status + * @VDUSE_UPDATE_IOTLB: Notify userspace to update the memory mapping for + * specified IOVA range via VDUSE_IOTLB_GET_FD ioctl + */ +enum vduse_req_type { + VDUSE_GET_VQ_STATE, + VDUSE_SET_STATUS, + VDUSE_UPDATE_IOTLB, +}; + +/** + * struct vduse_vq_state - virtqueue state + * @index: virtqueue index + * @split: split virtqueue state + * @packed: packed virtqueue state + */ +struct vduse_vq_state { + __u32 index; + union { + struct vduse_vq_state_split split; + struct vduse_vq_state_packed packed; + }; +}; + +/** + * struct vduse_dev_status - device status + * @status: device status + */ +struct vduse_dev_status { + __u8 status; +}; + +/** + * struct vduse_iova_range - IOVA range [start, last] + * @start: start of the IOVA range + * @last: last of the IOVA range + */ +struct vduse_iova_range { + __u64 start; + __u64 last; +}; + +/** + * struct vduse_dev_request - control request + * @type: request type + * @request_id: request id + * @reserved: for future use + * @vq_state: virtqueue state, only index field is available + * @s: device status + * @iova: IOVA range for updating + * @padding: padding + * + * Structure used by read(2) on /dev/vduse/$NAME. 
+ */ +struct vduse_dev_request { + __u32 type; + __u32 request_id; + __u32 reserved[4]; + union { + struct vduse_vq_state vq_state; + struct vduse_dev_status s; + struct vduse_iova_range iova; + __u32 padding[32]; + }; +}; + +/** + * struct vduse_dev_response - response to control request + * @request_id: corresponding request id + * @result: the result of request + * @reserved: for future use, needs to be initialized to zero + * @vq_state: virtqueue state + * @padding: padding + * + * Structure used by write(2) on /dev/vduse/$NAME. + */ +struct vduse_dev_response { + __u32 request_id; +#define VDUSE_REQ_RESULT_OK 0x00 +#define VDUSE_REQ_RESULT_FAILED 0x01 + __u32 result; + __u32 reserved[4]; + union { + struct vduse_vq_state vq_state; + __u32 padding[32]; + }; +}; + +#endif /* _UAPI_VDUSE_H_ */ diff --git a/kernel/linux/uapi/version b/kernel/linux/uapi/version new file mode 100644 index 0000000000..d06407735c --- /dev/null +++ b/kernel/linux/uapi/version @@ -0,0 +1 @@ +v6.10 diff --git a/lib/eal/common/eal_common_debug.c b/lib/eal/common/eal_common_debug.c index 3e77995896..bcfcd6df6f 100644 --- a/lib/eal/common/eal_common_debug.c +++ b/lib/eal/common/eal_common_debug.c @@ -36,15 +36,13 @@ rte_exit(int exit_code, const char *format, ...) va_list ap; if (exit_code != 0) - RTE_LOG(CRIT, EAL, "Error - exiting with code: %d\n" - " Cause: ", exit_code); + EAL_LOG(CRIT, "Error - exiting with code: %d", exit_code); va_start(ap, format); rte_vlog(RTE_LOG_CRIT, RTE_LOGTYPE_EAL, format, ap); va_end(ap); if (rte_eal_cleanup() != 0 && rte_errno != EALREADY) - EAL_LOG(CRIT, - "EAL could not release all resources"); + EAL_LOG(CRIT, "EAL could not release all resources"); exit(exit_code); } diff --git a/lib/eal/common/eal_common_lcore_var.c b/lib/eal/common/eal_common_lcore_var.c new file mode 100644 index 0000000000..a1b2458839 --- /dev/null +++ b/lib/eal/common/eal_common_lcore_var.c @@ -0,0 +1,118 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2024 Ericsson AB + */ + +#include +#include + +#ifdef RTE_EXEC_ENV_WINDOWS +#include +#endif + +#include +#include +#include + +#include + +#include "eal_private.h" +#include "eal_lcore_var.h" + +/* + * Refer to the programmer's guide for an overview + * of the lcore variables implementation. 
+ */ + +/* base unit */ +struct lcore_var_buffer { + char data[RTE_MAX_LCORE_VAR * RTE_MAX_LCORE]; + struct lcore_var_buffer *prev; +}; + +/* last allocated unit */ +static struct lcore_var_buffer *current_buffer; + +/* initialized to trigger buffer allocation on first allocation */ +static size_t offset = RTE_MAX_LCORE_VAR; + +/* >8 end of documented variables */ + +static void * +lcore_var_alloc(size_t size, size_t align) +{ + void *handle; + unsigned int lcore_id; + void *value; + + offset = RTE_ALIGN_CEIL(offset, align); + + if (offset + size > RTE_MAX_LCORE_VAR) { + struct lcore_var_buffer *prev = current_buffer; + size_t alloc_size = + RTE_ALIGN_CEIL(sizeof(struct lcore_var_buffer), RTE_CACHE_LINE_SIZE); +#ifdef RTE_EXEC_ENV_WINDOWS + current_buffer = _aligned_malloc(alloc_size, RTE_CACHE_LINE_SIZE); +#else + current_buffer = aligned_alloc(RTE_CACHE_LINE_SIZE, alloc_size); +#endif + RTE_VERIFY(current_buffer != NULL); + + current_buffer->prev = prev; + + offset = 0; + } + + handle = &current_buffer->data[offset]; + + offset += size; + + RTE_LCORE_VAR_FOREACH(lcore_id, value, handle) + memset(value, 0, size); + + EAL_LOG(DEBUG, "Allocated %"PRIuPTR" bytes of per-lcore data with a " + "%"PRIuPTR"-byte alignment", size, align); + + return handle; +} + +void * +rte_lcore_var_alloc(size_t size, size_t align) +{ + /* Having the per-lcore buffer size aligned on cache lines, + * as well as having the base pointer aligned on the cache line size, + * assures that aligned offsets also translate to aligned pointers + * across all values. + */ + RTE_BUILD_BUG_ON(RTE_MAX_LCORE_VAR % RTE_CACHE_LINE_SIZE != 0); + RTE_VERIFY(align <= RTE_CACHE_LINE_SIZE); + RTE_VERIFY(size <= RTE_MAX_LCORE_VAR); + + /* '0' means asking for worst-case alignment requirements */ + if (align == 0) +#ifdef RTE_TOOLCHAIN_MSVC + /* MSVC is missing the max_align_t typedef */ + align = alignof(double); +#else + align = alignof(max_align_t); +#endif + + RTE_VERIFY(rte_is_power_of_2(align)); + + return lcore_var_alloc(size, align); +} + +void +eal_lcore_var_cleanup(void) +{ + while (current_buffer != NULL) { + struct lcore_var_buffer *prev = current_buffer->prev; + +#ifdef RTE_EXEC_ENV_WINDOWS + _aligned_free(current_buffer); +#else + free(current_buffer); +#endif + + current_buffer = prev; + } +} diff --git a/lib/eal/common/eal_common_options.c b/lib/eal/common/eal_common_options.c index f1a5e329a5..79db9a47dd 100644 --- a/lib/eal/common/eal_common_options.c +++ b/lib/eal/common/eal_common_options.c @@ -6,9 +6,6 @@ #include #include #include -#ifndef RTE_EXEC_ENV_WINDOWS -#include -#endif #include #include #include @@ -76,7 +73,9 @@ eal_long_options[] = { {OPT_HUGE_UNLINK, 2, NULL, OPT_HUGE_UNLINK_NUM }, {OPT_IOVA_MODE, 1, NULL, OPT_IOVA_MODE_NUM }, {OPT_LCORES, 1, NULL, OPT_LCORES_NUM }, + {OPT_LOG_COLOR, 2, NULL, OPT_LOG_COLOR_NUM }, {OPT_LOG_LEVEL, 1, NULL, OPT_LOG_LEVEL_NUM }, + {OPT_LOG_TIMESTAMP, 2, NULL, OPT_LOG_TIMESTAMP_NUM }, {OPT_TRACE, 1, NULL, OPT_TRACE_NUM }, {OPT_TRACE_DIR, 1, NULL, OPT_TRACE_DIR_NUM }, {OPT_TRACE_BUF_SIZE, 1, NULL, OPT_TRACE_BUF_SIZE_NUM }, @@ -93,7 +92,9 @@ eal_long_options[] = { {OPT_PROC_TYPE, 1, NULL, OPT_PROC_TYPE_NUM }, {OPT_SOCKET_MEM, 1, NULL, OPT_SOCKET_MEM_NUM }, {OPT_SOCKET_LIMIT, 1, NULL, OPT_SOCKET_LIMIT_NUM }, - {OPT_SYSLOG, 1, NULL, OPT_SYSLOG_NUM }, +#ifndef RTE_EXEC_ENV_WINDOWS + {OPT_SYSLOG, 2, NULL, OPT_SYSLOG_NUM }, +#endif {OPT_VDEV, 1, NULL, OPT_VDEV_NUM }, {OPT_VFIO_INTR, 1, NULL, OPT_VFIO_INTR_NUM }, {OPT_VFIO_VF_TOKEN, 1, NULL, OPT_VFIO_VF_TOKEN_NUM }, @@ -349,10 +350,6 @@
eal_reset_internal_config(struct internal_config *internal_cfg) } internal_cfg->base_virtaddr = 0; -#ifdef LOG_DAEMON - internal_cfg->syslog_facility = LOG_DAEMON; -#endif - /* if set to NONE, interrupt mode is determined automatically */ internal_cfg->vfio_intr_mode = RTE_INTR_MODE_NONE; memset(internal_cfg->vfio_vf_token, 0, @@ -1297,47 +1294,6 @@ eal_parse_lcores(const char *lcores) return ret; } -#ifndef RTE_EXEC_ENV_WINDOWS -static int -eal_parse_syslog(const char *facility, struct internal_config *conf) -{ - int i; - static const struct { - const char *name; - int value; - } map[] = { - { "auth", LOG_AUTH }, - { "cron", LOG_CRON }, - { "daemon", LOG_DAEMON }, - { "ftp", LOG_FTP }, - { "kern", LOG_KERN }, - { "lpr", LOG_LPR }, - { "mail", LOG_MAIL }, - { "news", LOG_NEWS }, - { "syslog", LOG_SYSLOG }, - { "user", LOG_USER }, - { "uucp", LOG_UUCP }, - { "local0", LOG_LOCAL0 }, - { "local1", LOG_LOCAL1 }, - { "local2", LOG_LOCAL2 }, - { "local3", LOG_LOCAL3 }, - { "local4", LOG_LOCAL4 }, - { "local5", LOG_LOCAL5 }, - { "local6", LOG_LOCAL6 }, - { "local7", LOG_LOCAL7 }, - { NULL, 0 } - }; - - for (i = 0; map[i].name; i++) { - if (!strcmp(facility, map[i].name)) { - conf->syslog_facility = map[i].value; - return 0; - } - } - return -1; -} -#endif - static void eal_log_usage(void) { @@ -1640,6 +1596,59 @@ eal_parse_huge_unlink(const char *arg, struct hugepage_file_discipline *out) return -1; } +bool +eal_option_is_log(int opt) +{ + switch (opt) { + case OPT_LOG_COLOR_NUM: + case OPT_LOG_LEVEL_NUM: + case OPT_LOG_TIMESTAMP_NUM: + case OPT_SYSLOG_NUM: + return true; + default: + return false; + } +} + +/* Parse all arguments looking for log related ones */ +int +eal_parse_log_options(int argc, char * const argv[]) +{ + struct internal_config *internal_conf = eal_get_internal_configuration(); + int option_index, opt; + const int old_optind = optind; + const int old_optopt = optopt; + const int old_opterr = opterr; + char *old_optarg = optarg; +#ifdef RTE_EXEC_ENV_FREEBSD + const int old_optreset = optreset; + optreset = 1; +#endif + + optind = 1; + opterr = 0; + + while ((opt = getopt_long(argc, argv, eal_short_options, + eal_long_options, &option_index)) != EOF) { + + if (!eal_option_is_log(opt)) + continue; + + if (eal_parse_common_option(opt, optarg, internal_conf) < 0) + return -1; + } + + /* restore getopt lib */ + optind = old_optind; + optopt = old_optopt; + optarg = old_optarg; + opterr = old_opterr; +#ifdef RTE_EXEC_ENV_FREEBSD + optreset = old_optreset; +#endif + return 0; +} + int eal_parse_common_option(int opt, const char *optarg, struct internal_config *conf) @@ -1837,7 +1846,7 @@ eal_parse_common_option(int opt, const char *optarg, #ifndef RTE_EXEC_ENV_WINDOWS case OPT_SYSLOG_NUM: - if (eal_parse_syslog(optarg, conf) < 0) { + if (eal_log_syslog(optarg) < 0) { EAL_LOG(ERR, "invalid parameters for --" OPT_SYSLOG); return -1; @@ -1845,7 +1854,7 @@ eal_parse_common_option(int opt, const char *optarg, break; #endif - case OPT_LOG_LEVEL_NUM: { + case OPT_LOG_LEVEL_NUM: if (eal_parse_log_level(optarg) < 0) { EAL_LOG(ERR, "invalid parameters for --" @@ -1853,7 +1862,22 @@ eal_parse_common_option(int opt, const char *optarg, return -1; } break; - } + + case OPT_LOG_TIMESTAMP_NUM: + if (eal_log_timestamp(optarg) < 0) { + EAL_LOG(ERR, "invalid parameters for --" + OPT_LOG_TIMESTAMP); + return -1; + } + break; + + case OPT_LOG_COLOR_NUM: + if (eal_log_color(optarg) < 0) { + EAL_LOG(ERR, "invalid parameters for --" + OPT_LOG_COLOR); + return -1; + } + break; #ifndef RTE_EXEC_ENV_WINDOWS 
case OPT_TRACE_NUM: { @@ -2214,12 +2238,14 @@ eal_common_usage(void) " --"OPT_VMWARE_TSC_MAP" Use VMware TSC map instead of native RDTSC\n" " --"OPT_PROC_TYPE" Type of this process (primary|secondary|auto)\n" #ifndef RTE_EXEC_ENV_WINDOWS - " --"OPT_SYSLOG" Set syslog facility\n" + " --"OPT_SYSLOG"[=] Enable use of syslog (and optionally set facility)\n" #endif " --"OPT_LOG_LEVEL"= Set global log level\n" " --"OPT_LOG_LEVEL"=:\n" " Set specific log level\n" " --"OPT_LOG_LEVEL"=help Show log types and levels\n" + " --"OPT_LOG_TIMESTAMP"[=] Timestamp log output\n" + " --"OPT_LOG_COLOR"[=] Colorize log messages\n" #ifndef RTE_EXEC_ENV_WINDOWS " --"OPT_TRACE"=\n" " Enable trace based on regular expression trace name.\n" diff --git a/lib/eal/common/eal_internal_cfg.h b/lib/eal/common/eal_internal_cfg.h index 167ec501fa..f53ab8b4aa 100644 --- a/lib/eal/common/eal_internal_cfg.h +++ b/lib/eal/common/eal_internal_cfg.h @@ -84,7 +84,6 @@ struct internal_config { /**< true if storing all pages within single files (per-page-size, * per-node) non-legacy mode only. */ - volatile int syslog_facility; /**< facility passed to openlog() */ /** default interrupt mode for VFIO */ volatile enum rte_intr_mode vfio_intr_mode; /** the shared VF token for VFIO-PCI bound PF and VFs devices */ diff --git a/lib/eal/common/eal_lcore_var.h b/lib/eal/common/eal_lcore_var.h new file mode 100644 index 0000000000..de2c4e44a0 --- /dev/null +++ b/lib/eal/common/eal_lcore_var.h @@ -0,0 +1,11 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(C) 2024 Ericsson AB. + */ + +#ifndef EAL_LCORE_VAR_H +#define EAL_LCORE_VAR_H + +void +eal_lcore_var_cleanup(void); + +#endif diff --git a/lib/eal/common/eal_options.h b/lib/eal/common/eal_options.h index 3cc9cb6412..95fb4f6108 100644 --- a/lib/eal/common/eal_options.h +++ b/lib/eal/common/eal_options.h @@ -33,8 +33,12 @@ enum { OPT_HUGE_UNLINK_NUM, #define OPT_LCORES "lcores" OPT_LCORES_NUM, +#define OPT_LOG_COLOR "log-color" + OPT_LOG_COLOR_NUM, #define OPT_LOG_LEVEL "log-level" OPT_LOG_LEVEL_NUM, +#define OPT_LOG_TIMESTAMP "log-timestamp" + OPT_LOG_TIMESTAMP_NUM, #define OPT_TRACE "trace" OPT_TRACE_NUM, #define OPT_TRACE_DIR "trace-dir" @@ -96,6 +100,8 @@ enum { extern const char eal_short_options[]; extern const struct option eal_long_options[]; +bool eal_option_is_log(int opt); +int eal_parse_log_options(int argc, char * const argv[]); int eal_parse_common_option(int opt, const char *argv, struct internal_config *conf); int eal_option_device_parse(void); diff --git a/lib/eal/common/meson.build b/lib/eal/common/meson.build index c1bbf26654..e273745e93 100644 --- a/lib/eal/common/meson.build +++ b/lib/eal/common/meson.build @@ -18,6 +18,7 @@ sources += files( 'eal_common_interrupts.c', 'eal_common_launch.c', 'eal_common_lcore.c', + 'eal_common_lcore_var.c', 'eal_common_mcfg.c', 'eal_common_memalloc.c', 'eal_common_memory.c', diff --git a/lib/eal/common/rte_random.c b/lib/eal/common/rte_random.c index 90e91b3c4f..cf0756f26a 100644 --- a/lib/eal/common/rte_random.c +++ b/lib/eal/common/rte_random.c @@ -11,6 +11,7 @@ #include #include #include +#include #include struct __rte_cache_aligned rte_rand_state { @@ -19,14 +20,12 @@ struct __rte_cache_aligned rte_rand_state { uint64_t z3; uint64_t z4; uint64_t z5; - RTE_CACHE_GUARD; }; -/* One instance each for every lcore id-equipped thread, and one - * additional instance to be shared by all others threads (i.e., all - * unregistered non-EAL threads). 
- */ -static struct rte_rand_state rand_states[RTE_MAX_LCORE + 1]; +RTE_LCORE_VAR_HANDLE(struct rte_rand_state, rand_state); + +/* instance to be shared by all unregistered non-EAL threads */ +static struct rte_rand_state unregistered_rand_state; static uint32_t __rte_rand_lcg32(uint32_t *seed) @@ -85,8 +84,14 @@ rte_srand(uint64_t seed) unsigned int lcore_id; /* add lcore_id to seed to avoid having the same sequence */ - for (lcore_id = 0; lcore_id < RTE_DIM(rand_states); lcore_id++) - __rte_srand_lfsr258(seed + lcore_id, &rand_states[lcore_id]); + for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) { + struct rte_rand_state *lcore_state = + RTE_LCORE_VAR_LCORE(lcore_id, rand_state); + + __rte_srand_lfsr258(seed + lcore_id, lcore_state); + } + + __rte_srand_lfsr258(seed + lcore_id, &unregistered_rand_state); } static __rte_always_inline uint64_t @@ -124,11 +129,10 @@ struct rte_rand_state *__rte_rand_get_state(void) idx = rte_lcore_id(); - /* last instance reserved for unregistered non-EAL threads */ if (unlikely(idx == LCORE_ID_ANY)) - idx = RTE_MAX_LCORE; + return &unregistered_rand_state; - return &rand_states[idx]; + return RTE_LCORE_VAR(rand_state); } uint64_t @@ -228,6 +232,8 @@ RTE_INIT(rte_rand_init) { uint64_t seed; + RTE_LCORE_VAR_ALLOC(rand_state); + seed = __rte_random_initial_seed(); rte_srand(seed); diff --git a/lib/eal/common/rte_service.c b/lib/eal/common/rte_service.c index 324471e897..dad3150df9 100644 --- a/lib/eal/common/rte_service.c +++ b/lib/eal/common/rte_service.c @@ -11,6 +11,7 @@ #include #include +#include #include #include #include @@ -78,7 +79,7 @@ struct __rte_cache_aligned core_state { static uint32_t rte_service_count; static struct rte_service_spec_impl *rte_services; -static struct core_state *lcore_states; +static RTE_LCORE_VAR_HANDLE(struct core_state, lcore_states); static uint32_t rte_service_library_initialized; int32_t @@ -99,12 +100,8 @@ rte_service_init(void) goto fail_mem; } - lcore_states = rte_calloc("rte_service_core_states", RTE_MAX_LCORE, - sizeof(struct core_state), RTE_CACHE_LINE_SIZE); - if (!lcore_states) { - EAL_LOG(ERR, "error allocating core states array"); - goto fail_mem; - } + if (lcore_states == NULL) + RTE_LCORE_VAR_ALLOC(lcore_states); int i; struct rte_config *cfg = rte_eal_get_configuration(); @@ -120,7 +117,6 @@ rte_service_init(void) return 0; fail_mem: rte_free(rte_services); - rte_free(lcore_states); return -ENOMEM; } @@ -134,7 +130,6 @@ rte_service_finalize(void) rte_eal_mp_wait_lcore(); rte_free(rte_services); - rte_free(lcore_states); rte_service_library_initialized = 0; } @@ -284,7 +279,6 @@ rte_service_component_register(const struct rte_service_spec *spec, int32_t rte_service_component_unregister(uint32_t id) { - uint32_t i; struct rte_service_spec_impl *s; SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL); @@ -292,9 +286,11 @@ rte_service_component_unregister(uint32_t id) s->internal_flags &= ~(SERVICE_F_REGISTERED); + unsigned int lcore_id; + struct core_state *cs; /* clear the run-bit in all cores */ - for (i = 0; i < RTE_MAX_LCORE; i++) - rte_bitset_clear(lcore_states[i].mapped_services, id); + RTE_LCORE_VAR_FOREACH(lcore_id, cs, lcore_states) + rte_bitset_clear(cs->mapped_services, id); memset(&rte_services[id], 0, sizeof(struct rte_service_spec_impl)); @@ -463,7 +459,10 @@ rte_service_may_be_active(uint32_t id) return -EINVAL; for (i = 0; i < lcore_count; i++) { - if (rte_bitset_test(lcore_states[ids[i]].service_active_on_lcore, id)) + struct core_state *cs = + RTE_LCORE_VAR_LCORE(ids[i], lcore_states); + + if 
(rte_bitset_test(cs->service_active_on_lcore, id)) return 1; } @@ -473,7 +472,7 @@ rte_service_may_be_active(uint32_t id) int32_t rte_service_run_iter_on_app_lcore(uint32_t id, uint32_t serialize_mt_unsafe) { - struct core_state *cs = &lcore_states[rte_lcore_id()]; + struct core_state *cs = RTE_LCORE_VAR(lcore_states); struct rte_service_spec_impl *s; SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL); @@ -496,8 +495,7 @@ static int32_t service_runner_func(void *arg) { RTE_SET_USED(arg); - const int lcore = rte_lcore_id(); - struct core_state *cs = &lcore_states[lcore]; + struct core_state *cs = RTE_LCORE_VAR(lcore_states); rte_atomic_store_explicit(&cs->thread_active, 1, rte_memory_order_seq_cst); @@ -533,13 +531,15 @@ service_runner_func(void *arg) int32_t rte_service_lcore_may_be_active(uint32_t lcore) { - if (lcore >= RTE_MAX_LCORE || !lcore_states[lcore].is_service_core) + struct core_state *cs = RTE_LCORE_VAR_LCORE(lcore, lcore_states); + + if (lcore >= RTE_MAX_LCORE || !cs->is_service_core) return -EINVAL; /* Load thread_active using ACQUIRE to avoid instructions dependent on * the result being re-ordered before this load completes. */ - return rte_atomic_load_explicit(&lcore_states[lcore].thread_active, + return rte_atomic_load_explicit(&cs->thread_active, rte_memory_order_acquire); } @@ -547,9 +547,12 @@ int32_t rte_service_lcore_count(void) { int32_t count = 0; - uint32_t i; - for (i = 0; i < RTE_MAX_LCORE; i++) - count += lcore_states[i].is_service_core; + + unsigned int lcore_id; + struct core_state *cs; + RTE_LCORE_VAR_FOREACH(lcore_id, cs, lcore_states) + count += cs->is_service_core; + return count; } @@ -566,7 +569,8 @@ rte_service_lcore_list(uint32_t array[], uint32_t n) uint32_t i; uint32_t idx = 0; for (i = 0; i < RTE_MAX_LCORE; i++) { - struct core_state *cs = &lcore_states[i]; + struct core_state *cs = + RTE_LCORE_VAR_LCORE(i, lcore_states); if (cs->is_service_core) { array[idx] = i; idx++; @@ -582,7 +586,7 @@ rte_service_lcore_count_services(uint32_t lcore) if (lcore >= RTE_MAX_LCORE) return -EINVAL; - struct core_state *cs = &lcore_states[lcore]; + struct core_state *cs = RTE_LCORE_VAR_LCORE(lcore, lcore_states); if (!cs->is_service_core) return -ENOTSUP; @@ -634,28 +638,30 @@ rte_service_start_with_defaults(void) static int32_t service_update(uint32_t sid, uint32_t lcore, uint32_t *set, uint32_t *enabled) { + struct core_state *cs = RTE_LCORE_VAR_LCORE(lcore, lcore_states); + /* validate ID, or return error value */ if (!service_valid(sid) || lcore >= RTE_MAX_LCORE || - !lcore_states[lcore].is_service_core) + !cs->is_service_core) return -EINVAL; if (set) { - uint64_t lcore_mapped = rte_bitset_test(lcore_states[lcore].mapped_services, sid); + bool lcore_mapped = rte_bitset_test(cs->mapped_services, sid); if (*set && !lcore_mapped) { - rte_bitset_set(lcore_states[lcore].mapped_services, sid); + rte_bitset_set(cs->mapped_services, sid); rte_atomic_fetch_add_explicit(&rte_services[sid].num_mapped_cores, 1, rte_memory_order_relaxed); } if (!*set && lcore_mapped) { - rte_bitset_clear(lcore_states[lcore].mapped_services, sid); + rte_bitset_clear(cs->mapped_services, sid); rte_atomic_fetch_sub_explicit(&rte_services[sid].num_mapped_cores, 1, rte_memory_order_relaxed); } } if (enabled) - *enabled = rte_bitset_test(lcore_states[lcore].mapped_services, sid); + *enabled = rte_bitset_test(cs->mapped_services, sid); return 0; } @@ -683,13 +689,14 @@ set_lcore_state(uint32_t lcore, int32_t state) { /* mark core state in hugepage backed config */ struct rte_config *cfg = 
rte_eal_get_configuration(); + struct core_state *cs = RTE_LCORE_VAR_LCORE(lcore, lcore_states); cfg->lcore_role[lcore] = state; /* mark state in process local lcore_config */ lcore_config[lcore].core_role = state; /* update per-lcore optimized state tracking */ - lcore_states[lcore].is_service_core = (state == ROLE_SERVICE); + cs->is_service_core = (state == ROLE_SERVICE); rte_eal_trace_service_lcore_state_change(lcore, state); } @@ -700,14 +707,16 @@ rte_service_lcore_reset_all(void) /* loop over cores, reset all mapped services */ uint32_t i; for (i = 0; i < RTE_MAX_LCORE; i++) { - if (lcore_states[i].is_service_core) { - rte_bitset_clear_all(lcore_states[i].mapped_services, RTE_SERVICE_NUM_MAX); + struct core_state *cs = RTE_LCORE_VAR_LCORE(i, lcore_states); + + if (cs->is_service_core) { + rte_bitset_clear_all(cs->mapped_services, RTE_SERVICE_NUM_MAX); set_lcore_state(i, ROLE_RTE); /* runstate act as guard variable Use * store-release memory order here to synchronize * with load-acquire in runstate read functions. */ - rte_atomic_store_explicit(&lcore_states[i].runstate, + rte_atomic_store_explicit(&cs->runstate, RUNSTATE_STOPPED, rte_memory_order_release); } } @@ -723,17 +732,19 @@ rte_service_lcore_add(uint32_t lcore) { if (lcore >= RTE_MAX_LCORE) return -EINVAL; - if (lcore_states[lcore].is_service_core) + + struct core_state *cs = RTE_LCORE_VAR_LCORE(lcore, lcore_states); + if (cs->is_service_core) return -EALREADY; set_lcore_state(lcore, ROLE_SERVICE); /* ensure that after adding a core the mask and state are defaults */ - rte_bitset_clear_all(lcore_states[lcore].mapped_services, RTE_SERVICE_NUM_MAX); + rte_bitset_clear_all(cs->mapped_services, RTE_SERVICE_NUM_MAX); /* Use store-release memory order here to synchronize with * load-acquire in runstate read functions. */ - rte_atomic_store_explicit(&lcore_states[lcore].runstate, RUNSTATE_STOPPED, + rte_atomic_store_explicit(&cs->runstate, RUNSTATE_STOPPED, rte_memory_order_release); return rte_eal_wait_lcore(lcore); @@ -745,7 +756,7 @@ rte_service_lcore_del(uint32_t lcore) if (lcore >= RTE_MAX_LCORE) return -EINVAL; - struct core_state *cs = &lcore_states[lcore]; + struct core_state *cs = RTE_LCORE_VAR_LCORE(lcore, lcore_states); if (!cs->is_service_core) return -EINVAL; @@ -769,7 +780,7 @@ rte_service_lcore_start(uint32_t lcore) if (lcore >= RTE_MAX_LCORE) return -EINVAL; - struct core_state *cs = &lcore_states[lcore]; + struct core_state *cs = RTE_LCORE_VAR_LCORE(lcore, lcore_states); if (!cs->is_service_core) return -EINVAL; @@ -799,6 +810,8 @@ rte_service_lcore_start(uint32_t lcore) int32_t rte_service_lcore_stop(uint32_t lcore) { + struct core_state *cs = RTE_LCORE_VAR_LCORE(lcore, lcore_states); + if (lcore >= RTE_MAX_LCORE) return -EINVAL; @@ -806,12 +819,11 @@ rte_service_lcore_stop(uint32_t lcore) * memory order here to synchronize with store-release * in runstate update functions. */ - if (rte_atomic_load_explicit(&lcore_states[lcore].runstate, rte_memory_order_acquire) == + if (rte_atomic_load_explicit(&cs->runstate, rte_memory_order_acquire) == RUNSTATE_STOPPED) return -EALREADY; uint32_t i; - struct core_state *cs = &lcore_states[lcore]; for (i = 0; i < RTE_SERVICE_NUM_MAX; i++) { bool enabled = rte_bitset_test(cs->mapped_services, i); @@ -831,7 +843,7 @@ rte_service_lcore_stop(uint32_t lcore) /* Use store-release memory order here to synchronize with * load-acquire in runstate read functions. 
*/ - rte_atomic_store_explicit(&lcore_states[lcore].runstate, RUNSTATE_STOPPED, + rte_atomic_store_explicit(&cs->runstate, RUNSTATE_STOPPED, rte_memory_order_release); rte_eal_trace_service_lcore_stop(lcore); @@ -842,7 +854,7 @@ rte_service_lcore_stop(uint32_t lcore) static uint64_t lcore_attr_get_loops(unsigned int lcore) { - struct core_state *cs = &lcore_states[lcore]; + struct core_state *cs = RTE_LCORE_VAR_LCORE(lcore, lcore_states); return rte_atomic_load_explicit(&cs->loops, rte_memory_order_relaxed); } @@ -850,7 +862,7 @@ lcore_attr_get_loops(unsigned int lcore) static uint64_t lcore_attr_get_cycles(unsigned int lcore) { - struct core_state *cs = &lcore_states[lcore]; + struct core_state *cs = RTE_LCORE_VAR_LCORE(lcore, lcore_states); return rte_atomic_load_explicit(&cs->cycles, rte_memory_order_relaxed); } @@ -858,7 +870,7 @@ lcore_attr_get_cycles(unsigned int lcore) static uint64_t lcore_attr_get_service_calls(uint32_t service_id, unsigned int lcore) { - struct core_state *cs = &lcore_states[lcore]; + struct core_state *cs = RTE_LCORE_VAR_LCORE(lcore, lcore_states); return rte_atomic_load_explicit(&cs->service_stats[service_id].calls, rte_memory_order_relaxed); @@ -885,7 +897,7 @@ lcore_attr_get_service_error_calls(uint32_t service_id, unsigned int lcore) static uint64_t lcore_attr_get_service_cycles(uint32_t service_id, unsigned int lcore) { - struct core_state *cs = &lcore_states[lcore]; + struct core_state *cs = RTE_LCORE_VAR_LCORE(lcore, lcore_states); return rte_atomic_load_explicit(&cs->service_stats[service_id].cycles, rte_memory_order_relaxed); @@ -901,7 +913,10 @@ attr_get(uint32_t id, lcore_attr_get_fun lcore_attr_get) uint64_t sum = 0; for (lcore = 0; lcore < RTE_MAX_LCORE; lcore++) { - if (lcore_states[lcore].is_service_core) + struct core_state *cs = + RTE_LCORE_VAR_LCORE(lcore, lcore_states); + + if (cs->is_service_core) sum += lcore_attr_get(id, lcore); } @@ -963,12 +978,11 @@ int32_t rte_service_lcore_attr_get(uint32_t lcore, uint32_t attr_id, uint64_t *attr_value) { - struct core_state *cs; + struct core_state *cs = RTE_LCORE_VAR_LCORE(lcore, lcore_states); if (lcore >= RTE_MAX_LCORE || !attr_value) return -EINVAL; - cs = &lcore_states[lcore]; if (!cs->is_service_core) return -ENOTSUP; @@ -993,7 +1007,8 @@ rte_service_attr_reset_all(uint32_t id) return -EINVAL; for (lcore = 0; lcore < RTE_MAX_LCORE; lcore++) { - struct core_state *cs = &lcore_states[lcore]; + struct core_state *cs = + RTE_LCORE_VAR_LCORE(lcore, lcore_states); cs->service_stats[id] = (struct service_stats) {}; } @@ -1004,12 +1019,11 @@ rte_service_attr_reset_all(uint32_t id) int32_t rte_service_lcore_attr_reset_all(uint32_t lcore) { - struct core_state *cs; + struct core_state *cs = RTE_LCORE_VAR_LCORE(lcore, lcore_states); if (lcore >= RTE_MAX_LCORE) return -EINVAL; - cs = &lcore_states[lcore]; if (!cs->is_service_core) return -ENOTSUP; @@ -1044,7 +1058,7 @@ static void service_dump_calls_per_lcore(FILE *f, uint32_t lcore) { uint32_t i; - struct core_state *cs = &lcore_states[lcore]; + struct core_state *cs = RTE_LCORE_VAR_LCORE(lcore, lcore_states); fprintf(f, "%02d\t", lcore); for (i = 0; i < RTE_SERVICE_NUM_MAX; i++) { diff --git a/lib/eal/freebsd/eal.c b/lib/eal/freebsd/eal.c index 1229230063..a96bbf5836 100644 --- a/lib/eal/freebsd/eal.c +++ b/lib/eal/freebsd/eal.c @@ -11,7 +11,6 @@ #include #include #include -#include #include #include #include @@ -47,12 +46,14 @@ #include "eal_private.h" #include "eal_thread.h" +#include "eal_lcore_var.h" #include "eal_internal_cfg.h" #include 
"eal_filesystem.h" #include "eal_hugepages.h" #include "eal_options.h" #include "eal_memcfg.h" #include "eal_trace.h" +#include "log_internal.h" #define MEMSIZE_IF_NO_HUGE_PAGE (64ULL * 1024ULL * 1024ULL) @@ -363,48 +364,6 @@ eal_get_hugepage_mem_size(void) return (size < SIZE_MAX) ? (size_t)(size) : SIZE_MAX; } -/* Parse the arguments for --log-level only */ -static void -eal_log_level_parse(int argc, char **argv) -{ - int opt; - char **argvopt; - int option_index; - const int old_optind = optind; - const int old_optopt = optopt; - const int old_optreset = optreset; - char * const old_optarg = optarg; - struct internal_config *internal_conf = - eal_get_internal_configuration(); - - argvopt = argv; - optind = 1; - optreset = 1; - - while ((opt = getopt_long(argc, argvopt, eal_short_options, - eal_long_options, &option_index)) != EOF) { - - int ret; - - /* getopt is not happy, stop right now */ - if (opt == '?') - break; - - ret = (opt == OPT_LOG_LEVEL_NUM) ? - eal_parse_common_option(opt, optarg, internal_conf) : 0; - - /* common parser is not happy */ - if (ret < 0) - break; - } - - /* restore getopt lib */ - optind = old_optind; - optopt = old_optopt; - optreset = old_optreset; - optarg = old_optarg; -} - /* Parse the argument given in the command line of the application */ static int eal_parse_args(int argc, char **argv) @@ -434,8 +393,8 @@ eal_parse_args(int argc, char **argv) goto out; } - /* eal_log_level_parse() already handled this option */ - if (opt == OPT_LOG_LEVEL_NUM) + /* eal_parse_log_options() already handled this option */ + if (eal_option_is_log(opt)) continue; ret = eal_parse_common_option(opt, optarg, internal_conf); @@ -571,8 +530,7 @@ rte_eal_iopl_init(void) static void rte_eal_init_alert(const char *msg) { - fprintf(stderr, "EAL: FATAL: %s\n", msg); - EAL_LOG(ERR, "%s", msg); + EAL_LOG(ALERT, "%s", msg); } /* Launch threads, called at application init(). */ @@ -590,6 +548,15 @@ rte_eal_init(int argc, char **argv) bool has_phys_addr; enum rte_iova_mode iova_mode; + /* setup log as early as possible */ + if (eal_parse_log_options(argc, argv) < 0) { + rte_eal_init_alert("invalid log arguments."); + rte_errno = EINVAL; + return -1; + } + + eal_log_init(getprogname()); + /* checks if the machine is adequate */ if (!rte_cpu_is_supported()) { rte_eal_init_alert("unsupported cpu type."); @@ -616,9 +583,6 @@ rte_eal_init(int argc, char **argv) /* clone argv to report out later in telemetry */ eal_save_args(argc, argv); - /* set log level as early as possible */ - eal_log_level_parse(argc, argv); - if (rte_eal_cpu_init() < 0) { rte_eal_init_alert("Cannot detect lcores."); rte_errno = ENOTSUP; @@ -941,6 +905,7 @@ rte_eal_cleanup(void) /* after this point, any DPDK pointers will become dangling */ rte_eal_memory_detach(); eal_cleanup_config(internal_conf); + eal_lcore_var_cleanup(); return 0; } diff --git a/lib/eal/include/generic/rte_cpuflags.h b/lib/eal/include/generic/rte_cpuflags.h index bfe9df4516..26d5229b8e 100644 --- a/lib/eal/include/generic/rte_cpuflags.h +++ b/lib/eal/include/generic/rte_cpuflags.h @@ -33,15 +33,11 @@ struct rte_cpu_intrinsics { }; /** - * @warning - * @b EXPERIMENTAL: this API may change without prior notice - * * Check CPU support for various intrinsics at runtime. * * @param intrinsics * Pointer to a structure to be filled. 
*/ -__rte_experimental void rte_cpu_get_intrinsics_support(struct rte_cpu_intrinsics *intrinsics); diff --git a/lib/eal/include/meson.build b/lib/eal/include/meson.build index 474097f211..d903577caa 100644 --- a/lib/eal/include/meson.build +++ b/lib/eal/include/meson.build @@ -28,6 +28,7 @@ headers += files( 'rte_keepalive.h', 'rte_launch.h', 'rte_lcore.h', + 'rte_lcore_var.h', 'rte_lock_annotations.h', 'rte_malloc.h', 'rte_mcslock.h', diff --git a/lib/eal/include/rte_common.h b/lib/eal/include/rte_common.h index c79f9ed319..4d299f2b36 100644 --- a/lib/eal/include/rte_common.h +++ b/lib/eal/include/rte_common.h @@ -366,6 +366,15 @@ static void __attribute__((destructor(RTE_PRIO(prio)), used)) func(void) #define __rte_noreturn __attribute__((noreturn)) #endif +/** + * Hint point in program never reached + */ +#if defined(RTE_TOOLCHAIN_GCC) || defined(RTE_TOOLCHAIN_CLANG) +#define __rte_unreachable() __extension__(__builtin_unreachable()) +#else +#define __rte_unreachable() __assume(0) +#endif + /** * Issue a warning in case the function's return value is ignored. * @@ -423,6 +432,22 @@ static void __attribute__((destructor(RTE_PRIO(prio)), used)) func(void) #define __rte_cold __attribute__((cold)) #endif +/** + * Hint precondition + * + * @warning Depending on the compiler, any code in ``condition`` might be executed. + * This currently only occurs with GCC prior to version 13. + */ +#if defined(RTE_TOOLCHAIN_GCC) && (GCC_VERSION >= 130000) +#define __rte_assume(condition) __attribute__((assume(condition))) +#elif defined(RTE_TOOLCHAIN_GCC) +#define __rte_assume(condition) do { if (!(condition)) __rte_unreachable(); } while (0) +#elif defined(RTE_TOOLCHAIN_CLANG) +#define __rte_assume(condition) __extension__(__builtin_assume(condition)) +#else +#define __rte_assume(condition) __assume(condition) +#endif + /** * Disable AddressSanitizer on some code */ diff --git a/lib/eal/include/rte_lcore.h b/lib/eal/include/rte_lcore.h index 7deae47af3..549b9e68c5 100644 --- a/lib/eal/include/rte_lcore.h +++ b/lib/eal/include/rte_lcore.h @@ -359,9 +359,6 @@ struct rte_lcore_usage { typedef int (*rte_lcore_usage_cb)(unsigned int lcore_id, struct rte_lcore_usage *usage); /** - * @warning - * @b EXPERIMENTAL: this API may change without prior notice. - * * Register a callback from an application to be called in rte_lcore_dump() and * the /eal/lcore/info telemetry endpoint handler. Applications are expected to * report lcore usage statistics via this callback. @@ -373,7 +370,6 @@ typedef int (*rte_lcore_usage_cb)(unsigned int lcore_id, struct rte_lcore_usage * @param cb * The callback function. */ -__rte_experimental void rte_lcore_register_usage_cb(rte_lcore_usage_cb cb); /** diff --git a/lib/eal/include/rte_lcore_var.h b/lib/eal/include/rte_lcore_var.h new file mode 100644 index 0000000000..28d88cd89b --- /dev/null +++ b/lib/eal/include/rte_lcore_var.h @@ -0,0 +1,207 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2024 Ericsson AB + */ + +#ifndef RTE_LCORE_VAR_H +#define RTE_LCORE_VAR_H + +/** + * @file + * + * Lcore variables + * + * This API provides a mechanism to create and access per-lcore id + * variables in a space- and cycle-efficient manner. + * + * Please refer to the lcore variables' programmer's guide + * for an overview of this API and its implementation. + */ + +#include +#include + +#include +#include +#include + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * Given the lcore variable type, produces the type of the lcore variable handle. 
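The __rte_unreachable() and __rte_assume() hints added to rte_common.h above are easiest to see in a small sketch; the enum and function below are purely illustrative, not part of the patch:

#include <rte_common.h>

enum dir { DIR_RX, DIR_TX };

/* The hints let the compiler assume 'burst' never exceeds 32 and that
 * control never falls out of the switch, so it can drop defensive code
 * for those cases. With GCC older than 13 the condition passed to
 * __rte_assume() may still be evaluated. */
static int
queue_index(enum dir d, unsigned int burst)
{
	__rte_assume(burst <= 32);

	switch (d) {
	case DIR_RX:
		return 0;
	case DIR_TX:
		return (int)burst;
	}
	__rte_unreachable();
}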
+ */ +#define RTE_LCORE_VAR_HANDLE_TYPE(type) \ + type * + +/** + * Define an lcore variable handle. + * + * This macro defines a variable which is used as a handle + * to access the various instances of a per-lcore id variable. + * + * This macro clarifies that the declaration is an lcore handle, + * not a regular pointer. + * + * Add @b static as a prefix in case the lcore variable + * is only to be accessed from a particular translation unit. + */ +#define RTE_LCORE_VAR_HANDLE(type, name) \ + RTE_LCORE_VAR_HANDLE_TYPE(type) name + +/** + * Allocate space for an lcore variable, and initialize its handle. + * + * The values of the lcore variable are initialized to zero. + */ +#define RTE_LCORE_VAR_ALLOC_SIZE_ALIGN(handle, size, align) \ + handle = rte_lcore_var_alloc(size, align) + +/** + * Allocate space for an lcore variable, and initialize its handle, + * with values aligned for any type of object. + * + * The values of the lcore variable are initialized to zero. + */ +#define RTE_LCORE_VAR_ALLOC_SIZE(handle, size) \ + RTE_LCORE_VAR_ALLOC_SIZE_ALIGN(handle, size, 0) + +/** + * Allocate space for an lcore variable of the size and alignment requirements + * suggested by the handle pointer type, and initialize its handle. + * + * The values of the lcore variable are initialized to zero. + */ +#define RTE_LCORE_VAR_ALLOC(handle) \ + RTE_LCORE_VAR_ALLOC_SIZE_ALIGN(handle, sizeof(*(handle)), \ + alignof(typeof(*(handle)))) + +/** + * Allocate an explicitly-sized, explicitly-aligned lcore variable + * by means of a @ref RTE_INIT constructor. + * + * The values of the lcore variable are initialized to zero. + */ +#define RTE_LCORE_VAR_INIT_SIZE_ALIGN(name, size, align) \ + RTE_INIT(rte_lcore_var_init_ ## name) \ + { \ + RTE_LCORE_VAR_ALLOC_SIZE_ALIGN(name, size, align); \ + } + +/** + * Allocate an explicitly-sized lcore variable + * by means of a @ref RTE_INIT constructor. + * + * The values of the lcore variable are initialized to zero. + */ +#define RTE_LCORE_VAR_INIT_SIZE(name, size) \ + RTE_LCORE_VAR_INIT_SIZE_ALIGN(name, size, 0) + +/** + * Allocate an lcore variable by means of a @ref RTE_INIT constructor. + * + * The values of the lcore variable are initialized to zero. + */ +#define RTE_LCORE_VAR_INIT(name) \ + RTE_INIT(rte_lcore_var_init_ ## name) \ + { \ + RTE_LCORE_VAR_ALLOC(name); \ + } + +/** + * Get void pointer to lcore variable instance with the specified lcore id. + * + * @param lcore_id + * The lcore id specifying which of the @c RTE_MAX_LCORE value + * instances should be accessed. The lcore id need not be valid + * (e.g., may be @ref LCORE_ID_ANY), but in such a case, + * the pointer is also not valid (and thus should not be dereferenced). + * @param handle + * The lcore variable handle. + */ +/* access function 8< */ +static inline void * +rte_lcore_var_lcore(unsigned int lcore_id, void *handle) +{ + return RTE_PTR_ADD(handle, lcore_id * RTE_MAX_LCORE_VAR); +} +/* >8 end of access function */ + +/** + * Get pointer to lcore variable instance with the specified lcore id. + * + * @param lcore_id + * The lcore id specifying which of the @c RTE_MAX_LCORE value + * instances should be accessed. The lcore id need not be valid + * (e.g., may be @ref LCORE_ID_ANY), but in such a case, + * the pointer is also not valid (and thus should not be dereferenced). + * @param handle + * The lcore variable handle. 
+ */ +#define RTE_LCORE_VAR_LCORE(lcore_id, handle) \ + ((typeof(handle))rte_lcore_var_lcore(lcore_id, handle)) + +/** + * Get pointer to lcore variable instance of the current thread. + * + * May only be used by EAL threads and registered non-EAL threads. + */ +#define RTE_LCORE_VAR(handle) \ + RTE_LCORE_VAR_LCORE(rte_lcore_id(), handle) + +/** + * Iterate over each lcore id's value for an lcore variable. + * + * @param lcore_id + * An unsigned int variable successively set to the + * lcore id of every valid lcore id (up to @c RTE_MAX_LCORE). + * @param value + * A pointer variable successively set to point to lcore variable + * value instance of the current lcore id being processed. + * @param handle + * The lcore variable handle. + */ +#define RTE_LCORE_VAR_FOREACH(lcore_id, value, handle) \ + for ((lcore_id) = \ + (((value) = RTE_LCORE_VAR_LCORE(0, handle)), 0); \ + (lcore_id) < RTE_MAX_LCORE; \ + (lcore_id)++, (value) = RTE_LCORE_VAR_LCORE(lcore_id, \ + handle)) + +/** + * Allocate space in the per-lcore id buffers for an lcore variable. + * + * The pointer returned is only an opaque identifier of the variable. + * To get an actual pointer to a particular instance of the variable, + * use @ref RTE_LCORE_VAR or @ref RTE_LCORE_VAR_LCORE. + * + * The lcore variable values' memory is set to zero. + * + * The allocation is always successful, + * barring a fatal exhaustion of the per-lcore id buffer space. + * + * rte_lcore_var_alloc() is not multi-thread safe. + * + * The allocated memory cannot be freed. + * + * @param size + * The size (in bytes) of the variable's per-lcore id value. Must be > 0. + * @param align + * If 0, the values will be suitably aligned for any kind of type + * (i.e., alignof(max_align_t)). Otherwise, the values will be aligned + * on a multiple of *align*, which must be a power of 2 + * and equal or less than @c RTE_CACHE_LINE_SIZE. + * @return + * The variable's handle, stored in a void pointer value. + * The value is always non-NULL. + */ +__rte_experimental +void * +rte_lcore_var_alloc(size_t size, size_t align) + __rte_alloc_size(1) __rte_alloc_align(2); + +#ifdef __cplusplus +} +#endif + +#endif /* RTE_LCORE_VAR_H */ diff --git a/lib/eal/include/rte_memzone.h b/lib/eal/include/rte_memzone.h index 931497f37c..e1563994d5 100644 --- a/lib/eal/include/rte_memzone.h +++ b/lib/eal/include/rte_memzone.h @@ -65,9 +65,6 @@ struct rte_memzone { } __rte_packed; /** - * @warning - * @b EXPERIMENTAL: this API may change without prior notice. - * * Set the maximum number of memzones. * * This function can only be called prior to rte_eal_init(). @@ -77,13 +74,9 @@ struct rte_memzone { * @return * 0 on success, -1 otherwise. */ -__rte_experimental int rte_memzone_max_set(size_t max); /** - * @warning - * @b EXPERIMENTAL: this API may change without prior notice. - * * Get the maximum number of memzones. * * @note: The maximum value will not change after calling rte_eal_init(). @@ -91,7 +84,6 @@ int rte_memzone_max_set(size_t max); * @return * Maximum number of memzones. 
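Putting the lcore variable API above together: a minimal sketch of how a module might replace a static RTE_MAX_LCORE-sized array, mirroring the rte_service.c conversion earlier in this patch. The counter structure and function names are hypothetical:

#include <stdint.h>
#include <rte_lcore.h>
#include <rte_lcore_var.h>

struct pkt_counter {
	uint64_t pkts;
};

/* Handle to RTE_MAX_LCORE zero-initialized instances, allocated from
 * an RTE_INIT constructor before main() runs. */
static RTE_LCORE_VAR_HANDLE(struct pkt_counter, pkt_counters);
RTE_LCORE_VAR_INIT(pkt_counters);

static void
count_pkt(void)
{
	/* Instance belonging to the calling EAL or registered thread. */
	struct pkt_counter *c = RTE_LCORE_VAR(pkt_counters);

	c->pkts++;
}

static uint64_t
count_total(void)
{
	uint64_t total = 0;
	unsigned int lcore_id;
	struct pkt_counter *c;

	RTE_LCORE_VAR_FOREACH(lcore_id, c, pkt_counters)
		total += c->pkts;

	return total;
}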
*/ -__rte_experimental size_t rte_memzone_max_get(void); /** diff --git a/lib/eal/linux/eal.c b/lib/eal/linux/eal.c index 54577b7718..a6220524a4 100644 --- a/lib/eal/linux/eal.c +++ b/lib/eal/linux/eal.c @@ -45,6 +45,7 @@ #include #include "eal_private.h" #include "eal_thread.h" +#include "eal_lcore_var.h" #include "eal_internal_cfg.h" #include "eal_filesystem.h" #include "eal_hugepages.h" @@ -546,45 +547,6 @@ eal_parse_vfio_vf_token(const char *vf_token) return -1; } -/* Parse the arguments for --log-level only */ -static void -eal_log_level_parse(int argc, char **argv) -{ - int opt; - char **argvopt; - int option_index; - const int old_optind = optind; - const int old_optopt = optopt; - char * const old_optarg = optarg; - struct internal_config *internal_conf = - eal_get_internal_configuration(); - - argvopt = argv; - optind = 1; - - while ((opt = getopt_long(argc, argvopt, eal_short_options, - eal_long_options, &option_index)) != EOF) { - - int ret; - - /* getopt is not happy, stop right now */ - if (opt == '?') - break; - - ret = (opt == OPT_LOG_LEVEL_NUM) ? - eal_parse_common_option(opt, optarg, internal_conf) : 0; - - /* common parser is not happy */ - if (ret < 0) - break; - } - - /* restore getopt lib */ - optind = old_optind; - optopt = old_optopt; - optarg = old_optarg; -} - static int eal_parse_huge_worker_stack(const char *arg) { @@ -649,8 +611,8 @@ eal_parse_args(int argc, char **argv) goto out; } - /* eal_log_level_parse() already handled this option */ - if (opt == OPT_LOG_LEVEL_NUM) + /* eal_parse_log_options() already handled this option */ + if (eal_option_is_log(opt)) continue; ret = eal_parse_common_option(opt, optarg, internal_conf); @@ -869,8 +831,7 @@ rte_eal_iopl_init(void) static void rte_eal_init_alert(const char *msg) { - fprintf(stderr, "EAL: FATAL: %s\n", msg); - EAL_LOG(ERR, "%s", msg); + EAL_LOG(ALERT, "%s", msg); } /* @@ -966,6 +927,15 @@ rte_eal_init(int argc, char **argv) struct internal_config *internal_conf = eal_get_internal_configuration(); + /* setup log as early as possible */ + if (eal_parse_log_options(argc, argv) < 0) { + rte_eal_init_alert("invalid log arguments."); + rte_errno = EINVAL; + return -1; + } + + eal_log_init(program_invocation_short_name); + /* checks if the machine is adequate */ if (!rte_cpu_is_supported()) { rte_eal_init_alert("unsupported cpu type."); @@ -989,9 +959,6 @@ rte_eal_init(int argc, char **argv) eal_reset_internal_config(internal_conf); - /* set log level as early as possible */ - eal_log_level_parse(argc, argv); - /* clone argv to report out later in telemetry */ eal_save_args(argc, argv); @@ -1143,14 +1110,6 @@ rte_eal_init(int argc, char **argv) #endif } - if (eal_log_init(program_invocation_short_name, - internal_conf->syslog_facility) < 0) { - rte_eal_init_alert("Cannot init logging."); - rte_errno = ENOMEM; - rte_atomic_store_explicit(&run_once, 0, rte_memory_order_relaxed); - return -1; - } - #ifdef VFIO_PRESENT if (rte_vfio_enable("vfio")) { rte_eal_init_alert("Cannot init VFIO"); @@ -1370,6 +1329,7 @@ rte_eal_cleanup(void) rte_eal_memory_detach(); rte_eal_malloc_heap_cleanup(); eal_cleanup_config(internal_conf); + eal_lcore_var_cleanup(); rte_eal_log_cleanup(); return 0; } diff --git a/lib/eal/unix/meson.build b/lib/eal/unix/meson.build index cc7d67dd32..f1eb82e16a 100644 --- a/lib/eal/unix/meson.build +++ b/lib/eal/unix/meson.build @@ -11,3 +11,8 @@ sources += files( 'eal_unix_timer.c', 'rte_thread.c', ) + +if is_freebsd or cc.has_function('pthread_attr_setaffinity_np', args: '-D_GNU_SOURCE', + prefix : 
'#include ') + cflags += '-DRTE_EAL_PTHREAD_ATTR_SETAFFINITY_NP' +endif diff --git a/lib/eal/unix/rte_thread.c b/lib/eal/unix/rte_thread.c index 1b4c73f58e..ea629c2065 100644 --- a/lib/eal/unix/rte_thread.c +++ b/lib/eal/unix/rte_thread.c @@ -19,6 +19,7 @@ struct eal_tls_key { pthread_key_t thread_index; }; +#ifndef RTE_EAL_PTHREAD_ATTR_SETAFFINITY_NP struct thread_start_context { rte_thread_func thread_func; void *thread_args; @@ -28,6 +29,7 @@ struct thread_start_context { int wrapper_ret; bool wrapper_done; }; +#endif static int thread_map_priority_to_os_value(enum rte_thread_priority eal_pri, int *os_pri, @@ -88,6 +90,7 @@ thread_map_os_priority_to_eal_priority(int policy, int os_pri, return 0; } +#ifndef RTE_EAL_PTHREAD_ATTR_SETAFFINITY_NP static void * thread_start_wrapper(void *arg) { @@ -113,6 +116,7 @@ thread_start_wrapper(void *arg) return (void *)(uintptr_t)thread_func(thread_args); } +#endif int rte_thread_create(rte_thread_t *thread_id, @@ -126,6 +130,7 @@ rte_thread_create(rte_thread_t *thread_id, .sched_priority = 0, }; int policy = SCHED_OTHER; +#ifndef RTE_EAL_PTHREAD_ATTR_SETAFFINITY_NP struct thread_start_context ctx = { .thread_func = thread_func, .thread_args = args, @@ -134,6 +139,7 @@ rte_thread_create(rte_thread_t *thread_id, .wrapper_mutex = PTHREAD_MUTEX_INITIALIZER, .wrapper_cond = PTHREAD_COND_INITIALIZER, }; +#endif if (thread_attr != NULL) { ret = pthread_attr_init(&attr); @@ -144,6 +150,16 @@ rte_thread_create(rte_thread_t *thread_id, attrp = &attr; +#ifdef RTE_EAL_PTHREAD_ATTR_SETAFFINITY_NP + if (CPU_COUNT(&thread_attr->cpuset) > 0) { + ret = pthread_attr_setaffinity_np(attrp, sizeof(thread_attr->cpuset), + &thread_attr->cpuset); + if (ret != 0) { + EAL_LOG(DEBUG, "pthread_attr_setaffinity_np failed"); + goto cleanup; + } + } +#endif /* * Set the inherit scheduler parameter to explicit, * otherwise the priority attribute is ignored. 
@@ -178,6 +194,14 @@ rte_thread_create(rte_thread_t *thread_id, } } +#ifdef RTE_EAL_PTHREAD_ATTR_SETAFFINITY_NP + ret = pthread_create((pthread_t *)&thread_id->opaque_id, attrp, + (void *)(void *)thread_func, args); + if (ret != 0) { + EAL_LOG(DEBUG, "pthread_create failed"); + goto cleanup; + } +#else /* !RTE_EAL_PTHREAD_ATTR_SETAFFINITY_NP */ ret = pthread_create((pthread_t *)&thread_id->opaque_id, attrp, thread_start_wrapper, &ctx); if (ret != 0) { @@ -193,6 +217,7 @@ rte_thread_create(rte_thread_t *thread_id, if (ret != 0) rte_thread_join(*thread_id, NULL); +#endif /* RTE_EAL_PTHREAD_ATTR_SETAFFINITY_NP */ cleanup: if (attrp != NULL) diff --git a/lib/eal/version.map b/lib/eal/version.map index f493cd1ca7..a20c713eb1 100644 --- a/lib/eal/version.map +++ b/lib/eal/version.map @@ -23,6 +23,7 @@ DPDK_25 { rte_class_unregister; rte_cpu_get_flag_enabled; rte_cpu_get_flag_name; + rte_cpu_get_intrinsics_support; # WINDOWS_NO_EXPORT rte_cpu_is_supported; # WINDOWS_NO_EXPORT rte_cycles_vmware_tsc_map; # WINDOWS_NO_EXPORT rte_delay_us; @@ -164,6 +165,7 @@ DPDK_25 { rte_lcore_iterate; rte_lcore_to_cpu_id; rte_lcore_to_socket_id; + rte_lcore_register_usage_cb; rte_malloc; rte_malloc_dump_heaps; rte_malloc_dump_stats; @@ -223,6 +225,8 @@ DPDK_25 { rte_memzone_dump; rte_memzone_free; rte_memzone_lookup; + rte_memzone_max_get; + rte_memzone_max_set; rte_memzone_reserve; rte_memzone_reserve_aligned; rte_memzone_reserve_bounded; @@ -384,21 +388,16 @@ EXPERIMENTAL { # added in 20.11 __rte_eal_trace_generic_size_t; # WINDOWS_NO_EXPORT - rte_cpu_get_intrinsics_support; # WINDOWS_NO_EXPORT # added in 23.03 - rte_lcore_register_usage_cb; __rte_eal_trace_generic_blob; - # added in 23.07 - rte_memzone_max_get; - rte_memzone_max_set; - # added in 24.03 rte_vfio_get_device_info; # WINDOWS_NO_EXPORT # added in 24.11 rte_bitset_to_str; + rte_lcore_var_alloc; }; INTERNAL { diff --git a/lib/eal/windows/eal.c b/lib/eal/windows/eal.c index 28b78a95a6..5cdc053a02 100644 --- a/lib/eal/windows/eal.c +++ b/lib/eal/windows/eal.c @@ -16,6 +16,7 @@ #include #include #include +#include "eal_lcore_var.h" #include #include #include @@ -96,41 +97,6 @@ eal_usage(const char *prgname) } } -/* Parse the arguments for --log-level only */ -static void -eal_log_level_parse(int argc, char **argv) -{ - int opt; - char **argvopt; - int option_index; - struct internal_config *internal_conf = - eal_get_internal_configuration(); - - argvopt = argv; - - eal_reset_internal_config(internal_conf); - - while ((opt = getopt_long(argc, argvopt, eal_short_options, - eal_long_options, &option_index)) != EOF) { - - int ret; - - /* getopt is not happy, stop right now */ - if (opt == '?') - break; - - ret = (opt == OPT_LOG_LEVEL_NUM) ? 
- eal_parse_common_option(opt, optarg, - internal_conf) : 0; - - /* common parser is not happy */ - if (ret < 0) - break; - } - - optind = 0; /* reset getopt lib */ -} - /* Parse the argument given in the command line of the application */ static int eal_parse_args(int argc, char **argv) @@ -155,8 +121,8 @@ eal_parse_args(int argc, char **argv) return -1; } - /* eal_log_level_parse() already handled this option */ - if (opt == OPT_LOG_LEVEL_NUM) + /* eal_parse_log_options() already handled this option */ + if (eal_option_is_log(opt)) continue; ret = eal_parse_common_option(opt, optarg, internal_conf); @@ -216,8 +182,7 @@ sync_func(void *arg __rte_unused) static void rte_eal_init_alert(const char *msg) { - fprintf(stderr, "EAL: FATAL: %s\n", msg); - EAL_LOG(ERR, "%s", msg); + EAL_LOG(ALERT, "%s", msg); } /* Stubs to enable EAL trace point compilation @@ -268,6 +233,7 @@ rte_eal_cleanup(void) /* after this point, any DPDK pointers will become dangling */ rte_eal_memory_detach(); eal_cleanup_config(internal_conf); + eal_lcore_var_cleanup(); return 0; } @@ -285,9 +251,14 @@ rte_eal_init(int argc, char **argv) char cpuset[RTE_CPU_AFFINITY_STR_LEN]; char thread_name[RTE_THREAD_NAME_SIZE]; - eal_log_init(NULL, 0); + /* setup log as early as possible */ + if (eal_parse_log_options(argc, argv) < 0) { + rte_eal_init_alert("invalid log arguments."); + rte_errno = EINVAL; + return -1; + } - eal_log_level_parse(argc, argv); + eal_log_init(NULL); if (eal_create_cpu_map() < 0) { rte_eal_init_alert("Cannot discover CPU and NUMA."); diff --git a/lib/eal/windows/getopt.c b/lib/eal/windows/getopt.c index a1f51c6c23..50ff71b930 100644 --- a/lib/eal/windows/getopt.c +++ b/lib/eal/windows/getopt.c @@ -20,7 +20,7 @@ #include #include -const char *optarg; /* argument associated with option */ +char *optarg; /* argument associated with option */ int opterr = 1; /* if error message should be printed */ int optind = 1; /* index into parent argv vector */ int optopt = '?'; /* character checked for validity */ @@ -39,9 +39,9 @@ static void pass(const char *a) {(void) a; } #define BADARG ((*options == ':') ? (int)':' : (int)'?') #define INORDER 1 -#define EMSG "" +static char EMSG[] = ""; -static const char *place = EMSG; /* option letter processing */ +static char *place = EMSG; /* option letter processing */ /* XXX: set optreset to 1 rather than these two */ static int nonopt_start = -1; /* first non option argument (for permute) */ @@ -80,7 +80,7 @@ gcd(int a, int b) */ static void permute_args(int panonopt_start, int panonopt_end, int opt_end, - char **nargv) + char * const *nargv) { int cstart, cyclelen, i, j, ncycle, nnonopts, nopts, pos; char *swap; @@ -101,11 +101,12 @@ permute_args(int panonopt_start, int panonopt_end, int opt_end, pos -= nnonopts; else pos += nopts; + swap = nargv[pos]; /* LINTED const cast */ - ((char **) nargv)[pos] = nargv[cstart]; + ((char **)(uintptr_t)nargv)[pos] = nargv[cstart]; /* LINTED const cast */ - ((char **)nargv)[cstart] = swap; + ((char **)(uintptr_t)nargv)[cstart] = swap; } } } @@ -116,7 +117,7 @@ permute_args(int panonopt_start, int panonopt_end, int opt_end, * Returns -1 if short_too is set and the option does not match long_options. */ static int -parse_long_options(char **nargv, const char *options, +parse_long_options(char * const *nargv, const char *options, const struct option *long_options, int *idx, int short_too) { const char *current_argv; @@ -236,7 +237,7 @@ parse_long_options(char **nargv, const char *options, * Parse argc/argv argument vector. 
Called by user level routines. */ static int -getopt_internal(int nargc, char **nargv, const char *options, +getopt_internal(int nargc, char *const nargv[], const char *options, const struct option *long_options, int *idx, int flags) { char *oli; /* option letter list index */ @@ -434,7 +435,7 @@ getopt_internal(int nargc, char **nargv, const char *options, * Parse argc/argv argument vector. */ int -getopt(int nargc, char *nargv[], const char *options) +getopt(int nargc, char *const nargv[], const char *options) { return getopt_internal(nargc, nargv, options, NULL, NULL, FLAG_PERMUTE); @@ -445,7 +446,7 @@ getopt(int nargc, char *nargv[], const char *options) * Parse argc/argv argument vector. */ int -getopt_long(int nargc, char *nargv[], const char *options, +getopt_long(int nargc, char *const nargv[], const char *options, const struct option *long_options, int *idx) { @@ -458,7 +459,7 @@ getopt_long(int nargc, char *nargv[], const char *options, * Parse argc/argv argument vector. */ int -getopt_long_only(int nargc, char *nargv[], const char *options, +getopt_long_only(int nargc, char *const nargv[], const char *options, const struct option *long_options, int *idx) { diff --git a/lib/eal/windows/include/getopt.h b/lib/eal/windows/include/getopt.h index 6f57af454b..e4cf6873cb 100644 --- a/lib/eal/windows/include/getopt.h +++ b/lib/eal/windows/include/getopt.h @@ -44,7 +44,7 @@ /** argument to current option, or NULL if it has none */ -extern const char *optarg; +extern char *optarg; /** Current position in arg string. Starts from 1. * Setting to 0 resets state. */ @@ -80,14 +80,14 @@ struct option { }; /** Compat: getopt */ -int getopt(int argc, char *argv[], const char *options); +int getopt(int argc, char *const argv[], const char *options); /** Compat: getopt_long */ -int getopt_long(int argc, char *argv[], const char *options, +int getopt_long(int argc, char *const argv[], const char *options, const struct option *longopts, int *longindex); /** Compat: getopt_long_only */ -int getopt_long_only(int nargc, char *argv[], const char *options, +int getopt_long_only(int nargc, char *const argv[], const char *options, const struct option *long_options, int *idx); diff --git a/lib/eal/windows/include/rte_os_shim.h b/lib/eal/windows/include/rte_os_shim.h index eda8113662..0e74eb19c7 100644 --- a/lib/eal/windows/include/rte_os_shim.h +++ b/lib/eal/windows/include/rte_os_shim.h @@ -30,6 +30,8 @@ #define write(fd, buf, n) _write(fd, buf, n) #define close(fd) _close(fd) #define unlink(path) _unlink(path) +#define fileno(f) _fileno(f) +#define isatty(fd) _isatty(fd) #define IPVERSION 4 @@ -110,4 +112,14 @@ rte_clock_gettime(clockid_t clock_id, struct timespec *tp) } #define clock_gettime(clock_id, tp) rte_clock_gettime(clock_id, tp) +static inline struct tm * +rte_localtime_r(const time_t *timep, struct tm *result) +{ + if (localtime_s(result, timep) == 0) + return result; + else + return NULL; +} +#define localtime_r(timep, result) rte_localtime_r(timep, result) + #endif /* _RTE_OS_SHIM_ */ diff --git a/lib/eal/x86/rte_power_intrinsics.c b/lib/eal/x86/rte_power_intrinsics.c index 6d9b64240c..e4cb913590 100644 --- a/lib/eal/x86/rte_power_intrinsics.c +++ b/lib/eal/x86/rte_power_intrinsics.c @@ -6,6 +6,7 @@ #include #include +#include #include #include @@ -14,10 +15,14 @@ /* * Per-lcore structure holding current status of C0.2 sleeps. 
*/ -static alignas(RTE_CACHE_LINE_SIZE) struct power_wait_status { +struct power_wait_status { rte_spinlock_t lock; volatile void *monitor_addr; /**< NULL if not currently sleeping */ -} wait_status[RTE_MAX_LCORE]; +}; + +RTE_LCORE_VAR_HANDLE(struct power_wait_status, wait_status); + +RTE_LCORE_VAR_INIT(wait_status); /* * This function uses UMONITOR/UMWAIT instructions and will enter C0.2 state. @@ -172,7 +177,7 @@ rte_power_monitor(const struct rte_power_monitor_cond *pmc, if (pmc->fn == NULL) return -EINVAL; - s = &wait_status[lcore_id]; + s = RTE_LCORE_VAR_LCORE(lcore_id, wait_status); /* update sleep address */ rte_spinlock_lock(&s->lock); @@ -264,7 +269,7 @@ rte_power_monitor_wakeup(const unsigned int lcore_id) if (lcore_id >= RTE_MAX_LCORE) return -EINVAL; - s = &wait_status[lcore_id]; + s = RTE_LCORE_VAR_LCORE(lcore_id, wait_status); /* * There is a race condition between sleep, wakeup and locking, but we @@ -303,8 +308,7 @@ int rte_power_monitor_multi(const struct rte_power_monitor_cond pmc[], const uint32_t num, const uint64_t tsc_timestamp) { - const unsigned int lcore_id = rte_lcore_id(); - struct power_wait_status *s = &wait_status[lcore_id]; + struct power_wait_status *s = RTE_LCORE_VAR(wait_status); uint32_t i, rc; /* check if supported */ diff --git a/lib/efd/rte_efd.c b/lib/efd/rte_efd.c index d3b732f2e8..3cbb3c2719 100644 --- a/lib/efd/rte_efd.c +++ b/lib/efd/rte_efd.c @@ -212,7 +212,7 @@ struct efd_offline_chunk_rules { struct efd_online_group_entry { efd_hashfunc_t hash_idx[RTE_EFD_VALUE_NUM_BITS]; efd_lookuptbl_t lookup_table[RTE_EFD_VALUE_NUM_BITS]; -} __rte_packed; +}; /** * A single chunk record, containing EFD_TARGET_CHUNK_NUM_RULES rules. @@ -228,7 +228,7 @@ struct efd_online_chunk { struct efd_online_group_entry groups[EFD_CHUNK_NUM_GROUPS]; /**< Array of all the groups in the chunk. */ -} __rte_packed; +}; /** * EFD table structure diff --git a/lib/ethdev/rte_class_eth.c b/lib/ethdev/rte_class_eth.c index b52f1dd9f2..a8d01e2595 100644 --- a/lib/ethdev/rte_class_eth.c +++ b/lib/ethdev/rte_class_eth.c @@ -50,8 +50,10 @@ eth_mac_cmp(const char *key __rte_unused, if (rte_ether_unformat_addr(value, &mac) < 0) return -1; /* invalid devargs value */ + if (rte_eth_dev_info_get(data->port_id, &dev_info) != 0) + return -1; /* device MAC address unavailable */ + /* Return 0 if devargs MAC is matching one of the device MACs. */ - rte_eth_dev_info_get(data->port_id, &dev_info); for (index = 0; index < dev_info.max_mac_addrs; index++) if (rte_is_same_ether_addr(&mac, &data->mac_addrs[index])) return 0; diff --git a/lib/ethdev/rte_ethdev.h b/lib/ethdev/rte_ethdev.h index c4241d048c..1f71cad244 100644 --- a/lib/ethdev/rte_ethdev.h +++ b/lib/ethdev/rte_ethdev.h @@ -171,6 +171,10 @@ #include "rte_ethdev_trace_fp.h" #include "rte_dev_info.h" +#ifdef __cplusplus +extern "C" { +#endif + extern int rte_eth_dev_logtype; #define RTE_LOGTYPE_ETHDEV rte_eth_dev_logtype @@ -1460,9 +1464,17 @@ enum rte_eth_tunnel_type { RTE_ETH_TUNNEL_TYPE_MAX, }; +#ifdef __cplusplus +} +#endif + /* Deprecated API file for rte_eth_dev_filter_* functions */ #include "rte_eth_ctrl.h" +#ifdef __cplusplus +extern "C" { +#endif + /** * UDP tunneling configuration. * @@ -3063,7 +3075,8 @@ int rte_eth_allmulticast_get(uint16_t port_id); * - (-ENODEV) if *port_id* invalid. * - (-EINVAL) if bad parameter. 
*/ -int rte_eth_link_get(uint16_t port_id, struct rte_eth_link *link); +int rte_eth_link_get(uint16_t port_id, struct rte_eth_link *link) + __rte_warn_unused_result; /** * Retrieve the link status (up/down), the duplex mode (half/full), @@ -3079,7 +3092,8 @@ int rte_eth_link_get(uint16_t port_id, struct rte_eth_link *link); * - (-ENODEV) if *port_id* invalid. * - (-EINVAL) if bad parameter. */ -int rte_eth_link_get_nowait(uint16_t port_id, struct rte_eth_link *link); +int rte_eth_link_get_nowait(uint16_t port_id, struct rte_eth_link *link) + __rte_warn_unused_result; /** * @warning @@ -3487,7 +3501,8 @@ int rte_eth_macaddrs_get(uint16_t port_id, struct rte_ether_addr *ma, * - (-ENODEV) if *port_id* invalid. * - (-EINVAL) if bad parameter. */ -int rte_eth_dev_info_get(uint16_t port_id, struct rte_eth_dev_info *dev_info); +int rte_eth_dev_info_get(uint16_t port_id, struct rte_eth_dev_info *dev_info) + __rte_warn_unused_result; /** * @warning @@ -3505,7 +3520,8 @@ int rte_eth_dev_info_get(uint16_t port_id, struct rte_eth_dev_info *dev_info); * - (-EINVAL) if bad parameter. */ __rte_experimental -int rte_eth_dev_conf_get(uint16_t port_id, struct rte_eth_conf *dev_conf); +int rte_eth_dev_conf_get(uint16_t port_id, struct rte_eth_conf *dev_conf) + __rte_warn_unused_result; /** * Retrieve the firmware version of a device. @@ -3527,8 +3543,8 @@ int rte_eth_dev_conf_get(uint16_t port_id, struct rte_eth_conf *dev_conf); * - (>0) if *fw_size* is not enough to store firmware version, return * the size of the non truncated string. */ -int rte_eth_dev_fw_version_get(uint16_t port_id, - char *fw_version, size_t fw_size); +int rte_eth_dev_fw_version_get(uint16_t port_id, char *fw_version, size_t fw_size) + __rte_warn_unused_result; /** * Retrieve the supported packet types of an Ethernet device. @@ -3570,7 +3586,9 @@ int rte_eth_dev_fw_version_get(uint16_t port_id, * - (-EINVAL) if bad parameter. */ int rte_eth_dev_get_supported_ptypes(uint16_t port_id, uint32_t ptype_mask, - uint32_t *ptypes, int num); + uint32_t *ptypes, int num) + __rte_warn_unused_result; + /** * Inform Ethernet device about reduced range of packet types to handle. * @@ -5197,7 +5215,8 @@ int rte_eth_dev_get_reg_info_ext(uint16_t port_id, struct rte_dev_reg_info *info * - (-EIO) if device is removed. * - others depends on the specific operations implementation. */ -int rte_eth_dev_get_reg_info(uint16_t port_id, struct rte_dev_reg_info *info); +int rte_eth_dev_get_reg_info(uint16_t port_id, struct rte_dev_reg_info *info) + __rte_warn_unused_result; /** * Retrieve size of device EEPROM @@ -5269,8 +5288,8 @@ int rte_eth_dev_set_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info); */ __rte_experimental int -rte_eth_dev_get_module_info(uint16_t port_id, - struct rte_eth_dev_module_info *modinfo); +rte_eth_dev_get_module_info(uint16_t port_id, struct rte_eth_dev_module_info *modinfo) + __rte_warn_unused_result; /** * @warning @@ -5293,8 +5312,8 @@ rte_eth_dev_get_module_info(uint16_t port_id, */ __rte_experimental int -rte_eth_dev_get_module_eeprom(uint16_t port_id, - struct rte_dev_eeprom_info *info); +rte_eth_dev_get_module_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info) + __rte_warn_unused_result; /** * Set the list of multicast addresses to filter on an Ethernet device. 
@@ -6115,6 +6134,10 @@ int rte_eth_cman_config_set(uint16_t port_id, const struct rte_eth_cman_config * __rte_experimental int rte_eth_cman_config_get(uint16_t port_id, struct rte_eth_cman_config *config); +#ifdef __cplusplus +} +#endif + #include #ifdef __cplusplus @@ -6997,7 +7020,8 @@ rte_eth_recycle_mbufs(uint16_t rx_port_id, uint16_t rx_queue_id, * - (-EINVAL) if bad parameter. */ __rte_experimental -int rte_eth_buffer_split_get_supported_hdr_ptypes(uint16_t port_id, uint32_t *ptypes, int num); +int rte_eth_buffer_split_get_supported_hdr_ptypes(uint16_t port_id, uint32_t *ptypes, int num) + __rte_warn_unused_result; /** * @warning diff --git a/lib/eventdev/eventdev_pmd.h b/lib/eventdev/eventdev_pmd.h index af855e3467..36148f8d86 100644 --- a/lib/eventdev/eventdev_pmd.h +++ b/lib/eventdev/eventdev_pmd.h @@ -158,16 +158,12 @@ struct __rte_cache_aligned rte_eventdev { uint8_t attached : 1; /**< Flag indicating the device is attached */ - event_enqueue_t enqueue; - /**< Pointer to PMD enqueue function. */ event_enqueue_burst_t enqueue_burst; /**< Pointer to PMD enqueue burst function. */ event_enqueue_burst_t enqueue_new_burst; /**< Pointer to PMD enqueue burst function(op new variant) */ event_enqueue_burst_t enqueue_forward_burst; /**< Pointer to PMD enqueue burst function(op forward variant) */ - event_dequeue_t dequeue; - /**< Pointer to PMD dequeue function. */ event_dequeue_burst_t dequeue_burst; /**< Pointer to PMD dequeue burst function. */ event_maintain_t maintain; diff --git a/lib/eventdev/eventdev_private.c b/lib/eventdev/eventdev_private.c index b628f4a69e..6df129fc2d 100644 --- a/lib/eventdev/eventdev_private.c +++ b/lib/eventdev/eventdev_private.c @@ -5,15 +5,6 @@ #include "eventdev_pmd.h" #include "rte_eventdev.h" -static uint16_t -dummy_event_enqueue(__rte_unused void *port, - __rte_unused const struct rte_event *ev) -{ - RTE_EDEV_LOG_ERR( - "event enqueue requested for unconfigured event device"); - return 0; -} - static uint16_t dummy_event_enqueue_burst(__rte_unused void *port, __rte_unused const struct rte_event ev[], @@ -24,15 +15,6 @@ dummy_event_enqueue_burst(__rte_unused void *port, return 0; } -static uint16_t -dummy_event_dequeue(__rte_unused void *port, __rte_unused struct rte_event *ev, - __rte_unused uint64_t timeout_ticks) -{ - RTE_EDEV_LOG_ERR( - "event dequeue requested for unconfigured event device"); - return 0; -} - static uint16_t dummy_event_dequeue_burst(__rte_unused void *port, __rte_unused struct rte_event ev[], @@ -129,11 +111,9 @@ event_dev_fp_ops_reset(struct rte_event_fp_ops *fp_op) { static void *dummy_data[RTE_MAX_QUEUES_PER_PORT]; static const struct rte_event_fp_ops dummy = { - .enqueue = dummy_event_enqueue, .enqueue_burst = dummy_event_enqueue_burst, .enqueue_new_burst = dummy_event_enqueue_burst, .enqueue_forward_burst = dummy_event_enqueue_burst, - .dequeue = dummy_event_dequeue, .dequeue_burst = dummy_event_dequeue_burst, .maintain = dummy_event_maintain, .txa_enqueue = dummy_event_tx_adapter_enqueue, @@ -153,11 +133,9 @@ void event_dev_fp_ops_set(struct rte_event_fp_ops *fp_op, const struct rte_eventdev *dev) { - fp_op->enqueue = dev->enqueue; fp_op->enqueue_burst = dev->enqueue_burst; fp_op->enqueue_new_burst = dev->enqueue_new_burst; fp_op->enqueue_forward_burst = dev->enqueue_forward_burst; - fp_op->dequeue = dev->dequeue; fp_op->dequeue_burst = dev->dequeue_burst; fp_op->maintain = dev->maintain; fp_op->txa_enqueue = dev->txa_enqueue; diff --git a/lib/eventdev/rte_eventdev.h b/lib/eventdev/rte_eventdev.h index 
b5c3c16dd0..fabd1490db 100644 --- a/lib/eventdev/rte_eventdev.h +++ b/lib/eventdev/rte_eventdev.h @@ -2596,14 +2596,8 @@ __rte_event_enqueue_burst(uint8_t dev_id, uint8_t port_id, } #endif rte_eventdev_trace_enq_burst(dev_id, port_id, ev, nb_events, (void *)fn); - /* - * Allow zero cost non burst mode routine invocation if application - * requests nb_events as const one - */ - if (nb_events == 1) - return (fp_ops->enqueue)(port, ev); - else - return fn(port, ev, nb_events); + + return fn(port, ev, nb_events); } /** @@ -2852,15 +2846,8 @@ rte_event_dequeue_burst(uint8_t dev_id, uint8_t port_id, struct rte_event ev[], } #endif rte_eventdev_trace_deq_burst(dev_id, port_id, ev, nb_events); - /* - * Allow zero cost non burst mode routine invocation if application - * requests nb_events as const one - */ - if (nb_events == 1) - return (fp_ops->dequeue)(port, ev, timeout_ticks); - else - return (fp_ops->dequeue_burst)(port, ev, nb_events, - timeout_ticks); + + return (fp_ops->dequeue_burst)(port, ev, nb_events, timeout_ticks); } #define RTE_EVENT_DEV_MAINT_OP_FLUSH (1 << 0) diff --git a/lib/eventdev/rte_eventdev_core.h b/lib/eventdev/rte_eventdev_core.h index 2706d5e6c8..1818483044 100644 --- a/lib/eventdev/rte_eventdev_core.h +++ b/lib/eventdev/rte_eventdev_core.h @@ -12,18 +12,11 @@ extern "C" { #endif -typedef uint16_t (*event_enqueue_t)(void *port, const struct rte_event *ev); -/**< @internal Enqueue event on port of a device */ - typedef uint16_t (*event_enqueue_burst_t)(void *port, const struct rte_event ev[], uint16_t nb_events); /**< @internal Enqueue burst of events on port of a device */ -typedef uint16_t (*event_dequeue_t)(void *port, struct rte_event *ev, - uint64_t timeout_ticks); -/**< @internal Dequeue event from port of a device */ - typedef uint16_t (*event_dequeue_burst_t)(void *port, struct rte_event ev[], uint16_t nb_events, uint64_t timeout_ticks); @@ -60,16 +53,12 @@ typedef void (*event_preschedule_t)(void *port, struct __rte_cache_aligned rte_event_fp_ops { void **data; /**< points to array of internal port data pointers */ - event_enqueue_t enqueue; - /**< PMD enqueue function. */ event_enqueue_burst_t enqueue_burst; /**< PMD enqueue burst function. */ event_enqueue_burst_t enqueue_new_burst; /**< PMD enqueue burst new function. */ event_enqueue_burst_t enqueue_forward_burst; /**< PMD enqueue burst fwd function. */ - event_dequeue_t dequeue; - /**< PMD dequeue function. */ event_dequeue_burst_t dequeue_burst; /**< PMD dequeue burst function. */ event_maintain_t maintain; diff --git a/lib/fib/rte_fib.c b/lib/fib/rte_fib.c index fa8779462a..db79fc428e 100644 --- a/lib/fib/rte_fib.c +++ b/lib/fib/rte_fib.c @@ -346,6 +346,9 @@ rte_fib_select_lookup(struct rte_fib *fib, int rte_fib_rcu_qsbr_add(struct rte_fib *fib, struct rte_fib_rcu_config *cfg) { + if (fib == NULL) + return -EINVAL; + switch (fib->type) { case RTE_FIB_DIR24_8: return dir24_8_rcu_qsbr_add(fib->dp, cfg, fib->name); diff --git a/lib/graph/node.c b/lib/graph/node.c index f15922892e..63db629da8 100644 --- a/lib/graph/node.c +++ b/lib/graph/node.c @@ -156,7 +156,7 @@ node_clone(struct node *node, const char *name) (node->xstats->nb_xstats * RTE_NODE_XSTAT_DESC_SIZE)); if (reg->xstats == NULL) { rte_errno = ENOMEM; - goto fail; + goto free; } for (i = 0; i < node->xstats->nb_xstats; i++) @@ -178,7 +178,7 @@ node_clone(struct node *node, const char *name) /* Naming ceremony of the new node. 
name is node->name + "-" + name */ if (clone_name(reg->name, node->name, name)) - goto free; + goto free_xstat; rc = __rte_node_register(reg); free_xstat: diff --git a/lib/hash/meson.build b/lib/hash/meson.build index 277eb9fa93..7ce504ee8b 100644 --- a/lib/hash/meson.build +++ b/lib/hash/meson.build @@ -23,6 +23,7 @@ sources = files( 'rte_fbk_hash.c', 'rte_thash.c', 'rte_thash_gfni.c', + 'rte_thash_gf2_poly_math.c', ) deps += ['net'] diff --git a/lib/hash/rte_thash.c b/lib/hash/rte_thash.c index 99a685f0c8..336c228e64 100644 --- a/lib/hash/rte_thash.c +++ b/lib/hash/rte_thash.c @@ -31,33 +31,6 @@ static struct rte_tailq_elem rte_thash_tailq = { }; EAL_REGISTER_TAILQ(rte_thash_tailq) -/** - * Table of some irreducible polinomials over GF(2). - * For lfsr they are represented in BE bit order, and - * x^0 is masked out. - * For example, poly x^5 + x^2 + 1 will be represented - * as (101001b & 11111b) = 01001b = 0x9 - */ -static const uint32_t irreducible_poly_table[][4] = { - {0, 0, 0, 0}, /** < degree 0 */ - {1, 1, 1, 1}, /** < degree 1 */ - {0x3, 0x3, 0x3, 0x3}, /** < degree 2 and so on... */ - {0x5, 0x3, 0x5, 0x3}, - {0x9, 0x3, 0x9, 0x3}, - {0x9, 0x1b, 0xf, 0x5}, - {0x21, 0x33, 0x1b, 0x2d}, - {0x41, 0x11, 0x71, 0x9}, - {0x71, 0xa9, 0xf5, 0x8d}, - {0x21, 0xd1, 0x69, 0x1d9}, - {0x81, 0x2c1, 0x3b1, 0x185}, - {0x201, 0x541, 0x341, 0x461}, - {0x941, 0x609, 0xe19, 0x45d}, - {0x1601, 0x1f51, 0x1171, 0x359}, - {0x2141, 0x2111, 0x2db1, 0x2109}, - {0x4001, 0x801, 0x101, 0x7301}, - {0x7781, 0xa011, 0x4211, 0x86d9}, -}; - struct thash_lfsr { uint32_t ref_cnt; uint32_t poly; @@ -159,13 +132,6 @@ get_rev_bit_lfsr(struct thash_lfsr *lfsr) return ret; } -static inline uint32_t -thash_get_rand_poly(uint32_t poly_degree) -{ - return irreducible_poly_table[poly_degree][rte_rand() % - RTE_DIM(irreducible_poly_table[poly_degree])]; -} - static inline uint32_t get_rev_poly(uint32_t poly, int degree) { @@ -191,19 +157,19 @@ get_rev_poly(uint32_t poly, int degree) } static struct thash_lfsr * -alloc_lfsr(struct rte_thash_ctx *ctx) +alloc_lfsr(uint32_t poly_degree) { struct thash_lfsr *lfsr; uint32_t i; - if (ctx == NULL) + if ((poly_degree > 32) || (poly_degree == 0)) return NULL; lfsr = rte_zmalloc(NULL, sizeof(struct thash_lfsr), 0); if (lfsr == NULL) return NULL; - lfsr->deg = ctx->reta_sz_log; + lfsr->deg = poly_degree; lfsr->poly = thash_get_rand_poly(lfsr->deg); do { lfsr->state = rte_rand() & ((1 << lfsr->deg) - 1); @@ -484,7 +450,7 @@ insert_before(struct rte_thash_ctx *ctx, int ret; if (end < cur_ent->offset) { - ent->lfsr = alloc_lfsr(ctx); + ent->lfsr = alloc_lfsr(ctx->reta_sz_log); if (ent->lfsr == NULL) { rte_free(ent); return -ENOMEM; @@ -637,7 +603,7 @@ rte_thash_add_helper(struct rte_thash_ctx *ctx, const char *name, uint32_t len, continue; } - ent->lfsr = alloc_lfsr(ctx); + ent->lfsr = alloc_lfsr(ctx->reta_sz_log); if (ent->lfsr == NULL) { rte_free(ent); return -ENOMEM; @@ -856,3 +822,29 @@ rte_thash_adjust_tuple(struct rte_thash_ctx *ctx, return ret; } + +int +rte_thash_gen_key(uint8_t *key, size_t key_len, size_t reta_sz_log, + uint32_t entropy_start, size_t entropy_sz) +{ + size_t i, end, start; + + /* define lfsr sequence range*/ + end = entropy_start + entropy_sz + TOEPLITZ_HASH_LEN - 1; + start = end - (entropy_sz + reta_sz_log - 1); + + if ((key == NULL) || (key_len * CHAR_BIT < entropy_start + entropy_sz) || + (entropy_sz < reta_sz_log) || (reta_sz_log > TOEPLITZ_HASH_LEN)) + return -EINVAL; + + struct thash_lfsr *lfsr = alloc_lfsr(reta_sz_log); + if (lfsr == NULL) + return -ENOMEM; + + 
for (i = start; i < end; i++) + set_bit(key, get_bit_lfsr(lfsr), i); + + free_lfsr(lfsr); + + return 0; +} diff --git a/lib/hash/rte_thash.h b/lib/hash/rte_thash.h index e8fdb89530..c0af5968df 100644 --- a/lib/hash/rte_thash.h +++ b/lib/hash/rte_thash.h @@ -108,6 +108,19 @@ union rte_thash_tuple { struct rte_ipv6_tuple v6; }; +/** @internal + * @brief Generates a random polynomial + * + * @param poly_degree + * degree of the polynomial + * + * @return + * random polynomial + */ +__rte_internal +uint32_t +thash_get_rand_poly(uint32_t poly_degree); + /** * Prepare special converted key to use with rte_softrss_be() * @param orig @@ -447,6 +460,35 @@ rte_thash_adjust_tuple(struct rte_thash_ctx *ctx, uint32_t desired_value, unsigned int attempts, rte_thash_check_tuple_t fn, void *userdata); +/** + * @warning + * @b EXPERIMENTAL: this API may change without prior notice. + * + * Modify RSS hash key such that subtuple bits corresponding to `entropy_sz` + * bits starting from `entropy_start` will have the most even distribution with + * this key with a given ReTa size. + * + * @param key + * Pointer to the RSS hash key. + * @param key_len + * Length of the key. + * @param reta_sz_log + * Log2 of the size of RSS redirection table, + * i.e. number of bits of the RSS hash value used to identify RSS ReTa entry. + * @param entropy_start + * Bit offset from the beginning of the tuple + * where user expects best distribution of the subtuple values. + * @param entropy_sz + * Size in bits of the part of subtuple. + * + * @return + * 0 on success negative otherwise + */ +__rte_experimental +int +rte_thash_gen_key(uint8_t *key, size_t key_len, size_t reta_sz_log, + uint32_t entropy_start, size_t entropy_sz); + #ifdef __cplusplus } #endif diff --git a/lib/hash/rte_thash_gf2_poly_math.c b/lib/hash/rte_thash_gf2_poly_math.c new file mode 100644 index 0000000000..1c62974e71 --- /dev/null +++ b/lib/hash/rte_thash_gf2_poly_math.c @@ -0,0 +1,260 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2024 Intel Corporation + */ +#include +#include +#include +#include +#include +#include + +#define MAX_TOEPLITZ_KEY_LENGTH 64 +RTE_LOG_REGISTER_SUFFIX(thash_poly_logtype, thash_poly, INFO); +#define RTE_LOGTYPE_HASH thash_poly_logtype +#define HASH_LOG(level, ...) \ + RTE_LOG_LINE(level, HASH, "" __VA_ARGS__) + +/* + * Finite field math for field extensions with irreducing polynomial + * of degree <= 32. + * Polynomials are represented with 2 arguments - uint32_t poly and int degree. + * poly argument does not hold the highest degree coefficient, + * so full polynomial can be expressed as poly|(1ULL << degree). + * The algorithm to produce random irreducible polynomial was inspired by: + * "Computation in finite fields" by John Kerl, Chapter 10.4. + */ + +static const uint32_t irreducible_poly_table[][3] = { + {0, 0, 0}, /* degree 0 */ + {0x1, 0x1, 0x1}, /* degree 1 */ + {0x3, 0x3, 0x3}, /* degree 2 and so on.. */ + {0x3, 0x5, 0x3}, /* x^3+x^2+1(0x5) is reverted x^3+x+1(0x3) */ + {0x3, 0x9, 0x3}, /* x^4+x^3+1(0x9) is reverted x^4+x+1(0x3) */ + {0x5, 0xf, 0x17}, /* 0x5 <-> 0x9; 0xf <-> 0x1d; 0x17 <-> 0x1b */ + {0x3, 0x27, 0x1b}, /* 0x3 <-> 0x21; 0x27 <-> 0x33; 0x1b <-> 0x2d*/ +}; + +/* + * Table of the monic lowest weight irreducible polynomials over GF(2) + * starting from degree 7 up to degree 32. + * Taken from Handbook of Finite Fields by Gary L. Mullen and + * Daniel Penario p.33 Table 2.2.1. 
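The rte_thash_gen_key() API declared above can be used on its own to produce an RSS key tuned for a given ReTa size. A hedged sketch, assuming a standard 40-byte key and asking for an even spread of the IPv4 destination port, which starts at bit 96 of the (src ip, dst ip, src port, dst port) tuple; the chosen values are illustrative only:

#include <stdint.h>
#include <rte_thash.h>

static int
make_rss_key(uint8_t key[40])
{
	/* reta_sz_log = 7 -> 128-entry redirection table;
	 * 16 bits of entropy covering the whole destination port. */
	return rte_thash_gen_key(key, 40, 7, 96, 16);
}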
+ * https://people.math.carleton.ca/~daniel/hff/irred/F2-2to10000.txt + */ +static const uint32_t default_irreducible_poly[] = { + 0x3, /* x^7 + x + 1*/ + 0x1b, /* x^8 + x^4 + x^3 + x + 1 */ + 0x3, /* x^9 + x + 1*/ + 0x9, /* x^10 + x^3 + 1 */ + 0x5, /* x^11 + x^2 + 1 */ + 0x9, /* x^12 + x^3 + 1 */ + 0x1b, /* x^13 + x^4 + x^3 + x + 1 */ + 0x33, /* x^14 + x^5 + 1 */ + 0x3, /* x^15 + x + 1 */ + 0x2b, /* x^16 + x^5 + x^3 + x + 1 */ + 0x9, /* x^17 + x^3 + 1 */ + 0x9, /* x^18 + x^3 + 1 */ + 0x27, /* x^19 + x^5 + x^2 + x + 1 */ + 0x9, /* x^20 + x^3 + 1 */ + 0x5, /* x^21 + x^2 + 1 */ + 0x3, /* x^22 + x + 1 */ + 0x21, /* x^23 + x^5 + 1 */ + 0x1b, /* x^24 + x^4 + x^3 + x + 1 */ + 0x9, /* x^25 + x^3 + 1 */ + 0x1b, /* x^26 + x^4 + x^3 + x + 1 */ + 0x27, /* x^27 + x^5 + x^2 + x + 1 */ + 0x3, /* x^28 + x + 1 */ + 0x5, /* x^29 + x^2 + 1 */ + 0x3, /* x^30 + x + 1 */ + 0x9, /* x^31 + x^3 + 1 */ + 0x8d, /* x^32 + x^7 + x^3 + x^2 + 1 */ +}; + +#define MAX_DIVISORS 28 /* 2^24 - 1 */ + +struct divisors { + uint32_t n; /* number of divisors */ + uint32_t div_arr[MAX_DIVISORS]; +}; + +/* divisors of (2^n - 1) less than MIN(512, 2^n - 1) for all n in [7, 32] */ +static const struct divisors divisors[] = { + { .n = 0, .div_arr = {} }, /* 2^7-1 is Mersenne prime */ + { .n = 6, .div_arr = {3, 5, 15, 17, 51, 85} }, + { .n = 2, .div_arr = {7, 73} }, + { .n = 6, .div_arr = {3, 11, 31, 33, 93, 341} }, + { .n = 2, .div_arr = {23, 89} }, + { .n = 19, .div_arr = {3, 5, 7, 9, 13, 15, 21, 35, 39, 45, 63, 65, 91, + 105, 117, 195, 273, 315, 455} }, + { .n = 0, .div_arr = {} }, /* 2^13-1 is Mersenne prime */ + { .n = 5, .div_arr = {3, 43, 127, 129, 381} }, + { .n = 4, .div_arr = {7, 31, 151, 217} }, + { .n = 8, .div_arr = {3, 5, 15, 17, 51, 85, 255, 257} }, + { .n = 0, .div_arr = {} }, /* 2^17-1 is Mersenne prime */ + { .n = 14, .div_arr = {3, 7, 9, 19, 21, 27, 57, 63, 73, 133, 171, 189, + 219, 399} }, + { .n = 0, .div_arr = {0} }, /* 2^19-1 is Mersenne prime */ + { .n = 19, .div_arr = {3, 5, 11, 15, 25, 31, 33, 41, 55, 75, 93, 123, + 155, 165, 205, 275, 341, 451, 465} }, + { .n = 4, .div_arr = {7, 49, 127, 337} }, + { .n = 5, .div_arr = {3, 23, 69, 89, 267} }, + { .n = 1, .div_arr = {47} }, + { .n = 28, .div_arr = {3, 5, 7, 9, 13, 15, 17, 21, 35, 39, 45, 51, 63, + 65, 85, 91, 105, 117, 119, 153, 195, 221, 241, 255, 273, 315, + 357, 455} }, + { .n = 1, .div_arr = {31} }, + { .n = 1, .div_arr = {3} }, + { .n = 2, .div_arr = {7, 73} }, + { .n = 14, .div_arr = {3, 5, 15, 29, 43, 87, 113, 127, 129, 145, 215, + 339, 381, 435} }, + { .n = 1, .div_arr = {233} }, + { .n = 18, .div_arr = {3, 7, 9, 11, 21, 31, 33, 63, 77, 93, 99, 151, + 217, 231, 279, 331, 341, 453} }, + { .n = 0, .div_arr = {} },/* 2^31-1 is Mersenne prime */ + { .n = 8, .div_arr = {3, 5, 15, 17, 51, 85, 255, 257} }, +}; + +static uint32_t +gf2_mul(uint32_t a, uint32_t b, uint32_t r, int degree) +{ + uint64_t product = 0; + uint64_t r_poly = r|(1ULL << degree); + + for (; b; b &= (b - 1)) + product ^= (uint64_t)a << (rte_bsf32(b)); + + for (int i = degree * 2 - 1; i >= degree; i--) + if (product & (1 << i)) + product ^= r_poly << (i - degree); + + return product; +} + +static uint32_t +gf2_pow(uint32_t a, uint32_t pow, uint32_t r, int degree) +{ + uint32_t result = 1; + unsigned int i; + + for (i = 0; i < (sizeof(pow)*CHAR_BIT - rte_clz32(pow)); i++) { + if (pow & (1 << i)) + result = gf2_mul(result, a, r, degree); + + a = gf2_mul(a, a, r, degree); + } + + return result; +} + +static uint32_t +__thash_get_rand_poly(int poly_degree) +{ + uint32_t roots[poly_degree]; + 
uint32_t rnd; + uint32_t ret_poly = 0; + int i, j; + bool short_orbit = false; + + /* special case for low degree */ + if (poly_degree < 7) + return irreducible_poly_table[poly_degree][rte_rand() % + RTE_DIM(irreducible_poly_table[poly_degree])]; + + uint32_t r = default_irreducible_poly[poly_degree - 7]; + + do { + short_orbit = false; + do { + rnd = rte_rand() & ((1 << poly_degree) - 1); + } while ((rnd == 0) || (rnd == 1)); + + /* + * Quick check if random returned one of the roots of + * the initial polynomial. + * In other words if we randomy got x, x^2, x^4, x^8 or x^16 + */ +#define ROOT_POLY_MSK ((1 << 1)|(1 << 2)|(1 << 4)|(1 << 8)|(1 << 16)) + if ((rte_popcount32(rnd) == 1) && (rnd & ROOT_POLY_MSK)) + return default_irreducible_poly[poly_degree - 7]; + + /* + * init array with some random polynomial roots + * applying Frobenius automorphism (i.e. squaring them) + * also checking for short orbits (i.e. if there are repeated roots) + */ + roots[0] = rnd; + for (i = 1; i < poly_degree; i++) { + roots[i] = gf2_pow(roots[i - 1], 2, r, poly_degree); + if (roots[i] == roots[0]) + short_orbit = true; + } + } while (short_orbit); + + /* + * Get coefficients of the polynomial for + * (x - roots[0])(x - roots[1])...(x - roots[n]) + */ + uint32_t poly_coefficients[poly_degree + 1]; + for (i = 0; i <= poly_degree; i++) + poly_coefficients[i] = 0; + + poly_coefficients[0] = 1; /* highest degree term coefficient in the end */ + for (i = 0; i < (int)poly_degree; i++) { + /* multiply by x */ + for (j = i; j >= 0; j--) + poly_coefficients[j + 1] = poly_coefficients[j]; + + poly_coefficients[0] = 0; + + /* multiply by root */ + for (j = 0; j <= i; j++) + poly_coefficients[j] ^= + gf2_mul(poly_coefficients[j + 1], + roots[i], r, poly_degree); + } + + for (i = 0; i < poly_degree; i++) { + if (poly_coefficients[i]) { + RTE_ASSERT(poly_coefficients[i] == 1); + ret_poly |= 1 << i; + } + } + + return ret_poly; +} + +/* test an order of the multiplicative subgroup generated by x */ +static int +thash_test_poly_order(uint32_t poly, int degree) +{ + unsigned int i; + int div_idx = degree - 7; + + if (degree < 7) + return 0; + + for (i = 0; i < divisors[div_idx].n; i++) { + if (gf2_pow(0x2, divisors[div_idx].div_arr[i], + poly, degree) == 1) + return 1; + } + + return 0; +} + +uint32_t +thash_get_rand_poly(uint32_t poly_degree) +{ + uint32_t ret_poly; + + if (poly_degree > 32) { + HASH_LOG(ERR, "Wrong polynomial degree %d, must be in range [1, 32]", poly_degree); + return 0; + } + + do + ret_poly = __thash_get_rand_poly(poly_degree); + while (thash_test_poly_order(ret_poly, poly_degree)); + + return ret_poly; +} diff --git a/lib/hash/version.map b/lib/hash/version.map index 11a5394a45..779eb1fe13 100644 --- a/lib/hash/version.map +++ b/lib/hash/version.map @@ -52,6 +52,9 @@ EXPERIMENTAL { # added in 24.07 rte_hash_rcu_qsbr_dq_reclaim; + + # added in 24.11 + rte_thash_gen_key; }; INTERNAL { @@ -59,4 +62,5 @@ INTERNAL { rte_thash_gfni_stub; rte_thash_gfni_bulk_stub; + thash_get_rand_poly; }; diff --git a/lib/ip_frag/rte_ipv4_reassembly.c b/lib/ip_frag/rte_ipv4_reassembly.c index 4a89a5f536..5818f50f40 100644 --- a/lib/ip_frag/rte_ipv4_reassembly.c +++ b/lib/ip_frag/rte_ipv4_reassembly.c @@ -101,7 +101,6 @@ rte_ipv4_frag_reassemble_packet(struct rte_ip_frag_tbl *tbl, { struct ip_frag_pkt *fp; struct ip_frag_key key; - const unaligned_uint64_t *psd; uint16_t flag_offset, ip_ofs, ip_flag; int32_t ip_len; int32_t trim; @@ -110,9 +109,8 @@ rte_ipv4_frag_reassemble_packet(struct rte_ip_frag_tbl *tbl, ip_ofs = 
(uint16_t)(flag_offset & RTE_IPV4_HDR_OFFSET_MASK); ip_flag = (uint16_t)(flag_offset & RTE_IPV4_HDR_MF_FLAG); - psd = (unaligned_uint64_t *)&ip_hdr->src_addr; /* use first 8 bytes only */ - key.src_dst[0] = psd[0]; + memcpy(&key.src_dst[0], &ip_hdr->src_addr, 8); key.id = ip_hdr->packet_id; key.key_len = IPV4_KEYLEN; diff --git a/lib/log/log.c b/lib/log/log.c index 255f757d94..eb087d601e 100644 --- a/lib/log/log.c +++ b/lib/log/log.c @@ -2,6 +2,7 @@ * Copyright(c) 2010-2014 Intel Corporation */ +#include #include #include #include @@ -11,31 +12,39 @@ #include #include #include +#include +#include #include #include -#include "log_internal.h" - #ifdef RTE_EXEC_ENV_WINDOWS -#define strdup _strdup +#include #endif +#include "log_internal.h" +#include "log_private.h" + struct rte_log_dynamic_type { const char *name; uint32_t loglevel; }; +/* Note: same as vfprintf() */ +typedef int (*log_print_t)(FILE *f, const char *fmt, va_list ap); + /** The rte_log structure. */ static struct rte_logs { uint32_t type; /**< Bitfield with enabled logs. */ uint32_t level; /**< Log level. */ FILE *file; /**< Output file set by rte_openlog_stream, or NULL. */ + log_print_t print_func; size_t dynamic_types_len; struct rte_log_dynamic_type *dynamic_types; } rte_logs = { .type = UINT32_MAX, .level = RTE_LOG_DEBUG, + .print_func = vfprintf, }; struct rte_eal_opt_loglevel { @@ -55,9 +64,6 @@ TAILQ_HEAD(rte_eal_opt_loglevel_list, rte_eal_opt_loglevel); static struct rte_eal_opt_loglevel_list opt_loglevel_list = TAILQ_HEAD_INITIALIZER(opt_loglevel_list); -/* Stream to use for logging if rte_logs.file is NULL */ -static FILE *default_log_stream; - /** * This global structure stores some information about the message * that is currently being processed by one lcore @@ -70,13 +76,12 @@ struct log_cur_msg { /* per core log */ static RTE_DEFINE_PER_LCORE(struct log_cur_msg, log_cur_msg); -/* default logs */ - /* Change the stream that will be used by logging system */ int rte_openlog_stream(FILE *f) { rte_logs.file = f; + rte_logs.print_func = vfprintf; return 0; } @@ -85,17 +90,7 @@ rte_log_get_stream(void) { FILE *f = rte_logs.file; - if (f == NULL) { - /* - * Grab the current value of stderr here, rather than - * just initializing default_log_stream to stderr. This - * ensures that we will always use the current value - * of stderr, even if the application closes and - * reopens it. - */ - return default_log_stream != NULL ? default_log_stream : stderr; - } - return f; + return (f == NULL) ? stderr : f; } /* Set global log level */ @@ -483,7 +478,7 @@ rte_vlog(uint32_t level, uint32_t logtype, const char *format, va_list ap) RTE_PER_LCORE(log_cur_msg).loglevel = level; RTE_PER_LCORE(log_cur_msg).logtype = logtype; - ret = vfprintf(f, format, ap); + ret = (*rte_logs.print_func)(f, format, ap); fflush(f); return ret; } @@ -506,12 +501,42 @@ rte_log(uint32_t level, uint32_t logtype, const char *format, ...) } /* - * Called by environment-specific initialization functions. + * Called by rte_eal_init */ void -eal_log_set_default(FILE *default_log) +eal_log_init(const char *id) { - default_log_stream = default_log; + /* If user has already set a log stream, then use it. 
*/ + if (rte_logs.file == NULL) { + FILE *logf = NULL; + + /* if stderr is associated with systemd environment */ + if (log_journal_enabled()) + logf = log_journal_open(id); + /* If --syslog option was passed */ + else if (log_syslog_enabled()) + logf = log_syslog_open(id); + + /* if either syslog or journal is used, then no special handling */ + if (logf) { + rte_openlog_stream(logf); + } else { + bool is_terminal = isatty(fileno(stderr)); + bool use_color = log_color_enabled(is_terminal); + + if (log_timestamp_enabled()) { + if (use_color) + rte_logs.print_func = color_print_with_timestamp; + else + rte_logs.print_func = log_print_with_timestamp; + } else { + if (use_color) + rte_logs.print_func = color_print; + else + rte_logs.print_func = vfprintf; + } + } + } #if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG RTE_LOG(NOTICE, EAL, @@ -525,8 +550,11 @@ eal_log_set_default(FILE *default_log) void rte_eal_log_cleanup(void) { - if (default_log_stream) { - fclose(default_log_stream); - default_log_stream = NULL; - } + FILE *log_stream = rte_logs.file; + + /* don't close stderr on the application */ + if (log_stream != NULL) + fclose(log_stream); + + rte_logs.file = NULL; } diff --git a/lib/log/log_color.c b/lib/log/log_color.c new file mode 100644 index 0000000000..04eb5aa485 --- /dev/null +++ b/lib/log/log_color.c @@ -0,0 +1,214 @@ +/* SPDX-License-Identifier: BSD-3-Clause */ + +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#ifdef RTE_EXEC_ENV_WINDOWS +#include +#endif + +#include "log_internal.h" +#include "log_private.h" + +enum { + LOG_COLOR_AUTO = 0, + LOG_COLOR_NEVER, + LOG_COLOR_ALWAYS, +} log_color_mode = LOG_COLOR_NEVER; + +enum color { + COLOR_NONE, + COLOR_RED, + COLOR_GREEN, + COLOR_YELLOW, + COLOR_BLUE, + COLOR_MAGENTA, + COLOR_CYAN, + COLOR_WHITE, + COLOR_BOLD, + COLOR_CLEAR, +}; + +enum log_field { + LOG_FIELD_SUBSYS, + LOG_FIELD_TIME, + LOG_FIELD_ALERT, + LOG_FIELD_ERROR, + LOG_FIELD_INFO, +}; + +static const enum color field_colors[] = { + [LOG_FIELD_SUBSYS] = COLOR_YELLOW, + [LOG_FIELD_TIME] = COLOR_GREEN, + [LOG_FIELD_ALERT] = COLOR_RED, + [LOG_FIELD_ERROR] = COLOR_BOLD, + [LOG_FIELD_INFO] = COLOR_NONE, +}; + +/* If set all colors are bolder */ +static bool dark_mode; + +/* Standard terminal escape codes for colors and bold */ +static const uint8_t color_esc_code[] = { + [COLOR_RED] = 31, + [COLOR_GREEN] = 32, + [COLOR_YELLOW] = 33, + [COLOR_BLUE] = 34, + [COLOR_MAGENTA] = 35, + [COLOR_CYAN] = 36, + [COLOR_WHITE] = 37, + [COLOR_BOLD] = 1, +}; + +__rte_format_printf(4, 5) +static int +color_snprintf(char *buf, size_t len, enum log_field field, + const char *fmt, ...) +{ + enum color color = field_colors[field]; + uint8_t esc = color_esc_code[color]; + va_list args; + int ret = 0; + + va_start(args, fmt); + if (esc == 0) { + ret = vsnprintf(buf, len, fmt, args); + } else { + ret = snprintf(buf, len, + dark_mode ? "\033[1;%um" : "\033[%um", esc); + ret += vsnprintf(buf + ret, len - ret, fmt, args); + ret += snprintf(buf + ret, len - ret, "%s", "\033[0m"); + } + va_end(args); + + return ret; +} + +/* + * Controls whether color is enabled: + * modes are: + * always - enable color output regardless + * auto - enable if stderr is a terminal + * never - color output is disabled. 
+ */ +int +eal_log_color(const char *mode) +{ + if (mode == NULL || strcmp(mode, "always") == 0) + log_color_mode = LOG_COLOR_ALWAYS; + else if (strcmp(mode, "never") == 0) + log_color_mode = LOG_COLOR_NEVER; + else if (strcmp(mode, "auto") == 0) + log_color_mode = LOG_COLOR_AUTO; + else + return -1; + + return 0; +} + +bool +log_color_enabled(bool is_terminal) +{ + char *env, *sep; + + /* Set dark mode using the defacto heuristics used by other programs */ + env = getenv("COLORFGBG"); + if (env) { + sep = strrchr(env, ';'); + if (sep && + ((sep[1] >= '0' && sep[1] <= '6') || sep[1] == '8') && + sep[2] == '\0') + dark_mode = true; + } + + if (log_color_mode == LOG_COLOR_ALWAYS) + return true; + else if (log_color_mode == LOG_COLOR_AUTO) + return is_terminal; + else + return false; +} + +/* Look ast the current message level to determine color of field */ +static enum log_field +color_msg_field(void) +{ + const int level = rte_log_cur_msg_loglevel(); + + if (level <= 0 || level >= (int)RTE_LOG_INFO) + return LOG_FIELD_INFO; + else if (level >= (int)RTE_LOG_ERR) + return LOG_FIELD_ERROR; + else + return LOG_FIELD_ALERT; +} + +__rte_format_printf(3, 0) +static int +color_fmt_msg(char *out, size_t len, const char *format, va_list ap) +{ + enum log_field field = color_msg_field(); + char buf[LINE_MAX]; + int ret = 0; + + /* format raw message */ + vsnprintf(buf, sizeof(buf), format, ap); + const char *msg = buf; + + /* + * use convention that first part of message (up to the ':' character) + * is the subsystem id and should be highlighted. + */ + const char *cp = strchr(msg, ':'); + if (cp) { + /* print first part in yellow */ + ret = color_snprintf(out, len, LOG_FIELD_SUBSYS, + "%.*s", (int)(cp - msg + 1), msg); + /* skip the first part */ + msg = cp + 1; + } + + ret += color_snprintf(out + ret, len - ret, field, "%s", msg); + return ret; +} + +__rte_format_printf(2, 0) +int +color_print(FILE *f, const char *format, va_list ap) +{ + char out[LINE_MAX]; + + /* format raw message */ + int ret = color_fmt_msg(out, sizeof(out), format, ap); + if (fputs(out, f) < 0) + return -1; + + return ret; +} + +__rte_format_printf(2, 0) +int +color_print_with_timestamp(FILE *f, const char *format, va_list ap) +{ + char out[LINE_MAX]; + char tsbuf[128]; + int ret = 0; + + if (log_timestamp(tsbuf, sizeof(tsbuf)) > 0) + ret = color_snprintf(out, sizeof(out), + LOG_FIELD_TIME, "[%s] ", tsbuf); + + ret += color_fmt_msg(out + ret, sizeof(out) - ret, format, ap); + if (fputs(out, f) < 0) + return -1; + + return ret; +} diff --git a/lib/log/log_freebsd.c b/lib/log/log_freebsd.c deleted file mode 100644 index 698d3c5423..0000000000 --- a/lib/log/log_freebsd.c +++ /dev/null @@ -1,12 +0,0 @@ -/* SPDX-License-Identifier: BSD-3-Clause - * Copyright(c) 2023 Intel Corporation - */ - -#include -#include "log_internal.h" - -int -eal_log_init(__rte_unused const char *id, __rte_unused int facility) -{ - return 0; -} diff --git a/lib/log/log_internal.h b/lib/log/log_internal.h index 451629f1c1..bba7041ea3 100644 --- a/lib/log/log_internal.h +++ b/lib/log/log_internal.h @@ -5,8 +5,10 @@ #ifndef LOG_INTERNAL_H #define LOG_INTERNAL_H +#include #include #include +#include #include @@ -14,13 +16,7 @@ * Initialize the default log stream. */ __rte_internal -int eal_log_init(const char *id, int facility); - -/* - * Determine where log data is written when no call to rte_openlog_stream. - */ -__rte_internal -void eal_log_set_default(FILE *default_log); +void eal_log_init(const char *id); /* * Save a log option for later. 
@@ -30,6 +26,12 @@ int eal_log_save_regexp(const char *regexp, uint32_t level); __rte_internal int eal_log_save_pattern(const char *pattern, uint32_t level); +__rte_internal +int eal_log_syslog(const char *name); + +__rte_internal +int eal_log_journal(const char *opt); + /* * Convert log level to string. */ @@ -42,4 +44,16 @@ const char *eal_log_level2str(uint32_t level); __rte_internal void rte_eal_log_cleanup(void); +/* + * Add timestamp to console logs + */ +__rte_internal +int eal_log_timestamp(const char *fmt); + +/* + * Enable or disable color in log messages + */ +__rte_internal +int eal_log_color(const char *mode); + #endif /* LOG_INTERNAL_H */ diff --git a/lib/log/log_journal.c b/lib/log/log_journal.c new file mode 100644 index 0000000000..e9b3aa5640 --- /dev/null +++ b/lib/log/log_journal.c @@ -0,0 +1,153 @@ +/* SPDX-License-Identifier: BSD-3-Clause */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "log_private.h" + +/* + * Send structured message using journal protocol + * See: https://systemd.io/JOURNAL_NATIVE_PROTOCOL/ + * + * Uses writev() to ensure that whole log message is in one datagram + */ +static int +journal_send(int fd, const char *buf, size_t len) +{ + struct iovec iov[4]; + unsigned int n = 0; + int priority = rte_log_cur_msg_loglevel() - 1; + char msg[] = "MESSAGE="; + char newline = '\n'; + char pbuf[32]; /* "PRIORITY=N\n" */ + + iov[n].iov_base = msg; + iov[n++].iov_len = strlen(msg); + + iov[n].iov_base = (char *)(uintptr_t)buf; + iov[n++].iov_len = len; + + /* if message doesn't end with newline, one will be applied. */ + if (buf[len - 1] != '\n') { + iov[n].iov_base = &newline; + iov[n++].iov_len = 1; + } + + /* priority value between 0 ("emerg") and 7 ("debug") */ + iov[n].iov_base = pbuf; + iov[n++].iov_len = snprintf(pbuf, sizeof(pbuf), + "PRIORITY=%d\n", priority); + return writev(fd, iov, n); +} + + +/* wrapper for log stream to put messages into journal */ +static ssize_t +journal_log_write(void *c, const char *buf, size_t size) +{ + int fd = (uintptr_t)c; + + return journal_send(fd, buf, size); +} + +static int +journal_log_close(void *c) +{ + int fd = (uintptr_t)c; + + close(fd); + return 0; +} + +static cookie_io_functions_t journal_log_func = { + .write = journal_log_write, + .close = journal_log_close, +}; + +/* + * Check if stderr is going to system journal. 
+ * This is the documented way to handle systemd journal + * + * See: https://systemd.io/JOURNAL_NATIVE_PROTOCOL/ + */ +bool +log_journal_enabled(void) +{ + char *jenv, *endp = NULL; + struct stat st; + unsigned long dev, ino; + + jenv = getenv("JOURNAL_STREAM"); + if (jenv == NULL) + return false; + + if (fstat(STDERR_FILENO, &st) < 0) + return false; + + /* systemd sets colon-separated list of device and inode number */ + dev = strtoul(jenv, &endp, 10); + if (endp == NULL || *endp != ':') + return false; /* missing colon */ + + ino = strtoul(endp + 1, NULL, 10); + + return dev == st.st_dev && ino == st.st_ino; +} + +/* Connect to systemd's journal service */ +FILE * +log_journal_open(const char *id) +{ + char syslog_id[PATH_MAX]; + FILE *log_stream; + int len; + struct sockaddr_un sun = { + .sun_family = AF_UNIX, + .sun_path = "/run/systemd/journal/socket", + }; + int jfd = -1; + + len = snprintf(syslog_id, sizeof(syslog_id), + "SYSLOG_IDENTIFIER=%s\nSYSLOG_PID=%u", id, getpid()); + + /* Detect truncation of message and fallback to no journal */ + if (len >= (int)sizeof(syslog_id)) + return NULL; + + jfd = socket(AF_UNIX, SOCK_DGRAM, 0); + if (jfd < 0) { + perror("socket"); + goto error; + } + + if (connect(jfd, (struct sockaddr *)&sun, sizeof(sun)) < 0) { + perror("connect"); + goto error; + } + + /* Send identifier as first message */ + if (write(jfd, syslog_id, len) != len) { + perror("write"); + goto error; + } + + /* redirect other log messages to journal */ + log_stream = fopencookie((void *)(uintptr_t)jfd, "w", journal_log_func); + if (log_stream != NULL) + return log_stream; + +error: + close(jfd); + return NULL; +} diff --git a/lib/log/log_linux.c b/lib/log/log_linux.c deleted file mode 100644 index 2dfb0c974b..0000000000 --- a/lib/log/log_linux.c +++ /dev/null @@ -1,61 +0,0 @@ -/* SPDX-License-Identifier: BSD-3-Clause - * Copyright(c) 2010-2014 Intel Corporation - */ - -#include -#include -#include - -#include - -#include "log_internal.h" - -/* - * default log function - */ -static ssize_t -console_log_write(__rte_unused void *c, const char *buf, size_t size) -{ - ssize_t ret; - - /* write on stderr */ - ret = fwrite(buf, 1, size, stderr); - fflush(stderr); - - /* Syslog error levels are from 0 to 7, so subtract 1 to convert */ - syslog(rte_log_cur_msg_loglevel() - 1, "%.*s", (int)size, buf); - - return ret; -} - -static int -console_log_close(__rte_unused void *c) -{ - closelog(); - return 0; -} - -static cookie_io_functions_t console_log_func = { - .write = console_log_write, - .close = console_log_close, -}; - -/* - * set the log to default function, called during eal init process, - * once memzones are available. 
- */ -int -eal_log_init(const char *id, int facility) -{ - FILE *log_stream; - - log_stream = fopencookie(NULL, "w+", console_log_func); - if (log_stream == NULL) - return -1; - - openlog(id, LOG_NDELAY | LOG_PID, facility); - - eal_log_set_default(log_stream); - - return 0; -} diff --git a/lib/log/log_private.h b/lib/log/log_private.h new file mode 100644 index 0000000000..f275346c7b --- /dev/null +++ b/lib/log/log_private.h @@ -0,0 +1,57 @@ +/* SPDX-License-Identifier: BSD-3-Clause */ + +#ifndef LOG_PRIVATE_H +#define LOG_PRIVATE_H + +/* Defined in limits.h on Linux */ +#ifndef LINE_MAX +#define LINE_MAX 2048 /* _POSIX2_LINE_MAX */ +#endif + +#ifdef RTE_EXEC_ENV_WINDOWS +static inline bool +log_syslog_enabled(void) +{ + return false; +} +static inline FILE * +log_syslog_open(const char *id __rte_unused) +{ + return NULL; +} +#else +bool log_syslog_enabled(void); +FILE *log_syslog_open(const char *id); +#endif + +#ifdef RTE_EXEC_ENV_LINUX +bool log_journal_enabled(void); +FILE *log_journal_open(const char *id); +#else +static inline bool +log_journal_enabled(void) +{ + return false; +} +static inline FILE * +log_journal_open(const char *id __rte_unused) +{ + return NULL; +} +#endif /* !RTE_EXEC_ENV_LINUX */ + +bool log_timestamp_enabled(void); +ssize_t log_timestamp(char *tsbuf, size_t tsbuflen); + +__rte_format_printf(2, 0) +int log_print_with_timestamp(FILE *f, const char *format, va_list ap); + +bool log_color_enabled(bool is_tty); + +__rte_format_printf(2, 0) +int color_print(FILE *f, const char *format, va_list ap); + +__rte_format_printf(2, 0) +int color_print_with_timestamp(FILE *f, const char *format, va_list ap); + +#endif /* LOG_PRIVATE_H */ diff --git a/lib/log/log_syslog.c b/lib/log/log_syslog.c new file mode 100644 index 0000000000..6b34831bf3 --- /dev/null +++ b/lib/log/log_syslog.c @@ -0,0 +1,108 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2010-2014 Intel Corporation + */ + +#include +#include +#include +#include +#include + +#include +#include + +#include "log_internal.h" +#include "log_private.h" + +static int log_facility; + +/* + * Usable list of facilities + * Skip kern, mark, and security + */ +static const struct { + const char *name; + int value; +} facilitys[] = { + { "auth", LOG_AUTH }, + { "cron", LOG_CRON }, + { "daemon", LOG_DAEMON }, + { "ftp", LOG_FTP }, + { "kern", LOG_KERN }, + { "lpr", LOG_LPR }, + { "mail", LOG_MAIL }, + { "news", LOG_NEWS }, + { "syslog", LOG_SYSLOG }, + { "user", LOG_USER }, + { "uucp", LOG_UUCP }, + { "local0", LOG_LOCAL0 }, + { "local1", LOG_LOCAL1 }, + { "local2", LOG_LOCAL2 }, + { "local3", LOG_LOCAL3 }, + { "local4", LOG_LOCAL4 }, + { "local5", LOG_LOCAL5 }, + { "local6", LOG_LOCAL6 }, + { "local7", LOG_LOCAL7 }, +}; + +int +eal_log_syslog(const char *name) +{ + unsigned int i; + + if (name == NULL) { + log_facility = LOG_DAEMON; + return 0; + } + + for (i = 0; i < RTE_DIM(facilitys); i++) { + if (!strcmp(name, facilitys[i].name)) { + log_facility = facilitys[i].value; + return 0; + } + } + return -1; +} + +/* syslog is enabled if facility is set */ +bool +log_syslog_enabled(void) +{ + return log_facility != 0; /* LOG_KERN is 0 */ +} + +/* + * default log function + */ +static ssize_t +log_syslog_write(__rte_unused void *c, const char *buf, size_t size) +{ + /* Syslog error levels are from 0 to 7, so subtract 1 to convert */ + syslog(rte_log_cur_msg_loglevel() - 1, "%.*s", (int)size, buf); + + return size; +} + +static int +log_syslog_close(__rte_unused void *c) +{ + closelog(); + return 0; +} + +static 
cookie_io_functions_t log_syslog_func = { + .write = log_syslog_write, + .close = log_syslog_close, +}; + + +FILE * +log_syslog_open(const char *id) +{ + int option = LOG_CONS | LOG_NDELAY | LOG_PID | LOG_PERROR; + + openlog(id, option, log_facility); + + /* redirect other log messages to syslog as well */ + return fopencookie(NULL, "w", log_syslog_func); +} diff --git a/lib/log/log_timestamp.c b/lib/log/log_timestamp.c new file mode 100644 index 0000000000..b4b0bca6a8 --- /dev/null +++ b/lib/log/log_timestamp.c @@ -0,0 +1,240 @@ +/* SPDX-License-Identifier: BSD-3-Clause */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#ifdef RTE_EXEC_ENV_WINDOWS +#include +#endif + +#include "log_internal.h" +#include "log_private.h" + +#ifndef NS_PER_SEC +#define NS_PER_SEC 1E9 +#endif + +static enum { + LOG_TIMESTAMP_NONE = 0, + LOG_TIMESTAMP_TIME, /* time since start */ + LOG_TIMESTAMP_DELTA, /* time since last message */ + LOG_TIMESTAMP_RELTIME, /* relative time since last message */ + LOG_TIMESTAMP_CTIME, /* Unix standard time format */ + LOG_TIMESTAMP_ISO, /* ISO8601 time format */ +} log_time_format; + +static struct { + struct timespec started; /* when log was initialized */ + RTE_ATOMIC(uint64_t) last_monotonic; + RTE_ATOMIC(uint64_t) last_realtime; +} log_time; + +/* Set the log timestamp format */ +int +eal_log_timestamp(const char *str) +{ + if (str == NULL) + log_time_format = LOG_TIMESTAMP_TIME; + else if (strcmp(str, "notime") == 0) + log_time_format = LOG_TIMESTAMP_NONE; + else if (strcmp(str, "reltime") == 0) + log_time_format = LOG_TIMESTAMP_RELTIME; + else if (strcmp(str, "delta") == 0) + log_time_format = LOG_TIMESTAMP_DELTA; + else if (strcmp(str, "ctime") == 0) + log_time_format = LOG_TIMESTAMP_CTIME; + else if (strcmp(str, "iso") == 0) + log_time_format = LOG_TIMESTAMP_ISO; + else + return -1; + + return 0; +} + +bool +log_timestamp_enabled(void) +{ + return log_time_format != LOG_TIMESTAMP_NONE; +} + +/* Subtract two timespec values and handle wraparound */ +static struct timespec +timespec_sub(const struct timespec *t0, const struct timespec *t1) +{ + struct timespec ts; + + ts.tv_sec = t0->tv_sec - t1->tv_sec; + ts.tv_nsec = t0->tv_nsec - t1->tv_nsec; + if (ts.tv_nsec < 0) { + ts.tv_sec--; + ts.tv_nsec += 1000000000L; + } + return ts; +} + +/* + * Format current timespec into ISO8601 format. + * Surprisingly, can't just use strftime() for this; + * since want microseconds and the timezone offset format differs. + */ +static ssize_t +format_iso8601(char *tsbuf, size_t tsbuflen, const struct timespec *now) +{ + struct tm *tm, tbuf; + char dbuf[64]; /* "2024-05-01T22:11:00" */ + char zbuf[16] = { }; /* "+0800" */ + + tm = localtime_r(&now->tv_sec, &tbuf); + + /* make "2024-05-01T22:11:00,123456+0100" */ + if (strftime(dbuf, sizeof(dbuf), "%Y-%m-%dT%H:%M:%S", tm) == 0) + return 0; + + /* convert timezone to +hhmm */ + if (strftime(zbuf, sizeof(zbuf), "%z", tm) == 0) + return 0; + + /* the result for strftime is "+hhmm" but ISO wants "+hh:mm" */ + return snprintf(tsbuf, tsbuflen, "%s,%06lu%.3s:%.2s", + dbuf, now->tv_nsec / 1000u, + zbuf, zbuf + 3); +} + +/* + * Format a timestamp which shows time between messages. 
+ */ +static ssize_t +format_delta(char *tsbuf, size_t tsbuflen, const struct timespec *now) +{ + struct timespec delta; + uint64_t ns = rte_timespec_to_ns(now); + uint64_t previous; + + previous = rte_atomic_exchange_explicit(&log_time.last_monotonic, + ns, rte_memory_order_seq_cst); + delta = rte_ns_to_timespec(ns - previous); + + return snprintf(tsbuf, tsbuflen, "<%6lu.%06lu>", + (unsigned long)delta.tv_sec, + (unsigned long)delta.tv_nsec / 1000u); +} + +/* + * Make a timestamp where if the minute, hour or day has + * changed from the last message, then print abbreviated + * "Month day hour:minute" format. + * Otherwise print delta from last printed message as +sec.usec + */ +static ssize_t +format_reltime(char *tsbuf, size_t tsbuflen, const struct timespec *now) +{ + struct tm *tm, *last_tm, tbuf1, tbuf2; + time_t last_sec; + uint64_t ns = rte_timespec_to_ns(now); + uint64_t previous; + + tm = localtime_r(&now->tv_sec, &tbuf1); + + previous = rte_atomic_exchange_explicit(&log_time.last_realtime, + ns, rte_memory_order_seq_cst); + last_sec = previous / NS_PER_SEC; + last_tm = localtime_r(&last_sec, &tbuf2); + if (tm->tm_min == last_tm->tm_min && + tm->tm_hour == last_tm->tm_hour && + tm->tm_yday == last_tm->tm_yday) { + struct timespec elapsed; + + elapsed = rte_ns_to_timespec(ns - previous); + + return snprintf(tsbuf, tsbuflen, "+%3lu.%06lu", + (unsigned long)elapsed.tv_sec, + (unsigned long)elapsed.tv_nsec / 1000u); + } else { + return strftime(tsbuf, tsbuflen, "%b%d %H:%M", tm); + } +} + +/* Format up a timestamp based on current format */ +ssize_t +log_timestamp(char *tsbuf, size_t tsbuflen) +{ + struct timespec now, delta; + + switch (log_time_format) { + case LOG_TIMESTAMP_NONE: + return 0; + + case LOG_TIMESTAMP_TIME: + if (clock_gettime(CLOCK_MONOTONIC, &now) < 0) + return 0; + + delta = timespec_sub(&now, &log_time.started); + + return snprintf(tsbuf, tsbuflen, "%6lu.%06lu", + (unsigned long)delta.tv_sec, + (unsigned long)delta.tv_nsec / 1000u); + + case LOG_TIMESTAMP_DELTA: + if (clock_gettime(CLOCK_MONOTONIC, &now) < 0) + return 0; + + return format_delta(tsbuf, tsbuflen, &now); + + case LOG_TIMESTAMP_RELTIME: + if (clock_gettime(CLOCK_REALTIME, &now) < 0) + return 0; + + return format_reltime(tsbuf, tsbuflen, &now); + + case LOG_TIMESTAMP_CTIME: + if (clock_gettime(CLOCK_REALTIME, &now) < 0) + return 0; + + /* trncate to remove newline from ctime result */ + return snprintf(tsbuf, tsbuflen, "%.24s", ctime(&now.tv_sec)); + + case LOG_TIMESTAMP_ISO: + if (clock_gettime(CLOCK_REALTIME, &now) < 0) + return 0; + + return format_iso8601(tsbuf, tsbuflen, &now); + } + + return 0; +} + +/* print timestamp before message */ +int +log_print_with_timestamp(FILE *f, const char *format, va_list ap) +{ + char tsbuf[128]; + char msgbuf[LINE_MAX]; + + if (log_timestamp(tsbuf, sizeof(tsbuf)) > 0) { + vsnprintf(msgbuf, sizeof(msgbuf), format, ap); + return fprintf(f, "[%s] %s", tsbuf, msgbuf); + } + + /* fall back when timestamp is unavailable */ + return vfprintf(f, format, ap); +} + +RTE_INIT_PRIO(log_timestamp_init, LOG) +{ + struct timespec now; + + clock_gettime(CLOCK_MONOTONIC, &now); + log_time.started = now; + rte_atomic_store_explicit(&log_time.last_monotonic, rte_timespec_to_ns(&now), + rte_memory_order_seq_cst); +} diff --git a/lib/log/log_windows.c b/lib/log/log_windows.c deleted file mode 100644 index a6a0889550..0000000000 --- a/lib/log/log_windows.c +++ /dev/null @@ -1,18 +0,0 @@ -/* SPDX-License-Identifier: BSD-3-Clause - * Copyright(c) 2017-2018 Intel Corporation - */ - 
-#include -#include -#include "log_internal.h" - -/* set the log to default function, called during eal init process. */ -int -eal_log_init(__rte_unused const char *id, __rte_unused int facility) -{ - rte_openlog_stream(stderr); - - eal_log_set_default(stderr); - - return 0; -} diff --git a/lib/log/meson.build b/lib/log/meson.build index 0d4319b36f..b3de57b9c7 100644 --- a/lib/log/meson.build +++ b/lib/log/meson.build @@ -4,6 +4,16 @@ includes += global_inc sources = files( 'log.c', - 'log_' + exec_env + '.c', + 'log_color.c', + 'log_timestamp.c', ) + +if not is_windows + sources += files('log_syslog.c') +endif + +if is_linux + sources += files('log_journal.c') +endif + headers = files('rte_log.h') diff --git a/lib/log/version.map b/lib/log/version.map index 19d7f9cdb6..09d8a4289b 100644 --- a/lib/log/version.map +++ b/lib/log/version.map @@ -25,10 +25,13 @@ DPDK_25 { INTERNAL { global: + eal_log_color; eal_log_init; + eal_log_journal; # WINDOWS_NO_EXPORT eal_log_level2str; eal_log_save_pattern; eal_log_save_regexp; - eal_log_set_default; + eal_log_syslog; # WINDOWS_NO_EXPORT + eal_log_timestamp; rte_eal_log_cleanup; }; diff --git a/lib/mbuf/rte_mbuf_ptype.c b/lib/mbuf/rte_mbuf_ptype.c index d6f906b06c..ab180b3dda 100644 --- a/lib/mbuf/rte_mbuf_ptype.c +++ b/lib/mbuf/rte_mbuf_ptype.c @@ -50,6 +50,7 @@ const char *rte_get_ptype_l4_name(uint32_t ptype) case RTE_PTYPE_L4_ICMP: return "L4_ICMP"; case RTE_PTYPE_L4_NONFRAG: return "L4_NONFRAG"; case RTE_PTYPE_L4_IGMP: return "L4_IGMP"; + case RTE_PTYPE_L4_ESP: return "L4_ESP"; default: return "L4_UNKNOWN"; } } @@ -112,6 +113,7 @@ const char *rte_get_ptype_inner_l4_name(uint32_t ptype) case RTE_PTYPE_INNER_L4_SCTP: return "INNER_L4_SCTP"; case RTE_PTYPE_INNER_L4_ICMP: return "INNER_L4_ICMP"; case RTE_PTYPE_INNER_L4_NONFRAG: return "INNER_L4_NONFRAG"; + case RTE_PTYPE_INNER_L4_ESP: return "INNER_L4_ESP"; default: return "INNER_L4_UNKNOWN"; } } diff --git a/lib/mbuf/rte_mbuf_ptype.h b/lib/mbuf/rte_mbuf_ptype.h index f2276e2909..c46a94f89f 100644 --- a/lib/mbuf/rte_mbuf_ptype.h +++ b/lib/mbuf/rte_mbuf_ptype.h @@ -247,7 +247,7 @@ extern "C" { * It refers to those packets of any IP types, which can be recognized as * fragmented. A fragmented packet cannot be recognized as any other L4 types * (RTE_PTYPE_L4_TCP, RTE_PTYPE_L4_UDP, RTE_PTYPE_L4_SCTP, RTE_PTYPE_L4_ICMP, - * RTE_PTYPE_L4_NONFRAG). + * RTE_PTYPE_L4_NONFRAG, RTE_PTYPE_L4_IGMP, RTE_PTYPE_L4_ESP). * * Packet format: * <'ether type'=0x0800 @@ -290,14 +290,15 @@ extern "C" { * * It refers to those packets of any IP types, while cannot be recognized as * any of above L4 types (RTE_PTYPE_L4_TCP, RTE_PTYPE_L4_UDP, - * RTE_PTYPE_L4_FRAG, RTE_PTYPE_L4_SCTP, RTE_PTYPE_L4_ICMP). + * RTE_PTYPE_L4_FRAG (for IPv6), RTE_PTYPE_L4_SCTP, RTE_PTYPE_L4_ICMP, + * RTE_PTYPE_L4_IGMP (for IPv4), RTE_PTYPE_L4_ESP). * * Packet format: * <'ether type'=0x0800 - * | 'version'=4, 'protocol'!=[6|17|132|1], 'MF'=0, 'frag_offset'=0> + * | 'version'=4, 'protocol'!=[1|2|6|17|50|132], 'MF'=0, 'frag_offset'=0> * or, * <'ether type'=0x86DD - * | 'version'=6, 'next header'!=[6|17|44|132|1]> + * | 'version'=6, 'next header'!=[1|6|17|44|50|132]> */ #define RTE_PTYPE_L4_NONFRAG 0x00000600 /** @@ -308,6 +309,17 @@ extern "C" { * | 'version'=4, 'protocol'=2, 'MF'=0, 'frag_offset'=0> */ #define RTE_PTYPE_L4_IGMP 0x00000700 +/** + * ESP (IP Encapsulating Security Payload) transport packet type. 
+ * + * Packet format: + * <'ether type'=0x0800 + * | 'version'=4, 'protocol'=50, 'MF'=0, 'frag_offset'=0> + * or, + * <'ether type'=0x86DD + * | 'version'=6, 'next header'=50> + */ +#define RTE_PTYPE_L4_ESP 0x00000800 /** * Mask of layer 4 packet types. * It is used for outer packet for tunneling cases. @@ -652,12 +664,24 @@ extern "C" { * * Packet format (inner only): * <'ether type'=0x0800 - * | 'version'=4, 'protocol'!=[6|17|132|1], 'MF'=0, 'frag_offset'=0> + * | 'version'=4, 'protocol'!=[1|6|17|50|132], 'MF'=0, 'frag_offset'=0> * or, * <'ether type'=0x86DD - * | 'version'=6, 'next header'!=[6|17|44|132|1]> + * | 'version'=6, 'next header'!=[1|6|17|44|50|132]> */ #define RTE_PTYPE_INNER_L4_NONFRAG 0x06000000 +/** + * ESP (IP Encapsulating Security Payload) transport packet type. + * It is used for inner packet only. + * + * Packet format (inner only): + * <'ether type'=0x0800 + * | 'version'=4, 'protocol'=50, 'MF'=0, 'frag_offset'=0> + * or, + * <'ether type'=0x86DD + * | 'version'=6, 'next header'=50> + */ +#define RTE_PTYPE_INNER_L4_ESP 0x08000000 /** * Mask of inner layer 4 packet types. */ diff --git a/lib/net/rte_ip4.h b/lib/net/rte_ip4.h index d9840b3cff..f9b8333332 100644 --- a/lib/net/rte_ip4.h +++ b/lib/net/rte_ip4.h @@ -39,7 +39,7 @@ extern "C" { /** * IPv4 Header */ -struct rte_ipv4_hdr { +struct __rte_aligned(2) rte_ipv4_hdr { __extension__ union { uint8_t version_ihl; /**< version and header length */ @@ -163,6 +163,41 @@ rte_ipv4_cksum(const struct rte_ipv4_hdr *ipv4_hdr) return (uint16_t)~cksum; } +/** + * @warning + * @b EXPERIMENTAL: this API may change without prior notice. + * + * Process the IPv4 checksum of an IPv4 header without any extensions. + * + * The checksum field does NOT have to be set by the caller, the field + * is skipped by the calculation. + * + * @param ipv4_hdr + * The pointer to the contiguous IPv4 header. + * @return + * The complemented checksum to set in the IP packet. + */ +__rte_experimental +static inline uint16_t +rte_ipv4_cksum_simple(const struct rte_ipv4_hdr *ipv4_hdr) +{ + const uint16_t *v16_h; + uint32_t ip_cksum; + + /* + * Compute the sum of successive 16-bit words of the IPv4 header, + * skipping the checksum field of the header. + */ + v16_h = (const uint16_t *)ipv4_hdr; + ip_cksum = v16_h[0] + v16_h[1] + v16_h[2] + v16_h[3] + + v16_h[4] + v16_h[6] + v16_h[7] + v16_h[8] + v16_h[9]; + + /* reduce 32 bit checksum to 16 bits and complement it */ + ip_cksum = (ip_cksum & 0xffff) + (ip_cksum >> 16); + ip_cksum = (ip_cksum & 0xffff) + (ip_cksum >> 16); + return (uint16_t)(~ip_cksum); +} + /** * Process the pseudo-header checksum of an IPv4 header. 
* diff --git a/lib/net/rte_ip6.h b/lib/net/rte_ip6.h index 3ae38811b2..992ab5ee1f 100644 --- a/lib/net/rte_ip6.h +++ b/lib/net/rte_ip6.h @@ -84,11 +84,12 @@ static inline void rte_ipv6_addr_mask(struct rte_ipv6_addr *ip, uint8_t depth) { if (depth < RTE_IPV6_MAX_DEPTH) { - uint8_t d = depth / 8; - uint8_t mask = ~(UINT8_MAX >> (depth % 8)); + unsigned int d = depth / CHAR_BIT; + uint8_t mask = ~(UINT8_MAX >> (depth % CHAR_BIT)); ip->a[d] &= mask; d++; - memset(&ip->a[d], 0, sizeof(*ip) - d); + while (d < sizeof(*ip)) + ip->a[d++] = 0; } } @@ -108,8 +109,8 @@ static inline bool rte_ipv6_addr_eq_prefix(const struct rte_ipv6_addr *a, const struct rte_ipv6_addr *b, uint8_t depth) { if (depth < RTE_IPV6_MAX_DEPTH) { - uint8_t d = depth / 8; - uint8_t mask = ~(UINT8_MAX >> (depth % 8)); + unsigned int d = depth / CHAR_BIT; + uint8_t mask = ~(UINT8_MAX >> (depth % CHAR_BIT)); if ((a->a[d] ^ b->a[d]) & mask) return false; @@ -460,7 +461,7 @@ rte_ether_mcast_from_ipv6(struct rte_ether_addr *mac, const struct rte_ipv6_addr /** * IPv6 Header */ -struct rte_ipv6_hdr { +struct __rte_aligned(2) rte_ipv6_hdr { union { rte_be32_t vtc_flow; /**< IP version, traffic class & flow label. */ __extension__ @@ -507,7 +508,7 @@ static inline int rte_ipv6_check_version(const struct rte_ipv6_hdr *ip) /** * IPv6 Routing Extension Header */ -struct rte_ipv6_routing_ext { +struct __rte_aligned(2) rte_ipv6_routing_ext { uint8_t next_hdr; /**< Protocol, next header. */ uint8_t hdr_len; /**< Header length. */ uint8_t type; /**< Extension header type. */ @@ -751,7 +752,7 @@ rte_ipv6_udptcp_cksum_mbuf_verify(const struct rte_mbuf *m, #define RTE_IPV6_SET_FRAG_DATA(fo, mf) \ (((fo) & RTE_IPV6_EHDR_FO_MASK) | ((mf) & RTE_IPV6_EHDR_MF_MASK)) -struct rte_ipv6_fragment_ext { +struct __rte_aligned(2) rte_ipv6_fragment_ext { uint8_t next_header; /**< Next header type */ uint8_t reserved; /**< Reserved */ rte_be16_t frag_data; /**< All fragmentation data */ diff --git a/lib/pipeline/rte_table_action.c b/lib/pipeline/rte_table_action.c index a7e63b9846..a431f8f128 100644 --- a/lib/pipeline/rte_table_action.c +++ b/lib/pipeline/rte_table_action.c @@ -109,7 +109,7 @@ mtr_cfg_check(struct rte_table_action_mtr_config *mtr) struct mtr_trtcm_data { struct rte_meter_trtcm trtcm; uint64_t stats[RTE_COLORS]; -} __rte_packed; +}; #define MTR_TRTCM_DATA_METER_PROFILE_ID_GET(data) \ (((data)->stats[RTE_COLOR_GREEN] & 0xF8LLU) >> 3) diff --git a/lib/power/meson.build b/lib/power/meson.build index 2f0f3d26e9..b3a7bc7b2e 100644 --- a/lib/power/meson.build +++ b/lib/power/meson.build @@ -12,22 +12,18 @@ if not is_linux reason = 'only supported on Linux' endif sources = files( - 'guest_channel.c', - 'power_acpi_cpufreq.c', - 'power_amd_pstate_cpufreq.c', 'power_common.c', - 'power_cppc_cpufreq.c', - 'power_kvm_vm.c', - 'power_intel_uncore.c', - 'power_pstate_cpufreq.c', - 'rte_power.c', - 'rte_power_uncore.c', + 'rte_power_cpufreq.c', 'rte_power_pmd_mgmt.c', + 'rte_power_qos.c', + 'rte_power_uncore.c', ) headers = files( - 'rte_power.h', - 'rte_power_guest_channel.h', + 'power_cpufreq.h', + 'power_uncore_ops.h', + 'rte_power_cpufreq.h', 'rte_power_pmd_mgmt.h', + 'rte_power_qos.h', 'rte_power_uncore.h', ) diff --git a/lib/power/power_common.c b/lib/power/power_common.c index b47c63a5f1..e482f71c64 100644 --- a/lib/power/power_common.c +++ b/lib/power/power_common.c @@ -13,7 +13,7 @@ #include "power_common.h" -RTE_LOG_REGISTER_DEFAULT(power_logtype, INFO); +RTE_LOG_REGISTER_DEFAULT(rte_power_logtype, INFO); #define POWER_SYSFILE_SCALING_DRIVER \ 
"/sys/devices/system/cpu/cpu%u/cpufreq/scaling_driver" diff --git a/lib/power/power_common.h b/lib/power/power_common.h index 82fb94d0c0..3f56b1103d 100644 --- a/lib/power/power_common.h +++ b/lib/power/power_common.h @@ -2,16 +2,17 @@ * Copyright(c) 2010-2014 Intel Corporation */ -#ifndef _POWER_COMMON_H_ -#define _POWER_COMMON_H_ +#ifndef POWER_COMMON_H +#define POWER_COMMON_H #include +#include #include #define RTE_POWER_INVALID_FREQ_INDEX (~0) -extern int power_logtype; -#define RTE_LOGTYPE_POWER power_logtype +extern int rte_power_logtype; +#define RTE_LOGTYPE_POWER rte_power_logtype #define POWER_LOG(level, ...) \ RTE_LOG_LINE(level, POWER, "" __VA_ARGS__) @@ -23,14 +24,27 @@ extern int power_logtype; #endif /* check if scaling driver matches one we want */ +__rte_internal int cpufreq_check_scaling_driver(const char *driver); + +__rte_internal int power_set_governor(unsigned int lcore_id, const char *new_governor, char *orig_governor, size_t orig_governor_len); + +__rte_internal int open_core_sysfs_file(FILE **f, const char *mode, const char *format, ...) __rte_format_printf(3, 4); + +__rte_internal int read_core_sysfs_u32(FILE *f, uint32_t *val); + +__rte_internal int read_core_sysfs_s(FILE *f, char *buf, unsigned int len); + +__rte_internal int write_core_sysfs_s(FILE *f, const char *str); + +__rte_internal int power_get_lcore_mapped_cpu_id(uint32_t lcore_id, uint32_t *cpu_id); -#endif /* _POWER_COMMON_H_ */ +#endif /* POWER_COMMON_H */ diff --git a/lib/power/power_cpufreq.h b/lib/power/power_cpufreq.h new file mode 100644 index 0000000000..92f1ab8f37 --- /dev/null +++ b/lib/power/power_cpufreq.h @@ -0,0 +1,191 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2010-2014 Intel Corporation + * Copyright(c) 2024 Advanced Micro Devices, Inc. + */ + +#ifndef POWER_CPUFREQ_H +#define POWER_CPUFREQ_H + +/** + * @file + * CPU Frequency Management + */ + +#include +#include +#include + +#define RTE_POWER_DRIVER_NAMESZ 24 + +/** + * Initialize power management for a specific lcore. If rte_power_set_env() has + * not been called then an auto-detect of the environment will start and + * initialise the corresponding resources. + * + * @param lcore_id + * lcore id. + * + * @return + * - 0 on success. + * - Negative on error. + */ +typedef int (*rte_power_cpufreq_init_t)(unsigned int lcore_id); + +/** + * Exit power management on a specific lcore. This will call the environment + * dependent exit function. + * + * @param lcore_id + * lcore id. + * + * @return + * - 0 on success. + * - Negative on error. + */ +typedef int (*rte_power_cpufreq_exit_t)(unsigned int lcore_id); + +/** + * Check if a specific power management environment type is supported on a + * currently running system. + * + * @return + * - 1 if supported + * - 0 if unsupported + * - -1 if error, with rte_errno indicating reason for error. + */ +typedef int (*rte_power_check_env_support_t)(void); + +/** + * Get the available frequencies of a specific lcore. + * Function pointer definition. Review each environments + * specific documentation for usage. + * + * @param lcore_id + * lcore id. + * @param freqs + * The buffer array to save the frequencies. + * @param num + * The number of frequencies to get. + * + * @return + * The number of available frequencies. + */ +typedef uint32_t (*rte_power_freqs_t)(unsigned int lcore_id, + uint32_t *freqs, uint32_t num); + +/** + * Return the current index of available frequencies of a specific lcore. + * Function pointer definition. 
Review each environments + * specific documentation for usage. + * + * @param lcore_id + * lcore id. + * + * @return + * The current index of available frequencies. + */ +typedef uint32_t (*rte_power_get_freq_t)(unsigned int lcore_id); + +/** + * Set the new frequency for a specific lcore by indicating the index of + * available frequencies. + * Function pointer definition. Review each environments + * specific documentation for usage. + * + * @param lcore_id + * lcore id. + * @param index + * The index of available frequencies. + * + * @return + * - 1 on success with frequency changed. + * - 0 on success without frequency changed. + * - Negative on error. + */ +typedef int (*rte_power_set_freq_t)(unsigned int lcore_id, uint32_t index); + +/** + * Function pointer definition for generic frequency change functions. Review + * each environments specific documentation for usage. + * + * @param lcore_id + * lcore id. + * + * @return + * - 1 on success with frequency changed. + * - 0 on success without frequency changed. + * - Negative on error. + */ +typedef int (*rte_power_freq_change_t)(unsigned int lcore_id); + +/** + * Function pointer definition for generic frequency change functions. Review + * each environments specific documentation for usage. + * + * @param lcore_id + * lcore id. + * + * @return + * - 1 on success with frequency changed. + * - 0 on success without frequency changed. + * - Negative on error. + */ + +/** + * Power capabilities summary. + */ +struct rte_power_core_capabilities { + union { + uint64_t capabilities; + struct { + uint64_t turbo:1; /**< Turbo can be enabled. */ + uint64_t priority:1; /**< SST-BF high freq core */ + }; + }; +}; + +typedef int (*rte_power_get_capabilities_t)(unsigned int lcore_id, + struct rte_power_core_capabilities *caps); + +/** Structure defining core power operations structure */ +struct rte_power_cpufreq_ops { + RTE_TAILQ_ENTRY(rte_power_cpufreq_ops) next; /**< Next in list. */ + char name[RTE_POWER_DRIVER_NAMESZ]; /**< power mgmt driver. */ + rte_power_cpufreq_init_t init; /**< Initialize power management. */ + rte_power_cpufreq_exit_t exit; /**< Exit power management. */ + rte_power_check_env_support_t check_env_support;/**< verify env is supported. */ + rte_power_freqs_t get_avail_freqs; /**< Get the available frequencies. */ + rte_power_get_freq_t get_freq; /**< Get frequency index. */ + rte_power_set_freq_t set_freq; /**< Set frequency index. */ + rte_power_freq_change_t freq_up; /**< Scale up frequency. */ + rte_power_freq_change_t freq_down; /**< Scale down frequency. */ + rte_power_freq_change_t freq_max; /**< Scale up frequency to highest. */ + rte_power_freq_change_t freq_min; /**< Scale up frequency to lowest. */ + rte_power_freq_change_t turbo_status; /**< Get Turbo status. */ + rte_power_freq_change_t enable_turbo; /**< Enable Turbo. */ + rte_power_freq_change_t disable_turbo; /**< Disable Turbo. */ + rte_power_get_capabilities_t get_caps; /**< power capabilities. */ +}; + +/** + * Register power cpu frequency operations. + * + * @param ops + * Pointer to an ops structure to register. + * @return + * - 0: Success. + * - Negative on error. + */ +__rte_internal +int rte_power_register_cpufreq_ops(struct rte_power_cpufreq_ops *ops); + +/** + * Macro to statically register the ops of a cpufreq driver. 
+ */ +#define RTE_POWER_REGISTER_CPUFREQ_OPS(ops) \ +RTE_INIT(power_hdlr_init_##ops) \ +{ \ + rte_power_register_cpufreq_ops(&ops); \ +} + +#endif /* POWER_CPUFREQ_H */ diff --git a/lib/power/power_uncore_ops.h b/lib/power/power_uncore_ops.h new file mode 100644 index 0000000000..b92af28df9 --- /dev/null +++ b/lib/power/power_uncore_ops.h @@ -0,0 +1,244 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2022 Intel Corporation + * Copyright(c) 2024 Advanced Micro Devices, Inc. + */ + +#ifndef POWER_UNCORE_OPS_H +#define POWER_UNCORE_OPS_H + +/** + * @file + * Uncore Frequency Management + */ + +#include +#include + +#define RTE_POWER_UNCORE_DRIVER_NAMESZ 24 + +/** + * Initialize uncore frequency management for specific die on a package. + * It will get the available frequencies and prepare to set new die frequencies. + * + * This function should NOT be called in the fast path. + * + * @param pkg + * Package number. + * Each physical CPU in a system is referred to as a package. + * @param die + * Die number. + * Each package can have several dies connected together via the uncore mesh. + * + * @return + * - 0 on success. + * - Negative on error. + */ +typedef int (*rte_power_uncore_init_t)(unsigned int pkg, unsigned int die); + +/** + * Exit uncore frequency management on a specific die on a package. + * It will restore uncore min and* max values to previous values + * before initialization of API. + * + * This function should NOT be called in the fast path. + * + * @param pkg + * Package number. + * Each physical CPU in a system is referred to as a package. + * @param die + * Die number. + * Each package can have several dies connected together via the uncore mesh. + * + * @return + * - 0 on success. + * - Negative on error. + */ +typedef int (*rte_power_uncore_exit_t)(unsigned int pkg, unsigned int die); + +/** + * Return the current index of available frequencies of a specific die on a package. + * It should be protected outside of this function for threadsafe. + * + * This function should NOT be called in the fast path. + * + * @param pkg + * Package number. + * Each physical CPU in a system is referred to as a package. + * @param die + * Die number. + * Each package can have several dies connected together via the uncore mesh. + * + * @return + * The current index of available frequencies. + * If error, it will return 'RTE_POWER_INVALID_FREQ_INDEX = (~0)'. + */ +typedef uint32_t (*rte_power_get_uncore_freq_t)(unsigned int pkg, unsigned int die); + +/** + * Set minimum and maximum uncore frequency for specified die on a package + * to specified index value. + * It should be protected outside of this function for threadsafe. + * + * This function should NOT be called in the fast path. + * + * @param pkg + * Package number. + * Each physical CPU in a system is referred to as a package. + * @param die + * Die number. + * Each package can have several dies connected together via the uncore mesh. + * @param index + * The index of available frequencies. + * + * @return + * - 1 on success with frequency changed. + * - 0 on success without frequency changed. + * - Negative on error. + */ +typedef int (*rte_power_set_uncore_freq_t)(unsigned int pkg, unsigned int die, uint32_t index); + +/** + * Return the list length of available frequencies in the index array. + * + * This function should NOT be called in the fast path. + * + * @param pkg + * Package number. + * Each physical CPU in a system is referred to as a package. + * @param die + * Die number. 
+ * Each package can have several dies connected together via the uncore mesh. + * @param index + * The index of available frequencies. + * + * @return + * - 1 on success with frequency changed. + * - 0 on success without frequency changed. + * - Negative on error. + */ +typedef int (*rte_power_set_uncore_freq_t)(unsigned int pkg, unsigned int die, uint32_t index); + +/** + * Return the list length of available frequencies in the index array. + * + * This function should NOT be called in the fast path. + * + * @param pkg + * Package number. + * Each physical CPU in a system is referred to as a package. + * @param die + * Die number. + * Each package can have several dies connected together via the uncore mesh. + * + * @return + * - The number of available index's in frequency array. + * - Negative on error. + */ +typedef int (*rte_power_uncore_get_num_freqs_t)(unsigned int pkg, unsigned int die); + +/** + * Return the list of available frequencies in the index array. + * + * This function should NOT be called in the fast path. + * + * @param pkg + * Package number. + * Each physical CPU in a system is referred to as a package. + * @param die + * Die number. + * Each package can have several dies connected together via the uncore mesh. + * @param freqs + * The buffer array to save the frequencies. + * @param num + * The number of frequencies to get. + * + * @return + * - The number of available index's in frequency array. + * - Negative on error. + */ +typedef int (*rte_power_uncore_freqs_t)(unsigned int pkg, unsigned int die, + uint32_t *freqs, uint32_t num); +/** + * Function pointers for generic frequency change functions. + * + * @param pkg + * Package number. + * Each physical CPU in a system is referred to as a package. + * @param die + * Die number. + * Each package can have several dies connected together via the uncore mesh. + * + * @return + * - 1 on success with frequency changed. + * - 0 on success without frequency changed. + * - Negative on error. + */ +typedef int (*rte_power_uncore_freq_change_t)(unsigned int pkg, unsigned int die); + +/** + * Return the number of packages (CPUs) on a system + * by parsing the uncore sysfs directory. + * + * This function should NOT be called in the fast path. + * + * @return + * - Zero on error. + * - Number of package on system on success. + */ +typedef unsigned int (*rte_power_uncore_get_num_pkgs_t)(void); + +/** + * Return the number of dies for pakckages (CPUs) specified + * from parsing the uncore sysfs directory. + * + * This function should NOT be called in the fast path. + * + * @param pkg + * Package number. + * Each physical CPU in a system is referred to as a package. + * + * @return + * - Zero on error. + * - Number of dies for package on sucecss. + */ +typedef unsigned int (*rte_power_uncore_get_num_dies_t)(unsigned int pkg); +typedef void (*rte_power_uncore_driver_cb_t)(void); + +/** Structure defining uncore power operations structure */ +struct rte_power_uncore_ops { + RTE_TAILQ_ENTRY(rte_power_uncore_ops) next; /**< Next in list. */ + char name[RTE_POWER_UNCORE_DRIVER_NAMESZ]; /**< power mgmt driver. */ + rte_power_uncore_driver_cb_t cb; /**< Driver specific callbacks. */ + rte_power_uncore_init_t init; /**< Initialize power management. */ + rte_power_uncore_exit_t exit; /**< Exit power management. */ + rte_power_uncore_get_num_pkgs_t get_num_pkgs; + rte_power_uncore_get_num_dies_t get_num_dies; + rte_power_uncore_get_num_freqs_t get_num_freqs; /**< Number of available frequencies. 
*/ + rte_power_uncore_freqs_t get_avail_freqs; /**< Get the available frequencies. */ + rte_power_get_uncore_freq_t get_freq; /**< Get frequency index. */ + rte_power_set_uncore_freq_t set_freq; /**< Set frequency index. */ + rte_power_uncore_freq_change_t freq_max; /**< Scale up frequency to highest. */ + rte_power_uncore_freq_change_t freq_min; /**< Scale up frequency to lowest. */ +}; + +/** + * Register power uncore frequency operations. + * @param ops + * Pointer to an ops structure to register. + * @return + * - 0: Success. + * - Negative on error. + */ +__rte_internal +int rte_power_register_uncore_ops(struct rte_power_uncore_ops *ops); + +/** + * Macro to statically register the ops of an uncore driver. + */ +#define RTE_POWER_REGISTER_UNCORE_OPS(ops) \ +RTE_INIT(power_hdlr_init_uncore_##ops) \ +{ \ + rte_power_register_uncore_ops(&ops); \ +} + +#endif /* POWER_UNCORE_OPS_H */ diff --git a/lib/power/rte_power.c b/lib/power/rte_power.c deleted file mode 100644 index 36c3f3da98..0000000000 --- a/lib/power/rte_power.c +++ /dev/null @@ -1,257 +0,0 @@ -/* SPDX-License-Identifier: BSD-3-Clause - * Copyright(c) 2010-2014 Intel Corporation - */ - -#include - -#include -#include - -#include "rte_power.h" -#include "power_acpi_cpufreq.h" -#include "power_cppc_cpufreq.h" -#include "power_common.h" -#include "power_kvm_vm.h" -#include "power_pstate_cpufreq.h" -#include "power_amd_pstate_cpufreq.h" - -enum power_management_env global_default_env = PM_ENV_NOT_SET; - -static rte_spinlock_t global_env_cfg_lock = RTE_SPINLOCK_INITIALIZER; - -/* function pointers */ -rte_power_freqs_t rte_power_freqs = NULL; -rte_power_get_freq_t rte_power_get_freq = NULL; -rte_power_set_freq_t rte_power_set_freq = NULL; -rte_power_freq_change_t rte_power_freq_up = NULL; -rte_power_freq_change_t rte_power_freq_down = NULL; -rte_power_freq_change_t rte_power_freq_max = NULL; -rte_power_freq_change_t rte_power_freq_min = NULL; -rte_power_freq_change_t rte_power_turbo_status; -rte_power_freq_change_t rte_power_freq_enable_turbo; -rte_power_freq_change_t rte_power_freq_disable_turbo; -rte_power_get_capabilities_t rte_power_get_capabilities; - -static void -reset_power_function_ptrs(void) -{ - rte_power_freqs = NULL; - rte_power_get_freq = NULL; - rte_power_set_freq = NULL; - rte_power_freq_up = NULL; - rte_power_freq_down = NULL; - rte_power_freq_max = NULL; - rte_power_freq_min = NULL; - rte_power_turbo_status = NULL; - rte_power_freq_enable_turbo = NULL; - rte_power_freq_disable_turbo = NULL; - rte_power_get_capabilities = NULL; -} - -int -rte_power_check_env_supported(enum power_management_env env) -{ - switch (env) { - case PM_ENV_ACPI_CPUFREQ: - return power_acpi_cpufreq_check_supported(); - case PM_ENV_PSTATE_CPUFREQ: - return power_pstate_cpufreq_check_supported(); - case PM_ENV_KVM_VM: - return power_kvm_vm_check_supported(); - case PM_ENV_CPPC_CPUFREQ: - return power_cppc_cpufreq_check_supported(); - case PM_ENV_AMD_PSTATE_CPUFREQ: - return power_amd_pstate_cpufreq_check_supported(); - default: - rte_errno = EINVAL; - return -1; - } -} - -int -rte_power_set_env(enum power_management_env env) -{ - rte_spinlock_lock(&global_env_cfg_lock); - - if (global_default_env != PM_ENV_NOT_SET) { - POWER_LOG(ERR, "Power Management Environment already set."); - rte_spinlock_unlock(&global_env_cfg_lock); - return -1; - } - - int ret = 0; - - if (env == PM_ENV_ACPI_CPUFREQ) { - rte_power_freqs = power_acpi_cpufreq_freqs; - rte_power_get_freq = power_acpi_cpufreq_get_freq; - rte_power_set_freq = power_acpi_cpufreq_set_freq; - 
rte_power_freq_up = power_acpi_cpufreq_freq_up; - rte_power_freq_down = power_acpi_cpufreq_freq_down; - rte_power_freq_min = power_acpi_cpufreq_freq_min; - rte_power_freq_max = power_acpi_cpufreq_freq_max; - rte_power_turbo_status = power_acpi_turbo_status; - rte_power_freq_enable_turbo = power_acpi_enable_turbo; - rte_power_freq_disable_turbo = power_acpi_disable_turbo; - rte_power_get_capabilities = power_acpi_get_capabilities; - } else if (env == PM_ENV_KVM_VM) { - rte_power_freqs = power_kvm_vm_freqs; - rte_power_get_freq = power_kvm_vm_get_freq; - rte_power_set_freq = power_kvm_vm_set_freq; - rte_power_freq_up = power_kvm_vm_freq_up; - rte_power_freq_down = power_kvm_vm_freq_down; - rte_power_freq_min = power_kvm_vm_freq_min; - rte_power_freq_max = power_kvm_vm_freq_max; - rte_power_turbo_status = power_kvm_vm_turbo_status; - rte_power_freq_enable_turbo = power_kvm_vm_enable_turbo; - rte_power_freq_disable_turbo = power_kvm_vm_disable_turbo; - rte_power_get_capabilities = power_kvm_vm_get_capabilities; - } else if (env == PM_ENV_PSTATE_CPUFREQ) { - rte_power_freqs = power_pstate_cpufreq_freqs; - rte_power_get_freq = power_pstate_cpufreq_get_freq; - rte_power_set_freq = power_pstate_cpufreq_set_freq; - rte_power_freq_up = power_pstate_cpufreq_freq_up; - rte_power_freq_down = power_pstate_cpufreq_freq_down; - rte_power_freq_min = power_pstate_cpufreq_freq_min; - rte_power_freq_max = power_pstate_cpufreq_freq_max; - rte_power_turbo_status = power_pstate_turbo_status; - rte_power_freq_enable_turbo = power_pstate_enable_turbo; - rte_power_freq_disable_turbo = power_pstate_disable_turbo; - rte_power_get_capabilities = power_pstate_get_capabilities; - - } else if (env == PM_ENV_CPPC_CPUFREQ) { - rte_power_freqs = power_cppc_cpufreq_freqs; - rte_power_get_freq = power_cppc_cpufreq_get_freq; - rte_power_set_freq = power_cppc_cpufreq_set_freq; - rte_power_freq_up = power_cppc_cpufreq_freq_up; - rte_power_freq_down = power_cppc_cpufreq_freq_down; - rte_power_freq_min = power_cppc_cpufreq_freq_min; - rte_power_freq_max = power_cppc_cpufreq_freq_max; - rte_power_turbo_status = power_cppc_turbo_status; - rte_power_freq_enable_turbo = power_cppc_enable_turbo; - rte_power_freq_disable_turbo = power_cppc_disable_turbo; - rte_power_get_capabilities = power_cppc_get_capabilities; - } else if (env == PM_ENV_AMD_PSTATE_CPUFREQ) { - rte_power_freqs = power_amd_pstate_cpufreq_freqs; - rte_power_get_freq = power_amd_pstate_cpufreq_get_freq; - rte_power_set_freq = power_amd_pstate_cpufreq_set_freq; - rte_power_freq_up = power_amd_pstate_cpufreq_freq_up; - rte_power_freq_down = power_amd_pstate_cpufreq_freq_down; - rte_power_freq_min = power_amd_pstate_cpufreq_freq_min; - rte_power_freq_max = power_amd_pstate_cpufreq_freq_max; - rte_power_turbo_status = power_amd_pstate_turbo_status; - rte_power_freq_enable_turbo = power_amd_pstate_enable_turbo; - rte_power_freq_disable_turbo = power_amd_pstate_disable_turbo; - rte_power_get_capabilities = power_amd_pstate_get_capabilities; - } else { - POWER_LOG(ERR, "Invalid Power Management Environment(%d) set", - env); - ret = -1; - } - - if (ret == 0) - global_default_env = env; - else { - global_default_env = PM_ENV_NOT_SET; - reset_power_function_ptrs(); - } - - rte_spinlock_unlock(&global_env_cfg_lock); - return ret; -} - -void -rte_power_unset_env(void) -{ - rte_spinlock_lock(&global_env_cfg_lock); - global_default_env = PM_ENV_NOT_SET; - reset_power_function_ptrs(); - rte_spinlock_unlock(&global_env_cfg_lock); -} - -enum power_management_env -rte_power_get_env(void) 
{ - return global_default_env; -} - -int -rte_power_init(unsigned int lcore_id) -{ - int ret = -1; - - switch (global_default_env) { - case PM_ENV_ACPI_CPUFREQ: - return power_acpi_cpufreq_init(lcore_id); - case PM_ENV_KVM_VM: - return power_kvm_vm_init(lcore_id); - case PM_ENV_PSTATE_CPUFREQ: - return power_pstate_cpufreq_init(lcore_id); - case PM_ENV_CPPC_CPUFREQ: - return power_cppc_cpufreq_init(lcore_id); - case PM_ENV_AMD_PSTATE_CPUFREQ: - return power_amd_pstate_cpufreq_init(lcore_id); - default: - POWER_LOG(INFO, "Env isn't set yet!"); - } - - /* Auto detect Environment */ - POWER_LOG(INFO, "Attempting to initialise ACPI cpufreq power management..."); - ret = power_acpi_cpufreq_init(lcore_id); - if (ret == 0) { - rte_power_set_env(PM_ENV_ACPI_CPUFREQ); - goto out; - } - - POWER_LOG(INFO, "Attempting to initialise PSTAT power management..."); - ret = power_pstate_cpufreq_init(lcore_id); - if (ret == 0) { - rte_power_set_env(PM_ENV_PSTATE_CPUFREQ); - goto out; - } - - POWER_LOG(INFO, "Attempting to initialise AMD PSTATE power management..."); - ret = power_amd_pstate_cpufreq_init(lcore_id); - if (ret == 0) { - rte_power_set_env(PM_ENV_AMD_PSTATE_CPUFREQ); - goto out; - } - - POWER_LOG(INFO, "Attempting to initialise CPPC power management..."); - ret = power_cppc_cpufreq_init(lcore_id); - if (ret == 0) { - rte_power_set_env(PM_ENV_CPPC_CPUFREQ); - goto out; - } - - POWER_LOG(INFO, "Attempting to initialise VM power management..."); - ret = power_kvm_vm_init(lcore_id); - if (ret == 0) { - rte_power_set_env(PM_ENV_KVM_VM); - goto out; - } - POWER_LOG(ERR, "Unable to set Power Management Environment for lcore " - "%u", lcore_id); -out: - return ret; -} - -int -rte_power_exit(unsigned int lcore_id) -{ - switch (global_default_env) { - case PM_ENV_ACPI_CPUFREQ: - return power_acpi_cpufreq_exit(lcore_id); - case PM_ENV_KVM_VM: - return power_kvm_vm_exit(lcore_id); - case PM_ENV_PSTATE_CPUFREQ: - return power_pstate_cpufreq_exit(lcore_id); - case PM_ENV_CPPC_CPUFREQ: - return power_cppc_cpufreq_exit(lcore_id); - case PM_ENV_AMD_PSTATE_CPUFREQ: - return power_amd_pstate_cpufreq_exit(lcore_id); - default: - POWER_LOG(ERR, "Environment has not been set, unable to exit gracefully"); - - } - return -1; - -} diff --git a/lib/power/rte_power_cpufreq.c b/lib/power/rte_power_cpufreq.c new file mode 100644 index 0000000000..3576017239 --- /dev/null +++ b/lib/power/rte_power_cpufreq.c @@ -0,0 +1,227 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2010-2014 Intel Corporation + */ + +#include +#include + +#include "rte_power_cpufreq.h" +#include "power_common.h" + +static enum power_management_env global_default_env = PM_ENV_NOT_SET; +static struct rte_power_cpufreq_ops *global_cpufreq_ops; + +static rte_spinlock_t global_env_cfg_lock = RTE_SPINLOCK_INITIALIZER; +static RTE_TAILQ_HEAD(, rte_power_cpufreq_ops) cpufreq_ops_list = + TAILQ_HEAD_INITIALIZER(cpufreq_ops_list); + +const char *power_env_str[] = { + "not set", + "acpi", + "kvm-vm", + "intel-pstate", + "cppc", + "amd-pstate" +}; + +/* register the ops struct in rte_power_cpufreq_ops, return 0 on success. 
*/ +int +rte_power_register_cpufreq_ops(struct rte_power_cpufreq_ops *driver_ops) +{ + if (!driver_ops->init || !driver_ops->exit || + !driver_ops->check_env_support || !driver_ops->get_avail_freqs || + !driver_ops->get_freq || !driver_ops->set_freq || + !driver_ops->freq_up || !driver_ops->freq_down || + !driver_ops->freq_max || !driver_ops->freq_min || + !driver_ops->turbo_status || !driver_ops->enable_turbo || + !driver_ops->disable_turbo || !driver_ops->get_caps) { + POWER_LOG(ERR, "Missing callbacks while registering cpufreq ops"); + return -1; + } + + TAILQ_INSERT_TAIL(&cpufreq_ops_list, driver_ops, next); + + return 0; +} + +int +rte_power_check_env_supported(enum power_management_env env) +{ + struct rte_power_cpufreq_ops *ops; + + if (env >= RTE_DIM(power_env_str)) + return 0; + + RTE_TAILQ_FOREACH(ops, &cpufreq_ops_list, next) + if (strncmp(ops->name, power_env_str[env], + RTE_POWER_DRIVER_NAMESZ) == 0) + return ops->check_env_support(); + + return 0; +} + +int +rte_power_set_env(enum power_management_env env) +{ + struct rte_power_cpufreq_ops *ops; + int ret = -1; + + rte_spinlock_lock(&global_env_cfg_lock); + + if (global_default_env != PM_ENV_NOT_SET) { + POWER_LOG(ERR, "Power Management Environment already set."); + goto out; + } + + RTE_TAILQ_FOREACH(ops, &cpufreq_ops_list, next) + if (strncmp(ops->name, power_env_str[env], + RTE_POWER_DRIVER_NAMESZ) == 0) { + global_cpufreq_ops = ops; + global_default_env = env; + ret = 0; + goto out; + } + + POWER_LOG(ERR, "Invalid Power Management Environment(%d) set", + env); +out: + rte_spinlock_unlock(&global_env_cfg_lock); + return ret; +} + +void +rte_power_unset_env(void) +{ + rte_spinlock_lock(&global_env_cfg_lock); + global_default_env = PM_ENV_NOT_SET; + global_cpufreq_ops = NULL; + rte_spinlock_unlock(&global_env_cfg_lock); +} + +enum power_management_env +rte_power_get_env(void) { + return global_default_env; +} + +int +rte_power_init(unsigned int lcore_id) +{ + struct rte_power_cpufreq_ops *ops; + uint8_t env; + + if (global_default_env != PM_ENV_NOT_SET) + return global_cpufreq_ops->init(lcore_id); + + POWER_LOG(INFO, "Env isn't set yet!"); + + /* Auto detect Environment */ + RTE_TAILQ_FOREACH(ops, &cpufreq_ops_list, next) { + POWER_LOG(INFO, + "Attempting to initialise %s cpufreq power management...", + ops->name); + for (env = 0; env < RTE_DIM(power_env_str); env++) { + if ((strncmp(ops->name, power_env_str[env], + RTE_POWER_DRIVER_NAMESZ) == 0) && + (ops->init(lcore_id) == 0)) { + rte_power_set_env(env); + return 0; + } + } + } + + POWER_LOG(ERR, + "Unable to set Power Management Environment for lcore %u", + lcore_id); + + return -1; +} + +int +rte_power_exit(unsigned int lcore_id) +{ + if (global_default_env != PM_ENV_NOT_SET) + return global_cpufreq_ops->exit(lcore_id); + + POWER_LOG(ERR, + "Environment has not been set, unable to exit gracefully"); + + return -1; +} + +uint32_t +rte_power_freqs(unsigned int lcore_id, uint32_t *freqs, uint32_t n) +{ + RTE_ASSERT(global_cpufreq_ops != NULL); + return global_cpufreq_ops->get_avail_freqs(lcore_id, freqs, n); +} + +uint32_t +rte_power_get_freq(unsigned int lcore_id) +{ + RTE_ASSERT(global_cpufreq_ops != NULL); + return global_cpufreq_ops->get_freq(lcore_id); +} + +uint32_t +rte_power_set_freq(unsigned int lcore_id, uint32_t index) +{ + RTE_ASSERT(global_cpufreq_ops != NULL); + return global_cpufreq_ops->set_freq(lcore_id, index); +} + +int +rte_power_freq_up(unsigned int lcore_id) +{ + RTE_ASSERT(global_cpufreq_ops != NULL); + return global_cpufreq_ops->freq_up(lcore_id); +} 
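For orientation, here is a minimal sketch (not part of this patch) of how an application might drive the reworked cpufreq wrappers above. The prototypes match rte_power_cpufreq.h as introduced in this series; the frequency-buffer size is arbitrary, and the turbo capability bit is assumed to keep the layout it had in the old rte_power.h (the struct now lives in power_cpufreq.h, which is not shown here).

#include <rte_common.h>
#include <rte_power_cpufreq.h>

static int
scale_lcore_to_max(unsigned int lcore_id)
{
	struct rte_power_core_capabilities caps;
	uint32_t freqs[32];
	uint32_t nb;

	/* Bind this lcore; auto-detects the environment if none was set. */
	if (rte_power_init(lcore_id) != 0)
		return -1;

	/* Inspect the frequency table exposed by the selected driver. */
	nb = rte_power_freqs(lcore_id, freqs, RTE_DIM(freqs));

	/* Enable turbo only when the driver reports it as available. */
	if (rte_power_get_capabilities(lcore_id, &caps) == 0 && caps.turbo)
		rte_power_freq_enable_turbo(lcore_id);

	/* Pin the core to its highest available frequency. */
	if (rte_power_freq_max(lcore_id) < 0) {
		rte_power_exit(lcore_id);
		return -1;
	}

	return (int)nb;
}

The wrappers forward straight to the registered ops structure, so the call sequence is unchanged for existing applications; only the dispatch mechanism behind it moved from per-environment function pointers to the registered driver ops.
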
+ +int +rte_power_freq_down(unsigned int lcore_id) +{ + RTE_ASSERT(global_cpufreq_ops != NULL); + return global_cpufreq_ops->freq_down(lcore_id); +} + +int +rte_power_freq_max(unsigned int lcore_id) +{ + RTE_ASSERT(global_cpufreq_ops != NULL); + return global_cpufreq_ops->freq_max(lcore_id); +} + +int +rte_power_freq_min(unsigned int lcore_id) +{ + RTE_ASSERT(global_cpufreq_ops != NULL); + return global_cpufreq_ops->freq_min(lcore_id); +} + +int +rte_power_turbo_status(unsigned int lcore_id) +{ + RTE_ASSERT(global_cpufreq_ops != NULL); + return global_cpufreq_ops->turbo_status(lcore_id); +} + +int +rte_power_freq_enable_turbo(unsigned int lcore_id) +{ + RTE_ASSERT(global_cpufreq_ops != NULL); + return global_cpufreq_ops->enable_turbo(lcore_id); +} + +int +rte_power_freq_disable_turbo(unsigned int lcore_id) +{ + RTE_ASSERT(global_cpufreq_ops != NULL); + return global_cpufreq_ops->disable_turbo(lcore_id); +} + +int +rte_power_get_capabilities(unsigned int lcore_id, + struct rte_power_core_capabilities *caps) +{ + RTE_ASSERT(global_cpufreq_ops != NULL); + return global_cpufreq_ops->get_caps(lcore_id, caps); +} diff --git a/lib/power/rte_power.h b/lib/power/rte_power_cpufreq.h similarity index 72% rename from lib/power/rte_power.h rename to lib/power/rte_power_cpufreq.h index 4fa4afe399..82d274214b 100644 --- a/lib/power/rte_power.h +++ b/lib/power/rte_power_cpufreq.h @@ -1,27 +1,34 @@ /* SPDX-License-Identifier: BSD-3-Clause * Copyright(c) 2010-2014 Intel Corporation + * Copyright(c) 2024 Advanced Micro Devices, Inc. */ -#ifndef _RTE_POWER_H -#define _RTE_POWER_H +#ifndef RTE_POWER_CPUFREQ_H +#define RTE_POWER_CPUFREQ_H /** * @file - * RTE Power Management + * CPU Frequency Management */ #include #include -#include + +#include "power_cpufreq.h" #ifdef __cplusplus extern "C" { #endif /* Power Management Environment State */ -enum power_management_env {PM_ENV_NOT_SET, PM_ENV_ACPI_CPUFREQ, PM_ENV_KVM_VM, - PM_ENV_PSTATE_CPUFREQ, PM_ENV_CPPC_CPUFREQ, - PM_ENV_AMD_PSTATE_CPUFREQ}; +enum power_management_env { + PM_ENV_NOT_SET = 0, + PM_ENV_ACPI_CPUFREQ, + PM_ENV_KVM_VM, + PM_ENV_PSTATE_CPUFREQ, + PM_ENV_CPPC_CPUFREQ, + PM_ENV_AMD_PSTATE_CPUFREQ +}; /** * Check if a specific power management environment type is supported on a @@ -108,10 +115,7 @@ int rte_power_exit(unsigned int lcore_id); * @return * The number of available frequencies. */ -typedef uint32_t (*rte_power_freqs_t)(unsigned int lcore_id, uint32_t *freqs, - uint32_t num); - -extern rte_power_freqs_t rte_power_freqs; +uint32_t rte_power_freqs(unsigned int lcore_id, uint32_t *freqs, uint32_t num); /** * Return the current index of available frequencies of a specific lcore. @@ -124,9 +128,7 @@ extern rte_power_freqs_t rte_power_freqs; * @return * The current index of available frequencies. */ -typedef uint32_t (*rte_power_get_freq_t)(unsigned int lcore_id); - -extern rte_power_get_freq_t rte_power_get_freq; +uint32_t rte_power_get_freq(unsigned int lcore_id); /** * Set the new frequency for a specific lcore by indicating the index of @@ -144,13 +146,12 @@ extern rte_power_get_freq_t rte_power_get_freq; * - 0 on success without frequency changed. * - Negative on error. */ -typedef int (*rte_power_set_freq_t)(unsigned int lcore_id, uint32_t index); - -extern rte_power_set_freq_t rte_power_set_freq; +uint32_t rte_power_set_freq(unsigned int lcore_id, uint32_t index); /** - * Function pointer definition for generic frequency change functions. Review - * each environments specific documentation for usage. 
+ * Scale up the frequency of a specific lcore according to the available + * frequencies. + * Review each environments specific documentation for usage. * * @param lcore_id * lcore id. @@ -160,66 +161,92 @@ extern rte_power_set_freq_t rte_power_set_freq; * - 0 on success without frequency changed. * - Negative on error. */ -typedef int (*rte_power_freq_change_t)(unsigned int lcore_id); - -/** - * Scale up the frequency of a specific lcore according to the available - * frequencies. - * Review each environments specific documentation for usage. - */ -extern rte_power_freq_change_t rte_power_freq_up; +int rte_power_freq_up(unsigned int lcore_id); /** * Scale down the frequency of a specific lcore according to the available * frequencies. * Review each environments specific documentation for usage. + * + * @param lcore_id + * lcore id. + * + * @return + * - 1 on success with frequency changed. + * - 0 on success without frequency changed. + * - Negative on error. */ -extern rte_power_freq_change_t rte_power_freq_down; +int rte_power_freq_down(unsigned int lcore_id); /** * Scale up the frequency of a specific lcore to the highest according to the * available frequencies. * Review each environments specific documentation for usage. + * + * @param lcore_id + * lcore id. + * + * @return + * - 1 on success with frequency changed. + * - 0 on success without frequency changed. + * - Negative on error. */ -extern rte_power_freq_change_t rte_power_freq_max; +int rte_power_freq_max(unsigned int lcore_id); /** * Scale down the frequency of a specific lcore to the lowest according to the * available frequencies. * Review each environments specific documentation for usage.. + * + * @param lcore_id + * lcore id. + * + * @return + * - 1 on success with frequency changed. + * - 0 on success without frequency changed. + * - Negative on error. */ -extern rte_power_freq_change_t rte_power_freq_min; +int rte_power_freq_min(unsigned int lcore_id); /** * Query the Turbo Boost status of a specific lcore. * Review each environments specific documentation for usage.. + * + * @param lcore_id + * lcore id. + * + * @return + * - 1 turbo boost enabled. + * - 0 turbo boost disabled. + * - Negative on error. */ -extern rte_power_freq_change_t rte_power_turbo_status; +int rte_power_turbo_status(unsigned int lcore_id); /** * Enable Turbo Boost for this lcore. * Review each environments specific documentation for usage.. + * + * @param lcore_id + * lcore id. + * + * @return + * - 0 on success. + * - Negative on error. */ -extern rte_power_freq_change_t rte_power_freq_enable_turbo; +int rte_power_freq_enable_turbo(unsigned int lcore_id); /** * Disable Turbo Boost for this lcore. * Review each environments specific documentation for usage.. + * + * @param lcore_id + * lcore id. + * + * @return + * - 0 on success. + * - Negative on error. */ -extern rte_power_freq_change_t rte_power_freq_disable_turbo; - -/** - * Power capabilities summary. - */ -struct rte_power_core_capabilities { - union { - uint64_t capabilities; - struct { - uint64_t turbo:1; /**< Turbo can be enabled. */ - uint64_t priority:1; /**< SST-BF high freq core */ - }; - }; -}; +int rte_power_freq_disable_turbo(unsigned int lcore_id); /** * Returns power capabilities for a specific lcore. @@ -235,13 +262,11 @@ struct rte_power_core_capabilities { * - 0 on success. * - Negative on error. 
*/ -typedef int (*rte_power_get_capabilities_t)(unsigned int lcore_id, +int rte_power_get_capabilities(unsigned int lcore_id, struct rte_power_core_capabilities *caps); -extern rte_power_get_capabilities_t rte_power_get_capabilities; - #ifdef __cplusplus } #endif -#endif +#endif /* RTE_POWER_CPUFREQ_H */ diff --git a/lib/power/rte_power_pmd_mgmt.c b/lib/power/rte_power_pmd_mgmt.c index 5e50613f5b..a2fff3b765 100644 --- a/lib/power/rte_power_pmd_mgmt.c +++ b/lib/power/rte_power_pmd_mgmt.c @@ -5,6 +5,7 @@ #include #include +#include #include #include #include @@ -69,7 +70,7 @@ struct __rte_cache_aligned pmd_core_cfg { uint64_t sleep_target; /**< Prevent a queue from triggering sleep multiple times */ }; -static struct pmd_core_cfg lcore_cfgs[RTE_MAX_LCORE]; +static RTE_LCORE_VAR_HANDLE(struct pmd_core_cfg, lcore_cfgs); static inline bool queue_equal(const union queue *l, const union queue *r) @@ -252,12 +253,11 @@ clb_multiwait(uint16_t port_id __rte_unused, uint16_t qidx __rte_unused, struct rte_mbuf **pkts __rte_unused, uint16_t nb_rx, uint16_t max_pkts __rte_unused, void *arg) { - const unsigned int lcore = rte_lcore_id(); struct queue_list_entry *queue_conf = arg; struct pmd_core_cfg *lcore_conf; const bool empty = nb_rx == 0; - lcore_conf = &lcore_cfgs[lcore]; + lcore_conf = RTE_LCORE_VAR(lcore_cfgs); /* early exit */ if (likely(!empty)) @@ -317,13 +317,12 @@ clb_pause(uint16_t port_id __rte_unused, uint16_t qidx __rte_unused, struct rte_mbuf **pkts __rte_unused, uint16_t nb_rx, uint16_t max_pkts __rte_unused, void *arg) { - const unsigned int lcore = rte_lcore_id(); struct queue_list_entry *queue_conf = arg; struct pmd_core_cfg *lcore_conf; const bool empty = nb_rx == 0; uint32_t pause_duration = rte_power_pmd_mgmt_get_pause_duration(); - lcore_conf = &lcore_cfgs[lcore]; + lcore_conf = RTE_LCORE_VAR(lcore_cfgs); if (likely(!empty)) /* early exit */ @@ -358,9 +357,8 @@ clb_scale_freq(uint16_t port_id __rte_unused, uint16_t qidx __rte_unused, struct rte_mbuf **pkts __rte_unused, uint16_t nb_rx, uint16_t max_pkts __rte_unused, void *arg) { - const unsigned int lcore = rte_lcore_id(); const bool empty = nb_rx == 0; - struct pmd_core_cfg *lcore_conf = &lcore_cfgs[lcore]; + struct pmd_core_cfg *lcore_conf = RTE_LCORE_VAR(lcore_cfgs); struct queue_list_entry *queue_conf = arg; if (likely(!empty)) { @@ -519,7 +517,7 @@ rte_power_ethdev_pmgmt_queue_enable(unsigned int lcore_id, uint16_t port_id, goto end; } - lcore_cfg = &lcore_cfgs[lcore_id]; + lcore_cfg = RTE_LCORE_VAR_LCORE(lcore_id, lcore_cfgs); /* check if other queues are stopped as well */ ret = cfg_queues_stopped(lcore_cfg); @@ -620,7 +618,7 @@ rte_power_ethdev_pmgmt_queue_disable(unsigned int lcore_id, } /* no need to check queue id as wrong queue id would not be enabled */ - lcore_cfg = &lcore_cfgs[lcore_id]; + lcore_cfg = RTE_LCORE_VAR_LCORE(lcore_id, lcore_cfgs); /* check if other queues are stopped as well */ ret = cfg_queues_stopped(lcore_cfg); @@ -770,21 +768,22 @@ rte_power_pmd_mgmt_get_scaling_freq_max(unsigned int lcore) } RTE_INIT(rte_power_ethdev_pmgmt_init) { - size_t i; - int j; + unsigned int lcore_id; + struct pmd_core_cfg *lcore_cfg; + int i; + + RTE_LCORE_VAR_ALLOC(lcore_cfgs); /* initialize all tailqs */ - for (i = 0; i < RTE_DIM(lcore_cfgs); i++) { - struct pmd_core_cfg *cfg = &lcore_cfgs[i]; - TAILQ_INIT(&cfg->head); - } + RTE_LCORE_VAR_FOREACH(lcore_id, lcore_cfg, lcore_cfgs) + TAILQ_INIT(&lcore_cfg->head); /* initialize config defaults */ emptypoll_max = 512; pause_duration = 1; /* scaling defaults out of range to 
ensure not used unless set by user or app */ - for (j = 0; j < RTE_MAX_LCORE; j++) { - scale_freq_min[j] = 0; - scale_freq_max[j] = UINT32_MAX; + for (i = 0; i < RTE_MAX_LCORE; i++) { + scale_freq_min[i] = 0; + scale_freq_max[i] = UINT32_MAX; } } diff --git a/lib/power/rte_power_pmd_mgmt.h b/lib/power/rte_power_pmd_mgmt.h index 807e454096..58c25bc3ff 100644 --- a/lib/power/rte_power_pmd_mgmt.h +++ b/lib/power/rte_power_pmd_mgmt.h @@ -13,7 +13,7 @@ #include #include -#include +#include #ifdef __cplusplus extern "C" { diff --git a/lib/power/rte_power_qos.c b/lib/power/rte_power_qos.c new file mode 100644 index 0000000000..4dd0532b36 --- /dev/null +++ b/lib/power/rte_power_qos.c @@ -0,0 +1,123 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2024 HiSilicon Limited + */ + +#include +#include +#include + +#include +#include + +#include "power_common.h" +#include "rte_power_qos.h" + +#define PM_QOS_SYSFILE_RESUME_LATENCY_US \ + "/sys/devices/system/cpu/cpu%u/power/pm_qos_resume_latency_us" + +#define PM_QOS_CPU_RESUME_LATENCY_BUF_LEN 32 + +int +rte_power_qos_set_cpu_resume_latency(uint16_t lcore_id, int latency) +{ + char buf[PM_QOS_CPU_RESUME_LATENCY_BUF_LEN]; + uint32_t cpu_id; + FILE *f; + int ret; + + if (!rte_lcore_is_enabled(lcore_id)) { + POWER_LOG(ERR, "lcore id %u is not enabled", lcore_id); + return -EINVAL; + } + ret = power_get_lcore_mapped_cpu_id(lcore_id, &cpu_id); + if (ret != 0) + return ret; + + if (latency < 0) { + POWER_LOG(ERR, "latency should be greater than and equal to 0"); + return -EINVAL; + } + + ret = open_core_sysfs_file(&f, "w", PM_QOS_SYSFILE_RESUME_LATENCY_US, cpu_id); + if (ret != 0) { + POWER_LOG(ERR, "Failed to open "PM_QOS_SYSFILE_RESUME_LATENCY_US" : %s", + cpu_id, strerror(errno)); + return ret; + } + + /* + * Based on the sysfs interface pm_qos_resume_latency_us under + * @PM_QOS_SYSFILE_RESUME_LATENCY_US directory in kernel, their meaning + * is as follows for different input string. + * 1> the resume latency is 0 if the input is "n/a". + * 2> the resume latency is no constraint if the input is "0". + * 3> the resume latency is the actual value to be set. 
+ */ + if (latency == RTE_POWER_QOS_STRICT_LATENCY_VALUE) + snprintf(buf, sizeof(buf), "%s", "n/a"); + else if (latency == RTE_POWER_QOS_RESUME_LATENCY_NO_CONSTRAINT) + snprintf(buf, sizeof(buf), "%u", 0); + else + snprintf(buf, sizeof(buf), "%u", latency); + + ret = write_core_sysfs_s(f, buf); + if (ret != 0) + POWER_LOG(ERR, "Failed to write "PM_QOS_SYSFILE_RESUME_LATENCY_US" : %s", + cpu_id, strerror(errno)); + + fclose(f); + + return ret; +} + +int +rte_power_qos_get_cpu_resume_latency(uint16_t lcore_id) +{ + char buf[PM_QOS_CPU_RESUME_LATENCY_BUF_LEN]; + int latency = -1; + uint32_t cpu_id; + FILE *f; + int ret; + + if (!rte_lcore_is_enabled(lcore_id)) { + POWER_LOG(ERR, "lcore id %u is not enabled", lcore_id); + return -EINVAL; + } + ret = power_get_lcore_mapped_cpu_id(lcore_id, &cpu_id); + if (ret != 0) + return ret; + + ret = open_core_sysfs_file(&f, "r", PM_QOS_SYSFILE_RESUME_LATENCY_US, cpu_id); + if (ret != 0) { + POWER_LOG(ERR, "Failed to open "PM_QOS_SYSFILE_RESUME_LATENCY_US" : %s", + cpu_id, strerror(errno)); + return ret; + } + + ret = read_core_sysfs_s(f, buf, sizeof(buf)); + if (ret != 0) { + POWER_LOG(ERR, "Failed to read "PM_QOS_SYSFILE_RESUME_LATENCY_US" : %s", + cpu_id, strerror(errno)); + goto out; + } + + /* + * Based on the sysfs interface pm_qos_resume_latency_us under + * @PM_QOS_SYSFILE_RESUME_LATENCY_US directory in kernel, their meaning + * is as follows for different output string. + * 1> the resume latency is 0 if the output is "n/a". + * 2> the resume latency is no constraint if the output is "0". + * 3> the resume latency is the actual value in used for other string. + */ + if (strcmp(buf, "n/a") == 0) + latency = RTE_POWER_QOS_STRICT_LATENCY_VALUE; + else { + latency = strtoul(buf, NULL, 10); + latency = latency == 0 ? RTE_POWER_QOS_RESUME_LATENCY_NO_CONSTRAINT : latency; + } + +out: + fclose(f); + + return latency != -1 ? latency : ret; +} diff --git a/lib/power/rte_power_qos.h b/lib/power/rte_power_qos.h new file mode 100644 index 0000000000..7a8dab9272 --- /dev/null +++ b/lib/power/rte_power_qos.h @@ -0,0 +1,73 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2024 HiSilicon Limited + */ + +#ifndef RTE_POWER_QOS_H +#define RTE_POWER_QOS_H + +#include + +#include + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * @file rte_power_qos.h + * + * PM QoS API. + * + * The CPU-wide resume latency limit has a positive impact on this CPU's idle + * state selection in each cpuidle governor. + * Please see the PM QoS on CPU wide in the following link: + * https://www.kernel.org/doc/html/latest/admin-guide/abi-testing.html?highlight=pm_qos_resume_latency_us#abi-sys-devices-power-pm-qos-resume-latency-us + * + * The deeper the idle state, the lower the power consumption, but the + * longer the resume time. Some service are delay sensitive and very except the + * low resume time, like interrupt packet receiving mode. + * + * In these case, per-CPU PM QoS API can be used to control this CPU's idle + * state selection and limit just enter the shallowest idle state to low the + * delay after sleep by setting strict resume latency (zero value). + */ + +#define RTE_POWER_QOS_STRICT_LATENCY_VALUE 0 +#define RTE_POWER_QOS_RESUME_LATENCY_NO_CONSTRAINT INT32_MAX + +/** + * @warning + * @b EXPERIMENTAL: this API may change without prior notice. + * + * @param lcore_id + * target logical core id + * + * @param latency + * The latency should be greater than and equal to zero in microseconds unit. + * + * @return + * 0 on success. 
Otherwise negative value is returned. + */ +__rte_experimental +int rte_power_qos_set_cpu_resume_latency(uint16_t lcore_id, int latency); + +/** + * @warning + * @b EXPERIMENTAL: this API may change without prior notice. + * + * Get the current resume latency of this logical core. + * The default value in kernel is @see RTE_POWER_QOS_RESUME_LATENCY_NO_CONSTRAINT + * if don't set it. + * + * @return + * Negative value on failure. + * >= 0 means the actual resume latency limit on this core. + */ +__rte_experimental +int rte_power_qos_get_cpu_resume_latency(uint16_t lcore_id); + +#ifdef __cplusplus +} +#endif + +#endif /* RTE_POWER_QOS_H */ diff --git a/lib/power/rte_power_uncore.c b/lib/power/rte_power_uncore.c index 48c75a5da0..741e067932 100644 --- a/lib/power/rte_power_uncore.c +++ b/lib/power/rte_power_uncore.c @@ -3,107 +3,58 @@ * Copyright(c) 2023 AMD Corporation */ -#include - -#include #include +#include -#include "power_common.h" #include "rte_power_uncore.h" -#include "power_intel_uncore.h" +#include "power_common.h" -enum rte_uncore_power_mgmt_env default_uncore_env = RTE_UNCORE_PM_ENV_NOT_SET; +static enum rte_uncore_power_mgmt_env global_uncore_env = RTE_UNCORE_PM_ENV_NOT_SET; +static struct rte_power_uncore_ops *global_uncore_ops; static rte_spinlock_t global_env_cfg_lock = RTE_SPINLOCK_INITIALIZER; +static RTE_TAILQ_HEAD(, rte_power_uncore_ops) uncore_ops_list = + TAILQ_HEAD_INITIALIZER(uncore_ops_list); -static uint32_t -power_get_dummy_uncore_freq(unsigned int pkg __rte_unused, - unsigned int die __rte_unused) -{ - return 0; -} - -static int -power_set_dummy_uncore_freq(unsigned int pkg __rte_unused, - unsigned int die __rte_unused, uint32_t index __rte_unused) -{ - return 0; -} - -static int -power_dummy_uncore_freq_max(unsigned int pkg __rte_unused, - unsigned int die __rte_unused) -{ - return 0; -} - -static int -power_dummy_uncore_freq_min(unsigned int pkg __rte_unused, - unsigned int die __rte_unused) -{ - return 0; -} +const char *uncore_env_str[] = { + "not set", + "auto-detect", + "intel-uncore", + "amd-hsmp" +}; -static int -power_dummy_uncore_freqs(unsigned int pkg __rte_unused, unsigned int die __rte_unused, - uint32_t *freqs __rte_unused, uint32_t num __rte_unused) +/* register the ops struct in rte_power_uncore_ops, return 0 on success. 
*/ +int +rte_power_register_uncore_ops(struct rte_power_uncore_ops *driver_ops) { - return 0; -} + if (!driver_ops->init || !driver_ops->exit || !driver_ops->get_num_pkgs || + !driver_ops->get_num_dies || !driver_ops->get_num_freqs || + !driver_ops->get_avail_freqs || !driver_ops->get_freq || + !driver_ops->set_freq || !driver_ops->freq_max || + !driver_ops->freq_min) { + POWER_LOG(ERR, "Missing callbacks while registering power ops"); + return -1; + } -static int -power_dummy_uncore_get_num_freqs(unsigned int pkg __rte_unused, - unsigned int die __rte_unused) -{ - return 0; -} + if (driver_ops->cb) + driver_ops->cb(); -static unsigned int -power_dummy_uncore_get_num_pkgs(void) -{ - return 0; -} + TAILQ_INSERT_TAIL(&uncore_ops_list, driver_ops, next); -static unsigned int -power_dummy_uncore_get_num_dies(unsigned int pkg __rte_unused) -{ return 0; } -/* function pointers */ -rte_power_get_uncore_freq_t rte_power_get_uncore_freq = power_get_dummy_uncore_freq; -rte_power_set_uncore_freq_t rte_power_set_uncore_freq = power_set_dummy_uncore_freq; -rte_power_uncore_freq_change_t rte_power_uncore_freq_max = power_dummy_uncore_freq_max; -rte_power_uncore_freq_change_t rte_power_uncore_freq_min = power_dummy_uncore_freq_min; -rte_power_uncore_freqs_t rte_power_uncore_freqs = power_dummy_uncore_freqs; -rte_power_uncore_get_num_freqs_t rte_power_uncore_get_num_freqs = power_dummy_uncore_get_num_freqs; -rte_power_uncore_get_num_pkgs_t rte_power_uncore_get_num_pkgs = power_dummy_uncore_get_num_pkgs; -rte_power_uncore_get_num_dies_t rte_power_uncore_get_num_dies = power_dummy_uncore_get_num_dies; - -static void -reset_power_uncore_function_ptrs(void) -{ - rte_power_get_uncore_freq = power_get_dummy_uncore_freq; - rte_power_set_uncore_freq = power_set_dummy_uncore_freq; - rte_power_uncore_freq_max = power_dummy_uncore_freq_max; - rte_power_uncore_freq_min = power_dummy_uncore_freq_min; - rte_power_uncore_freqs = power_dummy_uncore_freqs; - rte_power_uncore_get_num_freqs = power_dummy_uncore_get_num_freqs; - rte_power_uncore_get_num_pkgs = power_dummy_uncore_get_num_pkgs; - rte_power_uncore_get_num_dies = power_dummy_uncore_get_num_dies; -} - int rte_power_set_uncore_env(enum rte_uncore_power_mgmt_env env) { - int ret; + int ret = -1; + struct rte_power_uncore_ops *ops; rte_spinlock_lock(&global_env_cfg_lock); - if (default_uncore_env != RTE_UNCORE_PM_ENV_NOT_SET) { + if (global_uncore_env != RTE_UNCORE_PM_ENV_NOT_SET) { POWER_LOG(ERR, "Uncore Power Management Env already set."); - rte_spinlock_unlock(&global_env_cfg_lock); - return -1; + goto out; } if (env == RTE_UNCORE_PM_ENV_AUTO_DETECT) @@ -113,23 +64,20 @@ rte_power_set_uncore_env(enum rte_uncore_power_mgmt_env env) */ env = RTE_UNCORE_PM_ENV_INTEL_UNCORE; - ret = 0; - if (env == RTE_UNCORE_PM_ENV_INTEL_UNCORE) { - rte_power_get_uncore_freq = power_get_intel_uncore_freq; - rte_power_set_uncore_freq = power_set_intel_uncore_freq; - rte_power_uncore_freq_min = power_intel_uncore_freq_min; - rte_power_uncore_freq_max = power_intel_uncore_freq_max; - rte_power_uncore_freqs = power_intel_uncore_freqs; - rte_power_uncore_get_num_freqs = power_intel_uncore_get_num_freqs; - rte_power_uncore_get_num_pkgs = power_intel_uncore_get_num_pkgs; - rte_power_uncore_get_num_dies = power_intel_uncore_get_num_dies; - } else { - POWER_LOG(ERR, "Invalid Power Management Environment(%d) set", env); - ret = -1; - goto out; - } + if (env <= RTE_DIM(uncore_env_str)) { + RTE_TAILQ_FOREACH(ops, &uncore_ops_list, next) + if (strncmp(ops->name, uncore_env_str[env], + 
RTE_POWER_UNCORE_DRIVER_NAMESZ) == 0) { + global_uncore_env = env; + global_uncore_ops = ops; + ret = 0; + goto out; + } + POWER_LOG(ERR, "Power Management (%s) not supported", + uncore_env_str[env]); + } else + POWER_LOG(ERR, "Invalid Power Management Environment"); - default_uncore_env = env; out: rte_spinlock_unlock(&global_env_cfg_lock); return ret; @@ -139,43 +87,43 @@ void rte_power_unset_uncore_env(void) { rte_spinlock_lock(&global_env_cfg_lock); - default_uncore_env = RTE_UNCORE_PM_ENV_NOT_SET; - reset_power_uncore_function_ptrs(); + global_uncore_env = RTE_UNCORE_PM_ENV_NOT_SET; rte_spinlock_unlock(&global_env_cfg_lock); } enum rte_uncore_power_mgmt_env rte_power_get_uncore_env(void) { - return default_uncore_env; + return global_uncore_env; } int rte_power_uncore_init(unsigned int pkg, unsigned int die) { int ret = -1; - - switch (default_uncore_env) { - case RTE_UNCORE_PM_ENV_INTEL_UNCORE: - return power_intel_uncore_init(pkg, die); - default: - POWER_LOG(INFO, "Uncore Env isn't set yet!"); - break; - } - - /* Auto detect Environment */ - POWER_LOG(INFO, "Attempting to initialise Intel Uncore power mgmt..."); - ret = power_intel_uncore_init(pkg, die); - if (ret == 0) { - rte_power_set_uncore_env(RTE_UNCORE_PM_ENV_INTEL_UNCORE); - goto out; - } - - if (default_uncore_env == RTE_UNCORE_PM_ENV_NOT_SET) { - POWER_LOG(ERR, "Unable to set Power Management Environment " - "for package %u Die %u", pkg, die); - ret = 0; - } + struct rte_power_uncore_ops *ops; + uint8_t env; + + if ((global_uncore_env != RTE_UNCORE_PM_ENV_NOT_SET) && + (global_uncore_env != RTE_UNCORE_PM_ENV_AUTO_DETECT)) + return global_uncore_ops->init(pkg, die); + + /* Auto Detect Environment */ + RTE_TAILQ_FOREACH(ops, &uncore_ops_list, next) + if (ops) { + POWER_LOG(INFO, + "Attempting to initialise %s power management...", + ops->name); + ret = ops->init(pkg, die); + if (ret == 0) { + for (env = 0; env < RTE_DIM(uncore_env_str); env++) + if (strncmp(ops->name, uncore_env_str[env], + RTE_POWER_UNCORE_DRIVER_NAMESZ) == 0) { + rte_power_set_uncore_env(env); + goto out; + } + } + } out: return ret; } @@ -183,12 +131,69 @@ rte_power_uncore_init(unsigned int pkg, unsigned int die) int rte_power_uncore_exit(unsigned int pkg, unsigned int die) { - switch (default_uncore_env) { - case RTE_UNCORE_PM_ENV_INTEL_UNCORE: - return power_intel_uncore_exit(pkg, die); - default: - POWER_LOG(ERR, "Uncore Env has not been set, unable to exit gracefully"); - break; - } + if ((global_uncore_env != RTE_UNCORE_PM_ENV_NOT_SET) && + global_uncore_ops) + return global_uncore_ops->exit(pkg, die); + + POWER_LOG(ERR, + "Uncore Env has not been set, unable to exit gracefully"); + return -1; } + +uint32_t +rte_power_get_uncore_freq(unsigned int pkg, unsigned int die) +{ + RTE_ASSERT(global_uncore_ops != NULL); + return global_uncore_ops->get_freq(pkg, die); +} + +int +rte_power_set_uncore_freq(unsigned int pkg, unsigned int die, uint32_t index) +{ + RTE_ASSERT(global_uncore_ops != NULL); + return global_uncore_ops->set_freq(pkg, die, index); +} + +int +rte_power_uncore_freq_max(unsigned int pkg, unsigned int die) +{ + RTE_ASSERT(global_uncore_ops != NULL); + return global_uncore_ops->freq_max(pkg, die); +} + +int +rte_power_uncore_freq_min(unsigned int pkg, unsigned int die) +{ + RTE_ASSERT(global_uncore_ops != NULL); + return global_uncore_ops->freq_min(pkg, die); +} + +int +rte_power_uncore_freqs(unsigned int pkg, unsigned int die, + uint32_t *freqs, uint32_t num) +{ + RTE_ASSERT(global_uncore_ops != NULL); + return 
global_uncore_ops->get_avail_freqs(pkg, die, freqs, num); +} + +int +rte_power_uncore_get_num_freqs(unsigned int pkg, unsigned int die) +{ + RTE_ASSERT(global_uncore_ops != NULL); + return global_uncore_ops->get_num_freqs(pkg, die); +} + +unsigned int +rte_power_uncore_get_num_pkgs(void) +{ + RTE_ASSERT(global_uncore_ops != NULL); + return global_uncore_ops->get_num_pkgs(); +} + +unsigned int +rte_power_uncore_get_num_dies(unsigned int pkg) +{ + RTE_ASSERT(global_uncore_ops != NULL); + return global_uncore_ops->get_num_dies(pkg); +} diff --git a/lib/power/rte_power_uncore.h b/lib/power/rte_power_uncore.h index 99859042dd..dfeade77e9 100644 --- a/lib/power/rte_power_uncore.h +++ b/lib/power/rte_power_uncore.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: BSD-3-Clause * Copyright(c) 2022 Intel Corporation - * Copyright(c) 2023 AMD Corporation + * Copyright(c) 2024 Advanced Micro Devices, Inc. */ #ifndef RTE_POWER_UNCORE_H @@ -8,11 +8,10 @@ /** * @file - * RTE Uncore Frequency Management + * Uncore Frequency Management */ -#include -#include "rte_power.h" +#include "power_uncore_ops.h" #ifdef __cplusplus extern "C" { @@ -116,9 +115,7 @@ rte_power_uncore_exit(unsigned int pkg, unsigned int die); * The current index of available frequencies. * If error, it will return 'RTE_POWER_INVALID_FREQ_INDEX = (~0)'. */ -typedef uint32_t (*rte_power_get_uncore_freq_t)(unsigned int pkg, unsigned int die); - -extern rte_power_get_uncore_freq_t rte_power_get_uncore_freq; +uint32_t rte_power_get_uncore_freq(unsigned int pkg, unsigned int die); /** * Set minimum and maximum uncore frequency for specified die on a package @@ -141,12 +138,14 @@ extern rte_power_get_uncore_freq_t rte_power_get_uncore_freq; * - 0 on success without frequency changed. * - Negative on error. */ -typedef int (*rte_power_set_uncore_freq_t)(unsigned int pkg, unsigned int die, uint32_t index); - -extern rte_power_set_uncore_freq_t rte_power_set_uncore_freq; +int rte_power_set_uncore_freq(unsigned int pkg, unsigned int die, uint32_t index); /** - * Function pointer definition for generic frequency change functions. + * Set minimum and maximum uncore frequency for specified die on a package + * to maximum value according to the available frequencies. + * It should be protected outside of this function for threadsafe. + * + * This function should NOT be called in the fast path. * * @param pkg * Package number. @@ -160,16 +159,7 @@ extern rte_power_set_uncore_freq_t rte_power_set_uncore_freq; * - 0 on success without frequency changed. * - Negative on error. */ -typedef int (*rte_power_uncore_freq_change_t)(unsigned int pkg, unsigned int die); - -/** - * Set minimum and maximum uncore frequency for specified die on a package - * to maximum value according to the available frequencies. - * It should be protected outside of this function for threadsafe. - * - * This function should NOT be called in the fast path. - */ -extern rte_power_uncore_freq_change_t rte_power_uncore_freq_max; +int rte_power_uncore_freq_max(unsigned int pkg, unsigned int die); /** * Set minimum and maximum uncore frequency for specified die on a package @@ -177,8 +167,20 @@ extern rte_power_uncore_freq_change_t rte_power_uncore_freq_max; * It should be protected outside of this function for threadsafe. * * This function should NOT be called in the fast path. + * + * @param pkg + * Package number. + * Each physical CPU in a system is referred to as a package. + * @param die + * Die number. + * Each package can have several dies connected together via the uncore mesh. 
+ * + * @return + * - 1 on success with frequency changed. + * - 0 on success without frequency changed. + * - Negative on error. */ -extern rte_power_uncore_freq_change_t rte_power_uncore_freq_min; +int rte_power_uncore_freq_min(unsigned int pkg, unsigned int die); /** * Return the list of available frequencies in the index array. @@ -200,11 +202,10 @@ extern rte_power_uncore_freq_change_t rte_power_uncore_freq_min; * - The number of available index's in frequency array. * - Negative on error. */ -typedef int (*rte_power_uncore_freqs_t)(unsigned int pkg, unsigned int die, +__rte_experimental +int rte_power_uncore_freqs(unsigned int pkg, unsigned int die, uint32_t *freqs, uint32_t num); -extern rte_power_uncore_freqs_t rte_power_uncore_freqs; - /** * Return the list length of available frequencies in the index array. * @@ -221,9 +222,7 @@ extern rte_power_uncore_freqs_t rte_power_uncore_freqs; * - The number of available index's in frequency array. * - Negative on error. */ -typedef int (*rte_power_uncore_get_num_freqs_t)(unsigned int pkg, unsigned int die); - -extern rte_power_uncore_get_num_freqs_t rte_power_uncore_get_num_freqs; +int rte_power_uncore_get_num_freqs(unsigned int pkg, unsigned int die); /** * Return the number of packages (CPUs) on a system @@ -235,9 +234,7 @@ extern rte_power_uncore_get_num_freqs_t rte_power_uncore_get_num_freqs; * - Zero on error. * - Number of package on system on success. */ -typedef unsigned int (*rte_power_uncore_get_num_pkgs_t)(void); - -extern rte_power_uncore_get_num_pkgs_t rte_power_uncore_get_num_pkgs; +unsigned int rte_power_uncore_get_num_pkgs(void); /** * Return the number of dies for pakckages (CPUs) specified @@ -253,9 +250,7 @@ extern rte_power_uncore_get_num_pkgs_t rte_power_uncore_get_num_pkgs; * - Zero on error. * - Number of dies for package on sucecss. 
*/ -typedef unsigned int (*rte_power_uncore_get_num_dies_t)(unsigned int pkg); - -extern rte_power_uncore_get_num_dies_t rte_power_uncore_get_num_dies; +unsigned int rte_power_uncore_get_num_dies(unsigned int pkg); #ifdef __cplusplus } diff --git a/lib/power/version.map b/lib/power/version.map index c9a226614e..9a36046a64 100644 --- a/lib/power/version.map +++ b/lib/power/version.map @@ -16,8 +16,6 @@ DPDK_25 { rte_power_get_env; rte_power_get_freq; rte_power_get_uncore_freq; - rte_power_guest_channel_receive_msg; - rte_power_guest_channel_send_msg; rte_power_init; rte_power_pmd_mgmt_get_emptypoll_max; rte_power_pmd_mgmt_get_pause_duration; @@ -51,4 +49,23 @@ EXPERIMENTAL { rte_power_set_uncore_env; rte_power_uncore_freqs; rte_power_unset_uncore_env; + + # added in 24.11 + rte_power_qos_get_cpu_resume_latency; + rte_power_qos_set_cpu_resume_latency; +}; + +INTERNAL { + global: + + rte_power_register_cpufreq_ops; + rte_power_register_uncore_ops; + rte_power_logtype; + cpufreq_check_scaling_driver; + power_get_lcore_mapped_cpu_id; + power_set_governor; + open_core_sysfs_file; + read_core_sysfs_u32; + read_core_sysfs_s; + write_core_sysfs_s; }; diff --git a/lib/vhost/meson.build b/lib/vhost/meson.build index 41b622a9be..0004f283bb 100644 --- a/lib/vhost/meson.build +++ b/lib/vhost/meson.build @@ -16,23 +16,23 @@ elif (toolchain == 'icc' and cc.version().version_compare('>=16.0.0')) cflags += '-DVHOST_ICC_UNROLL_PRAGMA' endif dpdk_conf.set('RTE_LIBRTE_VHOST_POSTCOPY', cc.has_header('linux/userfaultfd.h')) -cflags += '-fno-strict-aliasing' +cflags += [ + '-fno-strict-aliasing', + '-Wno-address-of-packed-member', +] sources = files( 'fd_man.c', 'iotlb.c', 'socket.c', 'vdpa.c', + 'vduse.c', 'vhost.c', 'vhost_crypto.c', 'vhost_user.c', 'virtio_net.c', 'virtio_net_ctrl.c', ) -if cc.has_header('linux/vduse.h') - sources += files('vduse.c') - cflags += '-DVHOST_HAS_VDUSE' -endif headers = files( 'rte_vdpa.h', 'rte_vhost.h', diff --git a/lib/vhost/vduse.c b/lib/vhost/vduse.c index f9ac317438..eaf3146b95 100644 --- a/lib/vhost/vduse.c +++ b/lib/vhost/vduse.c @@ -8,7 +8,7 @@ #include -#include +#include #include #include @@ -431,6 +431,9 @@ vduse_reconnect_path_init(void) const char *directory; int ret; + if (vduse_reconnect_path_set == true) + return 0; + /* from RuntimeDirectory= see systemd.exec */ directory = getenv("RUNTIME_DIRECTORY"); if (directory == NULL) { @@ -462,6 +465,100 @@ vduse_reconnect_path_init(void) VHOST_CONFIG_LOG("vduse", INFO, "Created VDUSE reconnect directory in %s", vduse_reconnect_dir); + vduse_reconnect_path_set = true; + + return 0; +} + +static int +vduse_reconnect_log_map(struct virtio_net *dev, bool create) +{ + char reco_file[PATH_MAX]; + int fd, ret; + const char *name = dev->ifname + strlen("/dev/vduse/"); + + if (vduse_reconnect_path_init() < 0) { + VHOST_CONFIG_LOG(dev->ifname, ERR, "Failed to initialize reconnect path"); + return -1; + } + + ret = snprintf(reco_file, sizeof(reco_file), "%s/%s", vduse_reconnect_dir, name); + if (ret < 0 || ret == sizeof(reco_file)) { + VHOST_CONFIG_LOG(dev->ifname, ERR, "Failed to create vduse reconnect path name"); + return -1; + } + + if (create) { + fd = open(reco_file, O_CREAT | O_EXCL | O_RDWR, 0600); + if (fd < 0) { + if (errno == EEXIST) { + VHOST_CONFIG_LOG(dev->ifname, ERR, "Reconnect file %s exists but not the device", + reco_file); + } else { + VHOST_CONFIG_LOG(dev->ifname, ERR, "Failed to open reconnect file %s (%s)", + reco_file, strerror(errno)); + } + return -1; + } + + ret = ftruncate(fd, 
sizeof(*dev->reconnect_log)); + if (ret < 0) { + VHOST_CONFIG_LOG(dev->ifname, ERR, "Failed to truncate reconnect file %s (%s)", + reco_file, strerror(errno)); + goto out_close; + } + } else { + fd = open(reco_file, O_RDWR, 0600); + if (fd < 0) { + if (errno == ENOENT) + VHOST_CONFIG_LOG(dev->ifname, ERR, "Missing reconnect file (%s)", reco_file); + else + VHOST_CONFIG_LOG(dev->ifname, ERR, "Failed to open reconnect file %s (%s)", + reco_file, strerror(errno)); + return -1; + } + } + + dev->reconnect_log = mmap(NULL, sizeof(*dev->reconnect_log), PROT_READ | PROT_WRITE, + MAP_SHARED, fd, 0); + if (dev->reconnect_log == MAP_FAILED) { + VHOST_CONFIG_LOG(dev->ifname, ERR, "Failed to mmap reconnect file %s (%s)", + reco_file, strerror(errno)); + ret = -1; + goto out_close; + } + ret = 0; + +out_close: + close(fd); + + return ret; +} + +static int +vduse_reconnect_log_check(struct virtio_net *dev, uint64_t features, uint32_t total_queues) +{ + if (dev->reconnect_log->version != VHOST_RECONNECT_VERSION) { + VHOST_CONFIG_LOG(dev->ifname, ERR, + "Version mismatch between backend (0x%x) & reconnection file (0x%x)", + VHOST_RECONNECT_VERSION, dev->reconnect_log->version); + return -1; + } + + if ((dev->reconnect_log->features & features) != dev->reconnect_log->features) { + VHOST_CONFIG_LOG(dev->ifname, ERR, + "Features mismatch between backend (0x%" PRIx64 ") & reconnection file (0x%" PRIx64 ")", + features, dev->reconnect_log->features); + return -1; + } + + if (dev->reconnect_log->nr_vrings != total_queues) { + VHOST_CONFIG_LOG(dev->ifname, ERR, + "Queues number mismatch between backend (%u) and reconnection file (%u)", + total_queues, dev->reconnect_log->nr_vrings); + return -1; + } + return 0; } @@ -476,19 +573,56 @@ vduse_reconnect_handler(int fd, void *arg, int *remove) *remove = 1; } +static int +vduse_reconnect_start_device(struct virtio_net *dev) +{ + int fd, ret; + + /* + * Make vduse_device_start() being executed in the same + * context for both reconnection and fresh startup. 
+ */ + fd = eventfd(0, EFD_NONBLOCK | EFD_CLOEXEC); + if (fd < 0) { + VHOST_CONFIG_LOG(dev->ifname, ERR, "Failed to create reconnect efd: %s", + strerror(errno)); + ret = -1; + goto out_err; + } + + ret = fdset_add(vduse.fdset, fd, vduse_reconnect_handler, NULL, dev); + if (ret) { + VHOST_CONFIG_LOG(dev->ifname, ERR, "Failed to add reconnect efd %d to vduse fdset", + fd); + goto out_err_close; + } + + ret = eventfd_write(fd, (eventfd_t)1); + if (ret < 0) { + VHOST_CONFIG_LOG(dev->ifname, ERR, "Failed to write to reconnect eventfd"); + goto out_err_fdset; + } + + return 0; + +out_err_fdset: + fdset_del(vduse.fdset, fd); +out_err_close: + close(fd); +out_err: + return ret; +} + int vduse_device_create(const char *path, bool compliant_ol_flags) { - int control_fd, dev_fd, vid, ret, reco_fd; + int control_fd, dev_fd, vid, ret; uint32_t i, max_queue_pairs, total_queues; struct virtio_net *dev; struct virtio_net_config vnet_config = {{ 0 }}; uint64_t ver = VHOST_VDUSE_API_VERSION; uint64_t features; - struct vduse_dev_config *dev_config = NULL; const char *name = path + strlen("/dev/vduse/"); - char reconnect_file[PATH_MAX]; - struct vhost_reconnect_data *reconnect_log = NULL; bool reconnect = false; if (vduse.fdset == NULL) { @@ -499,20 +633,6 @@ vduse_device_create(const char *path, bool compliant_ol_flags) } } - if (vduse_reconnect_path_set == false) { - if (vduse_reconnect_path_init() < 0) { - VHOST_CONFIG_LOG(path, ERR, "failed to initialize reconnect path"); - return -1; - } - vduse_reconnect_path_set = true; - } - - ret = snprintf(reconnect_file, sizeof(reconnect_file), "%s/%s", vduse_reconnect_dir, name); - if (ret < 0 || ret == sizeof(reconnect_file)) { - VHOST_CONFIG_LOG(name, ERR, "Failed to create vduse reconnect path name"); - return -1; - } - control_fd = open(VDUSE_CTRL_PATH, O_RDWR); if (control_fd < 0) { VHOST_CONFIG_LOG(name, ERR, "Failed to open %s: %s", @@ -530,13 +650,13 @@ vduse_device_create(const char *path, bool compliant_ol_flags) ret = rte_vhost_driver_get_features(path, &features); if (ret < 0) { VHOST_CONFIG_LOG(name, ERR, "Failed to get backend features"); - goto out_free; + goto out_ctrl_close; } ret = rte_vhost_driver_get_queue_num(path, &max_queue_pairs); if (ret < 0) { VHOST_CONFIG_LOG(name, ERR, "Failed to get max queue pairs"); - goto out_free; + goto out_ctrl_close; } VHOST_CONFIG_LOG(path, INFO, "VDUSE max queue pairs: %u", max_queue_pairs); @@ -547,86 +667,12 @@ vduse_device_create(const char *path, bool compliant_ol_flags) else total_queues += 1; /* Includes ctrl queue */ - if (access(path, F_OK) == 0) { + dev_fd = open(path, O_RDWR); + if (dev_fd >= 0) { VHOST_CONFIG_LOG(name, INFO, "Device already exists, reconnecting..."); reconnect = true; - - reco_fd = open(reconnect_file, O_RDWR, 0600); - if (reco_fd < 0) { - if (errno == ENOENT) - VHOST_CONFIG_LOG(name, ERR, "Missing reconnect file (%s)", - reconnect_file); - else - VHOST_CONFIG_LOG(name, ERR, "Failed to open reconnect file %s (%s)", - reconnect_file, strerror(errno)); - ret = -1; - goto out_ctrl_close; - } - - reconnect_log = mmap(NULL, sizeof(*reconnect_log), PROT_READ | PROT_WRITE, - MAP_SHARED, reco_fd, 0); - close(reco_fd); - if (reconnect_log == MAP_FAILED) { - VHOST_CONFIG_LOG(name, ERR, "Failed to mmap reconnect file %s (%s)", - reconnect_file, strerror(errno)); - ret = -1; - goto out_ctrl_close; - } - - if (reconnect_log->version != VHOST_RECONNECT_VERSION) { - VHOST_CONFIG_LOG(name, ERR, - "Version mismatch between backend (0x%x) & reconnection file (0x%x)", - VHOST_RECONNECT_VERSION, 
reconnect_log->version); - } - - if ((reconnect_log->features & features) != reconnect_log->features) { - VHOST_CONFIG_LOG(name, ERR, - "Features mismatch between backend (0x%" PRIx64 ") & reconnection file (0x%" PRIx64 ")", - features, reconnect_log->features); - ret = -1; - goto out_ctrl_close; - } - - if (reconnect_log->nr_vrings != total_queues) { - VHOST_CONFIG_LOG(name, ERR, - "Queues number mismatch between backend (%u) and reconnection file (%u)", - total_queues, reconnect_log->nr_vrings); - ret = -1; - goto out_ctrl_close; - } - } else { - reco_fd = open(reconnect_file, O_CREAT | O_EXCL | O_RDWR, 0600); - if (reco_fd < 0) { - if (errno == EEXIST) { - VHOST_CONFIG_LOG(name, ERR, "Reconnect file %s exists but not the device", - reconnect_file); - } else { - VHOST_CONFIG_LOG(name, ERR, "Failed to open reconnect file %s (%s)", - reconnect_file, strerror(errno)); - } - ret = -1; - goto out_ctrl_close; - } - - ret = ftruncate(reco_fd, sizeof(*reconnect_log)); - if (ret < 0) { - VHOST_CONFIG_LOG(name, ERR, "Failed to truncate reconnect file %s (%s)", - reconnect_file, strerror(errno)); - close(reco_fd); - goto out_ctrl_close; - } - - reconnect_log = mmap(NULL, sizeof(*reconnect_log), PROT_READ | PROT_WRITE, - MAP_SHARED, reco_fd, 0); - close(reco_fd); - if (reconnect_log == MAP_FAILED) { - VHOST_CONFIG_LOG(name, ERR, "Failed to mmap reconnect file %s (%s)", - reconnect_file, strerror(errno)); - ret = -1; - goto out_ctrl_close; - } - - reconnect_log->version = VHOST_RECONNECT_VERSION; + } else if (errno == ENOENT) { + struct vduse_dev_config *dev_config; dev_config = malloc(offsetof(struct vduse_dev_config, config) + sizeof(vnet_config)); @@ -649,24 +695,26 @@ vduse_device_create(const char *path, bool compliant_ol_flags) memcpy(dev_config->config, &vnet_config, sizeof(vnet_config)); ret = ioctl(control_fd, VDUSE_CREATE_DEV, dev_config); + free(dev_config); + dev_config = NULL; if (ret < 0) { VHOST_CONFIG_LOG(name, ERR, "Failed to create VDUSE device: %s", strerror(errno)); - goto out_free; + goto out_ctrl_close; } - memcpy(&reconnect_log->config, &vnet_config, sizeof(vnet_config)); - reconnect_log->nr_vrings = total_queues; - free(dev_config); - dev_config = NULL; - } - - dev_fd = open(path, O_RDWR); - if (dev_fd < 0) { + dev_fd = open(path, O_RDWR); + if (dev_fd < 0) { + VHOST_CONFIG_LOG(name, ERR, "Failed to open newly created device %s: %s", + path, strerror(errno)); + ret = -1; + goto out_ctrl_close; + } + } else { VHOST_CONFIG_LOG(name, ERR, "Failed to open device %s: %s", path, strerror(errno)); ret = -1; - goto out_dev_close; + goto out_ctrl_close; } ret = fcntl(dev_fd, F_SETFL, O_NONBLOCK); @@ -686,15 +734,28 @@ vduse_device_create(const char *path, bool compliant_ol_flags) dev = get_device(vid); if (!dev) { ret = -1; - goto out_dev_close; + goto out_dev_destroy; } strncpy(dev->ifname, path, IF_NAME_SZ - 1); dev->vduse_ctrl_fd = control_fd; dev->vduse_dev_fd = dev_fd; - dev->reconnect_log = reconnect_log; - if (reconnect) + + ret = vduse_reconnect_log_map(dev, !reconnect); + if (ret < 0) + goto out_dev_destroy; + + if (reconnect) { + ret = vduse_reconnect_log_check(dev, features, total_queues); + if (ret < 0) + goto out_log_unmap; + dev->status = dev->reconnect_log->status; + } else { + dev->reconnect_log->version = VHOST_RECONNECT_VERSION; + dev->reconnect_log->nr_vrings = total_queues; + memcpy(&dev->reconnect_log->config, &vnet_config, sizeof(vnet_config)); + } vhost_setup_virtio_net(dev->vid, true, compliant_ol_flags, true, true); @@ -705,11 +766,11 @@ 
vduse_device_create(const char *path, bool compliant_ol_flags) ret = alloc_vring_queue(dev, i); if (ret) { VHOST_CONFIG_LOG(name, ERR, "Failed to alloc vring %d metadata", i); - goto out_dev_destroy; + goto out_log_unmap; } vq = dev->virtqueue[i]; - vq->reconnect_log = &reconnect_log->vring[i]; + vq->reconnect_log = &dev->reconnect_log->vring[i]; if (reconnect) continue; @@ -720,7 +781,7 @@ vduse_device_create(const char *path, bool compliant_ol_flags) ret = ioctl(dev->vduse_dev_fd, VDUSE_VQ_SETUP, &vq_cfg); if (ret) { VHOST_CONFIG_LOG(name, ERR, "Failed to set-up VQ %d", i); - goto out_dev_destroy; + goto out_log_unmap; } } @@ -730,46 +791,25 @@ vduse_device_create(const char *path, bool compliant_ol_flags) if (ret) { VHOST_CONFIG_LOG(name, ERR, "Failed to add fd %d to vduse fdset", dev->vduse_dev_fd); - goto out_dev_destroy; + goto out_log_unmap; } if (reconnect && dev->status & VIRTIO_DEVICE_STATUS_DRIVER_OK) { - /* - * Make vduse_device_start() being executed in the same - * context for both reconnection and fresh startup. - */ - reco_fd = eventfd(0, EFD_NONBLOCK | EFD_CLOEXEC); - if (reco_fd < 0) { - VHOST_CONFIG_LOG(name, ERR, "Failed to create reco_fd: %s", - strerror(errno)); - ret = -1; - goto out_dev_destroy; - } - - ret = fdset_add(vduse.fdset, reco_fd, vduse_reconnect_handler, NULL, dev); - if (ret) { - VHOST_CONFIG_LOG(name, ERR, "Failed to add reconnect fd %d to vduse fdset", - reco_fd); - goto out_dev_destroy; - } - - ret = eventfd_write(reco_fd, (eventfd_t)1); - if (ret < 0) { - VHOST_CONFIG_LOG(name, ERR, "Failed to write to reconnect eventfd"); - goto out_dev_destroy; - } + ret = vduse_reconnect_start_device(dev); + if (ret) + goto out_log_unmap; } return 0; +out_log_unmap: + munmap(dev->reconnect_log, sizeof(*dev->reconnect_log)); out_dev_destroy: vhost_destroy_device(vid); out_dev_close: if (dev_fd >= 0) close(dev_fd); ioctl(control_fd, VDUSE_DESTROY_DEV, name); -out_free: - free(dev_config); out_ctrl_close: close(control_fd); diff --git a/lib/vhost/vduse.h b/lib/vhost/vduse.h index 0d8f3f1205..47ca97a064 100644 --- a/lib/vhost/vduse.h +++ b/lib/vhost/vduse.h @@ -9,29 +9,7 @@ #define VDUSE_NET_SUPPORTED_FEATURES VIRTIO_NET_SUPPORTED_FEATURES -#ifdef VHOST_HAS_VDUSE - int vduse_device_create(const char *path, bool compliant_ol_flags); int vduse_device_destroy(const char *path); -#else - -static inline int -vduse_device_create(const char *path, bool compliant_ol_flags) -{ - RTE_SET_USED(compliant_ol_flags); - - VHOST_CONFIG_LOG(path, ERR, "VDUSE support disabled at build time"); - return -1; -} - -static inline int -vduse_device_destroy(const char *path) -{ - VHOST_CONFIG_LOG(path, ERR, "VDUSE support disabled at build time"); - return -1; -} - -#endif /* VHOST_HAS_VDUSE */ - #endif /* _VDUSE_H */ diff --git a/meson.build b/meson.build index fe9040369a..c59ab7430a 100644 --- a/meson.build +++ b/meson.build @@ -64,16 +64,20 @@ endif # configure the build, and make sure configs here and in config folder are # able to be included in any file. We also store a global array of include dirs # for passing to pmdinfogen scripts -global_inc = include_directories('.', 'config', +global_inc = [include_directories('.', 'config', 'lib/eal/include', 'lib/eal/@0@/include'.format(host_machine.system()), 'lib/eal/@0@/include'.format(arch_subdir), -) +)] # do configuration and get tool paths subdir('buildtools') subdir('config') +if is_linux + global_inc += include_directories('kernel/linux') +endif + # build libs and drivers subdir('lib') subdir('drivers')
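
As a usage illustration for the new PM QoS calls added above (again, not part of the patch), the sketch below requests the strictest CPU resume latency on every enabled lcore before relying on interrupt-driven Rx, and can later restore the kernel default. Only the rte_power_qos.h prototypes and constants from this series are used; the iteration macro and error handling are illustrative, and since both functions are experimental the application must build with ALLOW_EXPERIMENTAL_API.

#include <stdbool.h>

#include <rte_lcore.h>
#include <rte_power_qos.h>

/* Keep each enabled lcore in a shallow idle state while interrupt-driven
 * Rx is in use (strict == true), or lift the constraint again.
 */
static int
pin_resume_latency(bool strict)
{
	int latency = strict ? RTE_POWER_QOS_STRICT_LATENCY_VALUE :
			       RTE_POWER_QOS_RESUME_LATENCY_NO_CONSTRAINT;
	unsigned int lcore_id;
	int ret;

	RTE_LCORE_FOREACH(lcore_id) {
		ret = rte_power_qos_set_cpu_resume_latency(lcore_id, latency);
		if (ret != 0)
			return ret;
		/* Read the value back; a negative return means the sysfs
		 * query failed for this core.
		 */
		if (rte_power_qos_get_cpu_resume_latency(lcore_id) < 0)
			return -1;
	}

	return 0;
}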
teid %d
type "); - print_elem_type(stream, data->data.elem_type); - fprintf(stream, "
teid%d
type%s
TXQ%u