DPDK  18.08.0
rte_ethdev.h
Go to the documentation of this file.
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2017 Intel Corporation
3  */
4 
5 #ifndef _RTE_ETHDEV_H_
6 #define _RTE_ETHDEV_H_
7 
140 #ifdef __cplusplus
141 extern "C" {
142 #endif
143 
144 #include <stdint.h>
145 
146 /* Use this macro to check if LRO API is supported */
147 #define RTE_ETHDEV_HAS_LRO_SUPPORT
148 
149 #include <rte_compat.h>
150 #include <rte_log.h>
151 #include <rte_interrupts.h>
152 #include <rte_dev.h>
153 #include <rte_devargs.h>
154 #include <rte_errno.h>
155 #include <rte_common.h>
156 #include <rte_config.h>
157 
158 #include "rte_ether.h"
159 #include "rte_eth_ctrl.h"
160 #include "rte_dev_info.h"
161 
162 extern int rte_eth_dev_logtype;
163 
164 #define RTE_ETHDEV_LOG(level, ...) \
165  rte_log(RTE_LOG_ ## level, rte_eth_dev_logtype, "" __VA_ARGS__)
166 
167 struct rte_mbuf;
168 
176  uint64_t ipackets;
177  uint64_t opackets;
178  uint64_t ibytes;
179  uint64_t obytes;
180  uint64_t imissed;
184  uint64_t ierrors;
185  uint64_t oerrors;
186  uint64_t rx_nombuf;
187  uint64_t q_ipackets[RTE_ETHDEV_QUEUE_STAT_CNTRS];
189  uint64_t q_opackets[RTE_ETHDEV_QUEUE_STAT_CNTRS];
191  uint64_t q_ibytes[RTE_ETHDEV_QUEUE_STAT_CNTRS];
193  uint64_t q_obytes[RTE_ETHDEV_QUEUE_STAT_CNTRS];
195  uint64_t q_errors[RTE_ETHDEV_QUEUE_STAT_CNTRS];
197 };
198 
202 #define ETH_LINK_SPEED_AUTONEG (0 << 0)
203 #define ETH_LINK_SPEED_FIXED (1 << 0)
204 #define ETH_LINK_SPEED_10M_HD (1 << 1)
205 #define ETH_LINK_SPEED_10M (1 << 2)
206 #define ETH_LINK_SPEED_100M_HD (1 << 3)
207 #define ETH_LINK_SPEED_100M (1 << 4)
208 #define ETH_LINK_SPEED_1G (1 << 5)
209 #define ETH_LINK_SPEED_2_5G (1 << 6)
210 #define ETH_LINK_SPEED_5G (1 << 7)
211 #define ETH_LINK_SPEED_10G (1 << 8)
212 #define ETH_LINK_SPEED_20G (1 << 9)
213 #define ETH_LINK_SPEED_25G (1 << 10)
214 #define ETH_LINK_SPEED_40G (1 << 11)
215 #define ETH_LINK_SPEED_50G (1 << 12)
216 #define ETH_LINK_SPEED_56G (1 << 13)
217 #define ETH_LINK_SPEED_100G (1 << 14)
222 #define ETH_SPEED_NUM_NONE 0
223 #define ETH_SPEED_NUM_10M 10
224 #define ETH_SPEED_NUM_100M 100
225 #define ETH_SPEED_NUM_1G 1000
226 #define ETH_SPEED_NUM_2_5G 2500
227 #define ETH_SPEED_NUM_5G 5000
228 #define ETH_SPEED_NUM_10G 10000
229 #define ETH_SPEED_NUM_20G 20000
230 #define ETH_SPEED_NUM_25G 25000
231 #define ETH_SPEED_NUM_40G 40000
232 #define ETH_SPEED_NUM_50G 50000
233 #define ETH_SPEED_NUM_56G 56000
234 #define ETH_SPEED_NUM_100G 100000
239 __extension__
240 struct rte_eth_link {
241  uint32_t link_speed;
242  uint16_t link_duplex : 1;
243  uint16_t link_autoneg : 1;
244  uint16_t link_status : 1;
245 } __attribute__((aligned(8)));
247 /* Utility constants */
248 #define ETH_LINK_HALF_DUPLEX 0
249 #define ETH_LINK_FULL_DUPLEX 1
250 #define ETH_LINK_DOWN 0
251 #define ETH_LINK_UP 1
252 #define ETH_LINK_FIXED 0
253 #define ETH_LINK_AUTONEG 1
/**
 * Per-queue ring threshold configuration. Exact semantics and valid ranges
 * are hardware/driver specific — consult the PMD documentation.
 */
259 struct rte_eth_thresh {
260  uint8_t pthresh; /**< Ring prefetch threshold. */
261  uint8_t hthresh; /**< Ring host threshold. */
262  uint8_t wthresh; /**< Ring write-back threshold. */
263 };
264 
268 #define ETH_MQ_RX_RSS_FLAG 0x1
269 #define ETH_MQ_RX_DCB_FLAG 0x2
270 #define ETH_MQ_RX_VMDQ_FLAG 0x4
271 
279 
283  ETH_MQ_RX_DCB = ETH_MQ_RX_DCB_FLAG,
285  ETH_MQ_RX_DCB_RSS = ETH_MQ_RX_RSS_FLAG | ETH_MQ_RX_DCB_FLAG,
286 
288  ETH_MQ_RX_VMDQ_ONLY = ETH_MQ_RX_VMDQ_FLAG,
290  ETH_MQ_RX_VMDQ_RSS = ETH_MQ_RX_RSS_FLAG | ETH_MQ_RX_VMDQ_FLAG,
292  ETH_MQ_RX_VMDQ_DCB = ETH_MQ_RX_VMDQ_FLAG | ETH_MQ_RX_DCB_FLAG,
295  ETH_MQ_RX_VMDQ_FLAG,
296 };
297 
301 #define ETH_RSS ETH_MQ_RX_RSS
302 #define VMDQ_DCB ETH_MQ_RX_VMDQ_DCB
303 #define ETH_DCB_RX ETH_MQ_RX_DCB
304 
314 };
315 
319 #define ETH_DCB_NONE ETH_MQ_TX_NONE
320 #define ETH_VMDQ_DCB_TX ETH_MQ_TX_VMDQ_DCB
321 #define ETH_DCB_TX ETH_MQ_TX_DCB
322 
329  uint32_t max_rx_pkt_len;
330  uint16_t split_hdr_size;
336  uint64_t offloads;
337 };
338 
344  ETH_VLAN_TYPE_UNKNOWN = 0,
347  ETH_VLAN_TYPE_MAX,
348 };
349 
355  uint64_t ids[64];
356 };
357 
376  uint8_t *rss_key;
377  uint8_t rss_key_len;
378  uint64_t rss_hf;
379 };
380 
381 /*
382  * The RSS offload types are defined based on flow types which are defined
383  * in rte_eth_ctrl.h. Different NIC hardwares may support different RSS offload
384  * types. The supported flow types or RSS offload types can be queried by
385  * rte_eth_dev_info_get().
386  */
387 #define ETH_RSS_IPV4 (1ULL << RTE_ETH_FLOW_IPV4)
388 #define ETH_RSS_FRAG_IPV4 (1ULL << RTE_ETH_FLOW_FRAG_IPV4)
389 #define ETH_RSS_NONFRAG_IPV4_TCP (1ULL << RTE_ETH_FLOW_NONFRAG_IPV4_TCP)
390 #define ETH_RSS_NONFRAG_IPV4_UDP (1ULL << RTE_ETH_FLOW_NONFRAG_IPV4_UDP)
391 #define ETH_RSS_NONFRAG_IPV4_SCTP (1ULL << RTE_ETH_FLOW_NONFRAG_IPV4_SCTP)
392 #define ETH_RSS_NONFRAG_IPV4_OTHER (1ULL << RTE_ETH_FLOW_NONFRAG_IPV4_OTHER)
393 #define ETH_RSS_IPV6 (1ULL << RTE_ETH_FLOW_IPV6)
394 #define ETH_RSS_FRAG_IPV6 (1ULL << RTE_ETH_FLOW_FRAG_IPV6)
395 #define ETH_RSS_NONFRAG_IPV6_TCP (1ULL << RTE_ETH_FLOW_NONFRAG_IPV6_TCP)
396 #define ETH_RSS_NONFRAG_IPV6_UDP (1ULL << RTE_ETH_FLOW_NONFRAG_IPV6_UDP)
397 #define ETH_RSS_NONFRAG_IPV6_SCTP (1ULL << RTE_ETH_FLOW_NONFRAG_IPV6_SCTP)
398 #define ETH_RSS_NONFRAG_IPV6_OTHER (1ULL << RTE_ETH_FLOW_NONFRAG_IPV6_OTHER)
399 #define ETH_RSS_L2_PAYLOAD (1ULL << RTE_ETH_FLOW_L2_PAYLOAD)
400 #define ETH_RSS_IPV6_EX (1ULL << RTE_ETH_FLOW_IPV6_EX)
401 #define ETH_RSS_IPV6_TCP_EX (1ULL << RTE_ETH_FLOW_IPV6_TCP_EX)
402 #define ETH_RSS_IPV6_UDP_EX (1ULL << RTE_ETH_FLOW_IPV6_UDP_EX)
403 #define ETH_RSS_PORT (1ULL << RTE_ETH_FLOW_PORT)
404 #define ETH_RSS_VXLAN (1ULL << RTE_ETH_FLOW_VXLAN)
405 #define ETH_RSS_GENEVE (1ULL << RTE_ETH_FLOW_GENEVE)
406 #define ETH_RSS_NVGRE (1ULL << RTE_ETH_FLOW_NVGRE)
407 
408 #define ETH_RSS_IP ( \
409  ETH_RSS_IPV4 | \
410  ETH_RSS_FRAG_IPV4 | \
411  ETH_RSS_NONFRAG_IPV4_OTHER | \
412  ETH_RSS_IPV6 | \
413  ETH_RSS_FRAG_IPV6 | \
414  ETH_RSS_NONFRAG_IPV6_OTHER | \
415  ETH_RSS_IPV6_EX)
416 
417 #define ETH_RSS_UDP ( \
418  ETH_RSS_NONFRAG_IPV4_UDP | \
419  ETH_RSS_NONFRAG_IPV6_UDP | \
420  ETH_RSS_IPV6_UDP_EX)
421 
422 #define ETH_RSS_TCP ( \
423  ETH_RSS_NONFRAG_IPV4_TCP | \
424  ETH_RSS_NONFRAG_IPV6_TCP | \
425  ETH_RSS_IPV6_TCP_EX)
426 
427 #define ETH_RSS_SCTP ( \
428  ETH_RSS_NONFRAG_IPV4_SCTP | \
429  ETH_RSS_NONFRAG_IPV6_SCTP)
430 
431 #define ETH_RSS_TUNNEL ( \
432  ETH_RSS_VXLAN | \
433  ETH_RSS_GENEVE | \
434  ETH_RSS_NVGRE)
435 
437 #define ETH_RSS_PROTO_MASK ( \
438  ETH_RSS_IPV4 | \
439  ETH_RSS_FRAG_IPV4 | \
440  ETH_RSS_NONFRAG_IPV4_TCP | \
441  ETH_RSS_NONFRAG_IPV4_UDP | \
442  ETH_RSS_NONFRAG_IPV4_SCTP | \
443  ETH_RSS_NONFRAG_IPV4_OTHER | \
444  ETH_RSS_IPV6 | \
445  ETH_RSS_FRAG_IPV6 | \
446  ETH_RSS_NONFRAG_IPV6_TCP | \
447  ETH_RSS_NONFRAG_IPV6_UDP | \
448  ETH_RSS_NONFRAG_IPV6_SCTP | \
449  ETH_RSS_NONFRAG_IPV6_OTHER | \
450  ETH_RSS_L2_PAYLOAD | \
451  ETH_RSS_IPV6_EX | \
452  ETH_RSS_IPV6_TCP_EX | \
453  ETH_RSS_IPV6_UDP_EX | \
454  ETH_RSS_PORT | \
455  ETH_RSS_VXLAN | \
456  ETH_RSS_GENEVE | \
457  ETH_RSS_NVGRE)
458 
459 /*
460  * Definitions used for redirection table entry size.
461  * Some RSS RETA sizes may not be supported by some drivers, check the
462  * documentation or the description of relevant functions for more details.
463  */
464 #define ETH_RSS_RETA_SIZE_64 64
465 #define ETH_RSS_RETA_SIZE_128 128
466 #define ETH_RSS_RETA_SIZE_256 256
467 #define ETH_RSS_RETA_SIZE_512 512
468 #define RTE_RETA_GROUP_SIZE 64
469 
470 /* Definitions used for VMDQ and DCB functionality */
471 #define ETH_VMDQ_MAX_VLAN_FILTERS 64
472 #define ETH_DCB_NUM_USER_PRIORITIES 8
473 #define ETH_VMDQ_DCB_NUM_QUEUES 128
474 #define ETH_DCB_NUM_QUEUES 128
476 /* DCB capability defines */
477 #define ETH_DCB_PG_SUPPORT 0x00000001
478 #define ETH_DCB_PFC_SUPPORT 0x00000002
480 /* Definitions used for VLAN Offload functionality */
481 #define ETH_VLAN_STRIP_OFFLOAD 0x0001
482 #define ETH_VLAN_FILTER_OFFLOAD 0x0002
483 #define ETH_VLAN_EXTEND_OFFLOAD 0x0004
485 /* Definitions used for mask VLAN setting */
486 #define ETH_VLAN_STRIP_MASK 0x0001
487 #define ETH_VLAN_FILTER_MASK 0x0002
488 #define ETH_VLAN_EXTEND_MASK 0x0004
489 #define ETH_VLAN_ID_MAX 0x0FFF
491 /* Definitions used for receive MAC address */
492 #define ETH_NUM_RECEIVE_MAC_ADDR 128
494 /* Definitions used for unicast hash */
495 #define ETH_VMDQ_NUM_UC_HASH_ARRAY 128
497 /* Definitions used for VMDQ pool rx mode setting */
498 #define ETH_VMDQ_ACCEPT_UNTAG 0x0001
499 #define ETH_VMDQ_ACCEPT_HASH_MC 0x0002
500 #define ETH_VMDQ_ACCEPT_HASH_UC 0x0004
501 #define ETH_VMDQ_ACCEPT_BROADCAST 0x0008
502 #define ETH_VMDQ_ACCEPT_MULTICAST 0x0010
505 #define ETH_MIRROR_MAX_VLANS 64
506 
507 #define ETH_MIRROR_VIRTUAL_POOL_UP 0x01
508 #define ETH_MIRROR_UPLINK_PORT 0x02
509 #define ETH_MIRROR_DOWNLINK_PORT 0x04
510 #define ETH_MIRROR_VLAN 0x08
511 #define ETH_MIRROR_VIRTUAL_POOL_DOWN 0x10
/**
 * Set of VLAN IDs selected for traffic mirroring.
 */
516 struct rte_eth_vlan_mirror {
517  uint64_t vlan_mask; /**< Bitmap: bit i set means vlan_id[i] below is valid. */
519  uint16_t vlan_id[ETH_MIRROR_MAX_VLANS]; /**< VLAN IDs to mirror (up to 64 entries). */
520 };
521 
526  uint8_t rule_type;
527  uint8_t dst_pool;
528  uint64_t pool_mask;
531 };
532 
540  uint64_t mask;
542  uint16_t reta[RTE_RETA_GROUP_SIZE];
544 };
545 
551  ETH_4_TCS = 4,
553 };
554 
564 };
565 
566 /* This structure may be extended in future. */
566 /* This structure may be extended in future. */
/** DCB receive configuration. */
567 struct rte_eth_dcb_rx_conf {
568  enum rte_eth_nb_tcs nb_tcs; /**< Number of traffic classes (see rte_eth_nb_tcs, e.g. ETH_4_TCS). */
570  uint8_t dcb_tc[ETH_DCB_NUM_USER_PRIORITIES]; /**< Traffic class for each of the 8 user priorities. */
571 };
572 
/** Combined VMDq + DCB transmit configuration. */
573 struct rte_eth_vmdq_dcb_tx_conf {
574  enum rte_eth_nb_pools nb_queue_pools; /**< Number of VMDq transmit pools. */
576  uint8_t dcb_tc[ETH_DCB_NUM_USER_PRIORITIES]; /**< Traffic class for each of the 8 user priorities. */
577 };
578 
/** DCB transmit configuration. */
579 struct rte_eth_dcb_tx_conf {
580  enum rte_eth_nb_tcs nb_tcs; /**< Number of traffic classes (see rte_eth_nb_tcs). */
582  uint8_t dcb_tc[ETH_DCB_NUM_USER_PRIORITIES]; /**< Traffic class for each of the 8 user priorities. */
583 };
584 
/** VMDq-only transmit configuration. */
585 struct rte_eth_vmdq_tx_conf {
586  enum rte_eth_nb_pools nb_queue_pools; /**< Number of VMDq transmit pools. */
587 };
588 
603  uint8_t default_pool;
604  uint8_t nb_pool_maps;
605  struct {
606  uint16_t vlan_id;
607  uint64_t pools;
611 };
612 
634  uint8_t default_pool;
636  uint8_t nb_pool_maps;
637  uint32_t rx_mode;
638  struct {
639  uint16_t vlan_id;
640  uint64_t pools;
642 };
643 
654  uint64_t offloads;
655 
656  /* For i40e specifically */
657  uint16_t pvid;
658  __extension__
659  uint8_t hw_vlan_reject_tagged : 1,
665 };
666 
672  uint16_t rx_free_thresh;
673  uint8_t rx_drop_en;
680  uint64_t offloads;
681 };
682 
688  uint16_t tx_rs_thresh;
689  uint16_t tx_free_thresh;
698  uint64_t offloads;
699 };
700 
705  uint16_t nb_max;
706  uint16_t nb_min;
707  uint16_t nb_align;
717  uint16_t nb_seg_max;
718 
730  uint16_t nb_mtu_seg_max;
731 };
732 
741 };
742 
749  uint32_t high_water;
750  uint32_t low_water;
751  uint16_t pause_time;
752  uint16_t send_xon;
755  uint8_t autoneg;
756 };
757 
765  uint8_t priority;
766 };
767 
776 };
777 
785 };
786 
798  uint8_t drop_queue;
799  struct rte_eth_fdir_masks mask;
802 };
803 
812  uint16_t udp_port;
813  uint8_t prot_type;
814 };
815 
821  uint32_t lsc:1;
823  uint32_t rxq:1;
825  uint32_t rmv:1;
826 };
827 
833 struct rte_eth_conf {
834  uint32_t link_speeds;
843  uint32_t lpbk_mode;
848  struct {
852  struct rte_eth_dcb_rx_conf dcb_rx_conf;
856  } rx_adv_conf;
857  union {
858  struct rte_eth_vmdq_dcb_tx_conf vmdq_dcb_tx_conf;
860  struct rte_eth_dcb_tx_conf dcb_tx_conf;
862  struct rte_eth_vmdq_tx_conf vmdq_tx_conf;
864  } tx_adv_conf;
870 };
871 
881 #define DEV_RX_OFFLOAD_VLAN_STRIP 0x00000001
882 #define DEV_RX_OFFLOAD_IPV4_CKSUM 0x00000002
883 #define DEV_RX_OFFLOAD_UDP_CKSUM 0x00000004
884 #define DEV_RX_OFFLOAD_TCP_CKSUM 0x00000008
885 #define DEV_RX_OFFLOAD_TCP_LRO 0x00000010
886 #define DEV_RX_OFFLOAD_QINQ_STRIP 0x00000020
887 #define DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM 0x00000040
888 #define DEV_RX_OFFLOAD_MACSEC_STRIP 0x00000080
889 #define DEV_RX_OFFLOAD_HEADER_SPLIT 0x00000100
890 #define DEV_RX_OFFLOAD_VLAN_FILTER 0x00000200
891 #define DEV_RX_OFFLOAD_VLAN_EXTEND 0x00000400
892 #define DEV_RX_OFFLOAD_JUMBO_FRAME 0x00000800
893 #define DEV_RX_OFFLOAD_CRC_STRIP 0x00001000
894 #define DEV_RX_OFFLOAD_SCATTER 0x00002000
895 #define DEV_RX_OFFLOAD_TIMESTAMP 0x00004000
896 #define DEV_RX_OFFLOAD_SECURITY 0x00008000
897 
902 #define DEV_RX_OFFLOAD_KEEP_CRC 0x00010000
903 #define DEV_RX_OFFLOAD_CHECKSUM (DEV_RX_OFFLOAD_IPV4_CKSUM | \
904  DEV_RX_OFFLOAD_UDP_CKSUM | \
905  DEV_RX_OFFLOAD_TCP_CKSUM)
906 #define DEV_RX_OFFLOAD_VLAN (DEV_RX_OFFLOAD_VLAN_STRIP | \
907  DEV_RX_OFFLOAD_VLAN_FILTER | \
908  DEV_RX_OFFLOAD_VLAN_EXTEND)
909 
910 /*
911  * If new Rx offload capabilities are defined, they also must be
912  * mentioned in rte_rx_offload_names in rte_ethdev.c file.
913  */
914 
918 #define DEV_TX_OFFLOAD_VLAN_INSERT 0x00000001
919 #define DEV_TX_OFFLOAD_IPV4_CKSUM 0x00000002
920 #define DEV_TX_OFFLOAD_UDP_CKSUM 0x00000004
921 #define DEV_TX_OFFLOAD_TCP_CKSUM 0x00000008
922 #define DEV_TX_OFFLOAD_SCTP_CKSUM 0x00000010
923 #define DEV_TX_OFFLOAD_TCP_TSO 0x00000020
924 #define DEV_TX_OFFLOAD_UDP_TSO 0x00000040
925 #define DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM 0x00000080
926 #define DEV_TX_OFFLOAD_QINQ_INSERT 0x00000100
927 #define DEV_TX_OFFLOAD_VXLAN_TNL_TSO 0x00000200
928 #define DEV_TX_OFFLOAD_GRE_TNL_TSO 0x00000400
929 #define DEV_TX_OFFLOAD_IPIP_TNL_TSO 0x00000800
930 #define DEV_TX_OFFLOAD_GENEVE_TNL_TSO 0x00001000
931 #define DEV_TX_OFFLOAD_MACSEC_INSERT 0x00002000
932 #define DEV_TX_OFFLOAD_MT_LOCKFREE 0x00004000
933 
936 #define DEV_TX_OFFLOAD_MULTI_SEGS 0x00008000
937 
938 #define DEV_TX_OFFLOAD_MBUF_FAST_FREE 0x00010000
939 
943 #define DEV_TX_OFFLOAD_SECURITY 0x00020000
944 
949 #define DEV_TX_OFFLOAD_UDP_TNL_TSO 0x00040000
950 
955 #define DEV_TX_OFFLOAD_IP_TNL_TSO 0x00080000
956 
957 #define RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP 0x00000001
958 
959 #define RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP 0x00000002
960 
962 /*
963  * If new Tx offload capabilities are defined, they also must be
964  * mentioned in rte_tx_offload_names in rte_ethdev.c file.
965  */
966 
967 /*
968  * Fallback default preferred Rx/Tx port parameters.
969  * These are used if an application requests default parameters
970  * but the PMD does not provide preferred values.
971  */
972 #define RTE_ETH_DEV_FALLBACK_RX_RINGSIZE 512
973 #define RTE_ETH_DEV_FALLBACK_TX_RINGSIZE 512
974 #define RTE_ETH_DEV_FALLBACK_RX_NBQUEUES 1
975 #define RTE_ETH_DEV_FALLBACK_TX_NBQUEUES 1
976 
983  uint16_t burst_size;
984  uint16_t ring_size;
985  uint16_t nb_queues;
986 };
987 
992 #define RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID (0)
993 
998  const char *name;
999  uint16_t domain_id;
1000  uint16_t port_id;
1008 };
1009 
1014  struct rte_device *device;
1015  const char *driver_name;
1016  unsigned int if_index;
1018  const uint32_t *dev_flags;
1019  uint32_t min_rx_bufsize;
1020  uint32_t max_rx_pktlen;
1021  uint16_t max_rx_queues;
1022  uint16_t max_tx_queues;
1023  uint32_t max_mac_addrs;
1024  uint32_t max_hash_mac_addrs;
1026  uint16_t max_vfs;
1027  uint16_t max_vmdq_pools;
1036  uint16_t reta_size;
1038  uint8_t hash_key_size;
1043  uint16_t vmdq_queue_base;
1044  uint16_t vmdq_queue_num;
1045  uint16_t vmdq_pool_base;
1048  uint32_t speed_capa;
1050  uint16_t nb_rx_queues;
1051  uint16_t nb_tx_queues;
1057  uint64_t dev_capa;
1063 };
1064 
1070  struct rte_mempool *mp;
1072  uint8_t scattered_rx;
1073  uint16_t nb_desc;
1075 
1082  uint16_t nb_desc;
1084 
1086 #define RTE_ETH_XSTATS_NAME_SIZE 64
1087 
1098  uint64_t id;
1099  uint64_t value;
1100 };
1101 
1111 };
1112 
1113 #define ETH_DCB_NUM_TCS 8
1114 #define ETH_MAX_VMDQ_POOL 64
1115 
1122  struct {
1123  uint8_t base;
1124  uint8_t nb_queue;
1125  } tc_rxq[ETH_MAX_VMDQ_POOL][ETH_DCB_NUM_TCS];
1127  struct {
1128  uint8_t base;
1129  uint8_t nb_queue;
1130  } tc_txq[ETH_MAX_VMDQ_POOL][ETH_DCB_NUM_TCS];
1131 };
1132 
1138  uint8_t nb_tcs;
1140  uint8_t tc_bws[ETH_DCB_NUM_TCS];
1143 };
1144 
1148 #define RTE_ETH_QUEUE_STATE_STOPPED 0
1149 #define RTE_ETH_QUEUE_STATE_STARTED 1
1150 
1151 #define RTE_ETH_ALL RTE_MAX_ETHPORTS
1152 
1153 /* Macros to check for valid port */
1154 #define RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, retval) do { \
1155  if (!rte_eth_dev_is_valid_port(port_id)) { \
1156  RTE_ETHDEV_LOG(ERR, "Invalid port_id=%u\n", port_id); \
1157  return retval; \
1158  } \
1159 } while (0)
1160 
1161 #define RTE_ETH_VALID_PORTID_OR_RET(port_id) do { \
1162  if (!rte_eth_dev_is_valid_port(port_id)) { \
1163  RTE_ETHDEV_LOG(ERR, "Invalid port_id=%u\n", port_id); \
1164  return; \
1165  } \
1166 } while (0)
1167 
1173 #define ETH_L2_TUNNEL_ENABLE_MASK 0x00000001
1174 
1175 #define ETH_L2_TUNNEL_INSERTION_MASK 0x00000002
1176 
1177 #define ETH_L2_TUNNEL_STRIPPING_MASK 0x00000004
1178 
1179 #define ETH_L2_TUNNEL_FORWARDING_MASK 0x00000008
1180 
1203 typedef uint16_t (*rte_rx_callback_fn)(uint16_t port_id, uint16_t queue,
1204  struct rte_mbuf *pkts[], uint16_t nb_pkts, uint16_t max_pkts,
1205  void *user_param);
1206 
1227 typedef uint16_t (*rte_tx_callback_fn)(uint16_t port_id, uint16_t queue,
1228  struct rte_mbuf *pkts[], uint16_t nb_pkts, void *user_param);
1229 
1242 };
1243 
/**
 * SR-IOV state of an Ethernet device (accessed via RTE_ETH_DEV_SRIOV()).
 */
1244 struct rte_eth_dev_sriov {
1245  uint8_t active; /**< Non-zero when SR-IOV is enabled — presumably the number of active VFs/mode; confirm per driver. */
1246  uint8_t nb_q_per_pool; /**< Number of Rx/Tx queues per pool. */
1247  uint16_t def_vmdq_idx; /**< Default VMDq pool index — NOTE(review): presumably the PF's pool; confirm. */
1248  uint16_t def_pool_q_idx; /**< Default queue index within the default pool. */
1249 };
1250 #define RTE_ETH_DEV_SRIOV(dev) ((dev)->data->sriov)
1251 
1252 #define RTE_ETH_NAME_MAX_LEN RTE_DEV_NAME_MAX_LEN
1253 
1254 #define RTE_ETH_DEV_NO_OWNER 0
1255 
1256 #define RTE_ETH_MAX_OWNER_NAME_LEN 64
1257 
/**
 * Identity of an ethdev port owner, used by the rte_eth_dev_owner_* API
 * to reserve ports against concurrent management.
 */
1258 struct rte_eth_dev_owner {
1259  uint64_t id; /**< Unique owner identifier (RTE_ETH_DEV_NO_OWNER = unowned). */
1260  char name[RTE_ETH_MAX_OWNER_NAME_LEN]; /**< Human-readable owner name (max 64 bytes incl. NUL). */
1261 };
1262 
1264 #define RTE_ETH_DEV_INTR_LSC 0x0002
1265 
1266 #define RTE_ETH_DEV_BONDED_SLAVE 0x0004
1267 
1268 #define RTE_ETH_DEV_INTR_RMV 0x0008
1269 
1270 #define RTE_ETH_DEV_REPRESENTOR 0x0010
1271 
1283 uint64_t rte_eth_find_next_owned_by(uint16_t port_id,
1284  const uint64_t owner_id);
1285 
1289 #define RTE_ETH_FOREACH_DEV_OWNED_BY(p, o) \
1290  for (p = rte_eth_find_next_owned_by(0, o); \
1291  (unsigned int)p < (unsigned int)RTE_MAX_ETHPORTS; \
1292  p = rte_eth_find_next_owned_by(p + 1, o))
1293 
1302 uint16_t rte_eth_find_next(uint16_t port_id);
1303 
1307 #define RTE_ETH_FOREACH_DEV(p) \
1308  RTE_ETH_FOREACH_DEV_OWNED_BY(p, RTE_ETH_DEV_NO_OWNER)
1309 
1310 
1324 int __rte_experimental rte_eth_dev_owner_new(uint64_t *owner_id);
1325 
1339 int __rte_experimental rte_eth_dev_owner_set(const uint16_t port_id,
1340  const struct rte_eth_dev_owner *owner);
1341 
1355 int __rte_experimental rte_eth_dev_owner_unset(const uint16_t port_id,
1356  const uint64_t owner_id);
1357 
1367 void __rte_experimental rte_eth_dev_owner_delete(const uint64_t owner_id);
1368 
1382 int __rte_experimental rte_eth_dev_owner_get(const uint16_t port_id,
1383  struct rte_eth_dev_owner *owner);
1384 
1397 __rte_deprecated
1398 uint16_t rte_eth_dev_count(void);
1399 
1410 uint16_t rte_eth_dev_count_avail(void);
1411 
1420 uint16_t __rte_experimental rte_eth_dev_count_total(void);
1421 
1434 __rte_deprecated
1435 int rte_eth_dev_attach(const char *devargs, uint16_t *port_id);
1436 
1450 __rte_deprecated
1451 int rte_eth_dev_detach(uint16_t port_id, char *devname);
1452 
1464 uint32_t rte_eth_speed_bitflag(uint32_t speed, int duplex);
1465 
1477 const char * __rte_experimental rte_eth_dev_rx_offload_name(uint64_t offload);
1478 
1490 const char * __rte_experimental rte_eth_dev_tx_offload_name(uint64_t offload);
1491 
1531 int rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_queue,
1532  uint16_t nb_tx_queue, const struct rte_eth_conf *eth_conf);
1533 
1545 int __rte_experimental
1546 rte_eth_dev_is_removed(uint16_t port_id);
1547 
1597 int rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
1598  uint16_t nb_rx_desc, unsigned int socket_id,
1599  const struct rte_eth_rxconf *rx_conf,
1600  struct rte_mempool *mb_pool);
1601 
1650 int rte_eth_tx_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
1651  uint16_t nb_tx_desc, unsigned int socket_id,
1652  const struct rte_eth_txconf *tx_conf);
1653 
1664 int rte_eth_dev_socket_id(uint16_t port_id);
1665 
1675 int rte_eth_dev_is_valid_port(uint16_t port_id);
1676 
1693 int rte_eth_dev_rx_queue_start(uint16_t port_id, uint16_t rx_queue_id);
1694 
1710 int rte_eth_dev_rx_queue_stop(uint16_t port_id, uint16_t rx_queue_id);
1711 
1728 int rte_eth_dev_tx_queue_start(uint16_t port_id, uint16_t tx_queue_id);
1729 
1745 int rte_eth_dev_tx_queue_stop(uint16_t port_id, uint16_t tx_queue_id);
1746 
1762 int rte_eth_dev_start(uint16_t port_id);
1763 
1771 void rte_eth_dev_stop(uint16_t port_id);
1772 
1785 int rte_eth_dev_set_link_up(uint16_t port_id);
1786 
1796 int rte_eth_dev_set_link_down(uint16_t port_id);
1797 
1806 void rte_eth_dev_close(uint16_t port_id);
1807 
1845 int rte_eth_dev_reset(uint16_t port_id);
1846 
1853 void rte_eth_promiscuous_enable(uint16_t port_id);
1854 
1861 void rte_eth_promiscuous_disable(uint16_t port_id);
1862 
1873 int rte_eth_promiscuous_get(uint16_t port_id);
1874 
1881 void rte_eth_allmulticast_enable(uint16_t port_id);
1882 
1889 void rte_eth_allmulticast_disable(uint16_t port_id);
1890 
1901 int rte_eth_allmulticast_get(uint16_t port_id);
1902 
1914 void rte_eth_link_get(uint16_t port_id, struct rte_eth_link *link);
1915 
1927 void rte_eth_link_get_nowait(uint16_t port_id, struct rte_eth_link *link);
1928 
1946 int rte_eth_stats_get(uint16_t port_id, struct rte_eth_stats *stats);
1947 
1958 int rte_eth_stats_reset(uint16_t port_id);
1959 
1989 int rte_eth_xstats_get_names(uint16_t port_id,
1990  struct rte_eth_xstat_name *xstats_names,
1991  unsigned int size);
1992 
2022 int rte_eth_xstats_get(uint16_t port_id, struct rte_eth_xstat *xstats,
2023  unsigned int n);
2024 
2047 int
2048 rte_eth_xstats_get_names_by_id(uint16_t port_id,
2049  struct rte_eth_xstat_name *xstats_names, unsigned int size,
2050  uint64_t *ids);
2051 
2075 int rte_eth_xstats_get_by_id(uint16_t port_id, const uint64_t *ids,
2076  uint64_t *values, unsigned int size);
2077 
2096 int rte_eth_xstats_get_id_by_name(uint16_t port_id, const char *xstat_name,
2097  uint64_t *id);
2098 
2105 void rte_eth_xstats_reset(uint16_t port_id);
2106 
2124 int rte_eth_dev_set_tx_queue_stats_mapping(uint16_t port_id,
2125  uint16_t tx_queue_id, uint8_t stat_idx);
2126 
2144 int rte_eth_dev_set_rx_queue_stats_mapping(uint16_t port_id,
2145  uint16_t rx_queue_id,
2146  uint8_t stat_idx);
2147 
2157 void rte_eth_macaddr_get(uint16_t port_id, struct ether_addr *mac_addr);
2158 
2168 void rte_eth_dev_info_get(uint16_t port_id, struct rte_eth_dev_info *dev_info);
2169 
2189 int rte_eth_dev_fw_version_get(uint16_t port_id,
2190  char *fw_version, size_t fw_size);
2191 
2230 int rte_eth_dev_get_supported_ptypes(uint16_t port_id, uint32_t ptype_mask,
2231  uint32_t *ptypes, int num);
2232 
2244 int rte_eth_dev_get_mtu(uint16_t port_id, uint16_t *mtu);
2245 
2261 int rte_eth_dev_set_mtu(uint16_t port_id, uint16_t mtu);
2262 
2282 int rte_eth_dev_vlan_filter(uint16_t port_id, uint16_t vlan_id, int on);
2283 
2303 int rte_eth_dev_set_vlan_strip_on_queue(uint16_t port_id, uint16_t rx_queue_id,
2304  int on);
2305 
2323 int rte_eth_dev_set_vlan_ether_type(uint16_t port_id,
2324  enum rte_vlan_type vlan_type,
2325  uint16_t tag_type);
2326 
2348 int rte_eth_dev_set_vlan_offload(uint16_t port_id, int offload_mask);
2349 
2362 int rte_eth_dev_get_vlan_offload(uint16_t port_id);
2363 
2378 int rte_eth_dev_set_vlan_pvid(uint16_t port_id, uint16_t pvid, int on);
2379 
2380 typedef void (*buffer_tx_error_fn)(struct rte_mbuf **unsent, uint16_t count,
2381  void *userdata);
2382 
2388  buffer_tx_error_fn error_callback;
2389  void *error_userdata;
2390  uint16_t size;
2391  uint16_t length;
2392  struct rte_mbuf *pkts[];
2394 };
2395 
2402 #define RTE_ETH_TX_BUFFER_SIZE(sz) \
2403  (sizeof(struct rte_eth_dev_tx_buffer) + (sz) * sizeof(struct rte_mbuf *))
2404 
2415 int
2416 rte_eth_tx_buffer_init(struct rte_eth_dev_tx_buffer *buffer, uint16_t size);
2417 
2442 int
2444  buffer_tx_error_fn callback, void *userdata);
2445 
2468 void
2469 rte_eth_tx_buffer_drop_callback(struct rte_mbuf **pkts, uint16_t unsent,
2470  void *userdata);
2471 
2495 void
2496 rte_eth_tx_buffer_count_callback(struct rte_mbuf **pkts, uint16_t unsent,
2497  void *userdata);
2498 
2524 int
2525 rte_eth_tx_done_cleanup(uint16_t port_id, uint16_t queue_id, uint32_t free_cnt);
2526 
2542 };
2543 
2551  uint64_t metadata;
2565 };
2566 
2584 };
2585 
2586 typedef int (*rte_eth_dev_cb_fn)(uint16_t port_id,
2587  enum rte_eth_event_type event, void *cb_arg, void *ret_param);
2607 int rte_eth_dev_callback_register(uint16_t port_id,
2608  enum rte_eth_event_type event,
2609  rte_eth_dev_cb_fn cb_fn, void *cb_arg);
2610 
2629 int rte_eth_dev_callback_unregister(uint16_t port_id,
2630  enum rte_eth_event_type event,
2631  rte_eth_dev_cb_fn cb_fn, void *cb_arg);
2632 
2654 int rte_eth_dev_rx_intr_enable(uint16_t port_id, uint16_t queue_id);
2655 
2676 int rte_eth_dev_rx_intr_disable(uint16_t port_id, uint16_t queue_id);
2677 
2695 int rte_eth_dev_rx_intr_ctl(uint16_t port_id, int epfd, int op, void *data);
2696 
2718 int rte_eth_dev_rx_intr_ctl_q(uint16_t port_id, uint16_t queue_id,
2719  int epfd, int op, void *data);
2720 
2734 int rte_eth_led_on(uint16_t port_id);
2735 
2749 int rte_eth_led_off(uint16_t port_id);
2750 
2764 int rte_eth_dev_flow_ctrl_get(uint16_t port_id,
2765  struct rte_eth_fc_conf *fc_conf);
2766 
2781 int rte_eth_dev_flow_ctrl_set(uint16_t port_id,
2782  struct rte_eth_fc_conf *fc_conf);
2783 
2799 int rte_eth_dev_priority_flow_ctrl_set(uint16_t port_id,
2800  struct rte_eth_pfc_conf *pfc_conf);
2801 
2821 int rte_eth_dev_mac_addr_add(uint16_t port_id, struct ether_addr *mac_addr,
2822  uint32_t pool);
2823 
2837 int rte_eth_dev_mac_addr_remove(uint16_t port_id, struct ether_addr *mac_addr);
2838 
2852 int rte_eth_dev_default_mac_addr_set(uint16_t port_id,
2853  struct ether_addr *mac_addr);
2854 
2871 int rte_eth_dev_rss_reta_update(uint16_t port_id,
2872  struct rte_eth_rss_reta_entry64 *reta_conf,
2873  uint16_t reta_size);
2874 
2891 int rte_eth_dev_rss_reta_query(uint16_t port_id,
2892  struct rte_eth_rss_reta_entry64 *reta_conf,
2893  uint16_t reta_size);
2894 
2914 int rte_eth_dev_uc_hash_table_set(uint16_t port_id, struct ether_addr *addr,
2915  uint8_t on);
2916 
2935 int rte_eth_dev_uc_all_hash_table_set(uint16_t port_id, uint8_t on);
2936 
2959 int rte_eth_mirror_rule_set(uint16_t port_id,
2960  struct rte_eth_mirror_conf *mirror_conf,
2961  uint8_t rule_id,
2962  uint8_t on);
2963 
2978 int rte_eth_mirror_rule_reset(uint16_t port_id,
2979  uint8_t rule_id);
2980 
2997 int rte_eth_set_queue_rate_limit(uint16_t port_id, uint16_t queue_idx,
2998  uint16_t tx_rate);
2999 
3014 int rte_eth_dev_rss_hash_update(uint16_t port_id,
3015  struct rte_eth_rss_conf *rss_conf);
3016 
3031 int
3032 rte_eth_dev_rss_hash_conf_get(uint16_t port_id,
3033  struct rte_eth_rss_conf *rss_conf);
3034 
3053 int
3054 rte_eth_dev_udp_tunnel_port_add(uint16_t port_id,
3055  struct rte_eth_udp_tunnel *tunnel_udp);
3056 
3076 int
3077 rte_eth_dev_udp_tunnel_port_delete(uint16_t port_id,
3078  struct rte_eth_udp_tunnel *tunnel_udp);
3079 
3094 int rte_eth_dev_filter_supported(uint16_t port_id,
3095  enum rte_filter_type filter_type);
3096 
3116 int rte_eth_dev_filter_ctrl(uint16_t port_id, enum rte_filter_type filter_type,
3117  enum rte_filter_op filter_op, void *arg);
3118 
3132 int rte_eth_dev_get_dcb_info(uint16_t port_id,
3133  struct rte_eth_dcb_info *dcb_info);
3134 
3135 struct rte_eth_rxtx_callback;
3136 
3161 const struct rte_eth_rxtx_callback *
3162 rte_eth_add_rx_callback(uint16_t port_id, uint16_t queue_id,
3163  rte_rx_callback_fn fn, void *user_param);
3164 
3190 const struct rte_eth_rxtx_callback *
3191 rte_eth_add_first_rx_callback(uint16_t port_id, uint16_t queue_id,
3192  rte_rx_callback_fn fn, void *user_param);
3193 
3218 const struct rte_eth_rxtx_callback *
3219 rte_eth_add_tx_callback(uint16_t port_id, uint16_t queue_id,
3220  rte_tx_callback_fn fn, void *user_param);
3221 
3252 int rte_eth_remove_rx_callback(uint16_t port_id, uint16_t queue_id,
3253  const struct rte_eth_rxtx_callback *user_cb);
3254 
3285 int rte_eth_remove_tx_callback(uint16_t port_id, uint16_t queue_id,
3286  const struct rte_eth_rxtx_callback *user_cb);
3287 
3305 int rte_eth_rx_queue_info_get(uint16_t port_id, uint16_t queue_id,
3306  struct rte_eth_rxq_info *qinfo);
3307 
3325 int rte_eth_tx_queue_info_get(uint16_t port_id, uint16_t queue_id,
3326  struct rte_eth_txq_info *qinfo);
3327 
3345 int rte_eth_dev_get_reg_info(uint16_t port_id, struct rte_dev_reg_info *info);
3346 
3359 int rte_eth_dev_get_eeprom_length(uint16_t port_id);
3360 
3376 int rte_eth_dev_get_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info);
3377 
3393 int rte_eth_dev_set_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info);
3394 
3412 int __rte_experimental
3413 rte_eth_dev_get_module_info(uint16_t port_id,
3414  struct rte_eth_dev_module_info *modinfo);
3415 
3434 int __rte_experimental
3435 rte_eth_dev_get_module_eeprom(uint16_t port_id,
3436  struct rte_dev_eeprom_info *info);
3437 
3456 int rte_eth_dev_set_mc_addr_list(uint16_t port_id,
3457  struct ether_addr *mc_addr_set,
3458  uint32_t nb_mc_addr);
3459 
3472 int rte_eth_timesync_enable(uint16_t port_id);
3473 
3486 int rte_eth_timesync_disable(uint16_t port_id);
3487 
3506 int rte_eth_timesync_read_rx_timestamp(uint16_t port_id,
3507  struct timespec *timestamp, uint32_t flags);
3508 
3524 int rte_eth_timesync_read_tx_timestamp(uint16_t port_id,
3525  struct timespec *timestamp);
3526 
3544 int rte_eth_timesync_adjust_time(uint16_t port_id, int64_t delta);
3545 
3560 int rte_eth_timesync_read_time(uint16_t port_id, struct timespec *time);
3561 
3580 int rte_eth_timesync_write_time(uint16_t port_id, const struct timespec *time);
3581 
3597 int
3598 rte_eth_dev_l2_tunnel_eth_type_conf(uint16_t port_id,
3599  struct rte_eth_l2_tunnel_conf *l2_tunnel);
3600 
3625 int
3626 rte_eth_dev_l2_tunnel_offload_set(uint16_t port_id,
3627  struct rte_eth_l2_tunnel_conf *l2_tunnel,
3628  uint32_t mask,
3629  uint8_t en);
3630 
3646 int
3647 rte_eth_dev_get_port_by_name(const char *name, uint16_t *port_id);
3648 
3663 int
3664 rte_eth_dev_get_name_by_port(uint16_t port_id, char *name);
3665 
3682 int rte_eth_dev_adjust_nb_rx_tx_desc(uint16_t port_id,
3683  uint16_t *nb_rx_desc,
3684  uint16_t *nb_tx_desc);
3685 
3700 int
3701 rte_eth_dev_pool_ops_supported(uint16_t port_id, const char *pool);
3702 
3712 void *
3713 rte_eth_dev_get_sec_ctx(uint16_t port_id);
3714 
3715 
3716 #include <rte_ethdev_core.h>
3717 
/**
 * Retrieve a burst of input packets from a receive queue of a device.
 *
 * Dispatches to the PMD's rx_pkt_burst handler for the given queue, then
 * (when RTE_ETHDEV_RXTX_CALLBACKS is enabled) runs every registered
 * post-Rx callback in chain order, letting each one filter or replace the
 * received packet array.
 *
 * @param port_id  Port identifier (validated only under RTE_LIBRTE_ETHDEV_DEBUG).
 * @param queue_id Rx queue index (bounds-checked only under RTE_LIBRTE_ETHDEV_DEBUG).
 * @param rx_pkts  Output array filled with up to nb_pkts rte_mbuf pointers.
 * @param nb_pkts  Maximum number of packets to retrieve.
 * @return Number of packets actually retrieved (0 on debug-check failure).
 */
3800 static inline uint16_t
3801 rte_eth_rx_burst(uint16_t port_id, uint16_t queue_id,
3802  struct rte_mbuf **rx_pkts, const uint16_t nb_pkts)
3803 {
3804  struct rte_eth_dev *dev = &rte_eth_devices[port_id];
3805  uint16_t nb_rx;
3806 
3807 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
     /* Debug builds only: validate port, burst handler and queue index. */
3808  RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0);
3809  RTE_FUNC_PTR_OR_ERR_RET(*dev->rx_pkt_burst, 0);
3810 
3811  if (queue_id >= dev->data->nb_rx_queues) {
3812  RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", queue_id);
3813  return 0;
3814  }
3815 #endif
     /* Fast path: call straight into the PMD's per-queue receive handler. */
3816  nb_rx = (*dev->rx_pkt_burst)(dev->data->rx_queues[queue_id],
3817  rx_pkts, nb_pkts);
3818 
3819 #ifdef RTE_ETHDEV_RXTX_CALLBACKS
     /* Walk the post-Rx callback chain; each callback may modify rx_pkts
      * and returns the (possibly reduced) packet count for the next one. */
3820  if (unlikely(dev->post_rx_burst_cbs[queue_id] != NULL)) {
3821  struct rte_eth_rxtx_callback *cb =
3822  dev->post_rx_burst_cbs[queue_id];
3823 
3824  do {
3825  nb_rx = cb->fn.rx(port_id, queue_id, rx_pkts, nb_rx,
3826  nb_pkts, cb->param);
3827  cb = cb->next;
3828  } while (cb != NULL);
3829  }
3830 #endif
3831 
3832  return nb_rx;
3833 }
3834 
/**
 * Get the number of used descriptors of a receive queue.
 *
 * Delegates to the PMD's rx_queue_count dev op after validating inputs.
 *
 * @param port_id  Port identifier.
 * @param queue_id Rx queue index.
 * @return The PMD's count on success, -EINVAL for an invalid port or queue,
 *         -ENOTSUP when the driver does not implement rx_queue_count.
 */
3847 static inline int
3848 rte_eth_rx_queue_count(uint16_t port_id, uint16_t queue_id)
3849 {
3850  struct rte_eth_dev *dev;
3851 
3852  RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
3853  dev = &rte_eth_devices[port_id];
3854  RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_count, -ENOTSUP);
3855  if (queue_id >= dev->data->nb_rx_queues)
3856  return -EINVAL;
3857 
3858  return (int)(*dev->dev_ops->rx_queue_count)(dev, queue_id);
3859 }
3860 
3876 static inline int
3877 rte_eth_rx_descriptor_done(uint16_t port_id, uint16_t queue_id, uint16_t offset)
3878 {
3879  struct rte_eth_dev *dev = &rte_eth_devices[port_id];
3880  RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3881  RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_descriptor_done, -ENOTSUP);
3882  return (*dev->dev_ops->rx_descriptor_done)( \
3883  dev->data->rx_queues[queue_id], offset);
3884 }
3885 
/* Return codes of rte_eth_rx_descriptor_status() below (presumed meanings,
 * per the usual DPDK descriptor-status convention — confirm in API docs). */
3886 #define RTE_ETH_RX_DESC_AVAIL 0 /* descriptor available to the device */
3887 #define RTE_ETH_RX_DESC_DONE 1 /* descriptor filled, awaiting software */
3888 #define RTE_ETH_RX_DESC_UNAVAIL 2 /* descriptor unavailable/out of range */
3923 static inline int
3924 rte_eth_rx_descriptor_status(uint16_t port_id, uint16_t queue_id,
3925  uint16_t offset)
3926 {
3927  struct rte_eth_dev *dev;
3928  void *rxq;
3929 
3930 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
3931  RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3932 #endif
3933  dev = &rte_eth_devices[port_id];
3934 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
3935  if (queue_id >= dev->data->nb_rx_queues)
3936  return -ENODEV;
3937 #endif
3938  RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_descriptor_status, -ENOTSUP);
3939  rxq = dev->data->rx_queues[queue_id];
3940 
3941  return (*dev->dev_ops->rx_descriptor_status)(rxq, offset);
3942 }
3943 
/* Return codes of rte_eth_tx_descriptor_status() below (presumed meanings,
 * per the usual DPDK descriptor-status convention — confirm in API docs). */
3944 #define RTE_ETH_TX_DESC_FULL 0 /* descriptor still in flight / in use */
3945 #define RTE_ETH_TX_DESC_DONE 1 /* descriptor completed, reusable */
3946 #define RTE_ETH_TX_DESC_UNAVAIL 2 /* descriptor unavailable/out of range */
3981 static inline int rte_eth_tx_descriptor_status(uint16_t port_id,
3982  uint16_t queue_id, uint16_t offset)
3983 {
3984  struct rte_eth_dev *dev;
3985  void *txq;
3986 
3987 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
3988  RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3989 #endif
3990  dev = &rte_eth_devices[port_id];
3991 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
3992  if (queue_id >= dev->data->nb_tx_queues)
3993  return -ENODEV;
3994 #endif
3995  RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_descriptor_status, -ENOTSUP);
3996  txq = dev->data->tx_queues[queue_id];
3997 
3998  return (*dev->dev_ops->tx_descriptor_status)(txq, offset);
3999 }
4000 
4067 static inline uint16_t
4068 rte_eth_tx_burst(uint16_t port_id, uint16_t queue_id,
4069  struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
4070 {
4071  struct rte_eth_dev *dev = &rte_eth_devices[port_id];
4072 
4073 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
4074  RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0);
4075  RTE_FUNC_PTR_OR_ERR_RET(*dev->tx_pkt_burst, 0);
4076 
4077  if (queue_id >= dev->data->nb_tx_queues) {
4078  RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", queue_id);
4079  return 0;
4080  }
4081 #endif
4082 
4083 #ifdef RTE_ETHDEV_RXTX_CALLBACKS
4084  struct rte_eth_rxtx_callback *cb = dev->pre_tx_burst_cbs[queue_id];
4085 
4086  if (unlikely(cb != NULL)) {
4087  do {
4088  nb_pkts = cb->fn.tx(port_id, queue_id, tx_pkts, nb_pkts,
4089  cb->param);
4090  cb = cb->next;
4091  } while (cb != NULL);
4092  }
4093 #endif
4094 
4095  return (*dev->tx_pkt_burst)(dev->data->tx_queues[queue_id], tx_pkts, nb_pkts);
4096 }
4097 
4154 #ifndef RTE_ETHDEV_TX_PREPARE_NOOP
4155 
4156 static inline uint16_t
4157 rte_eth_tx_prepare(uint16_t port_id, uint16_t queue_id,
4158  struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
4159 {
4160  struct rte_eth_dev *dev;
4161 
4162 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
4163  if (!rte_eth_dev_is_valid_port(port_id)) {
4164  RTE_ETHDEV_LOG(ERR, "Invalid TX port_id=%u\n", port_id);
4165  rte_errno = -EINVAL;
4166  return 0;
4167  }
4168 #endif
4169 
4170  dev = &rte_eth_devices[port_id];
4171 
4172 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
4173  if (queue_id >= dev->data->nb_tx_queues) {
4174  RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", queue_id);
4175  rte_errno = -EINVAL;
4176  return 0;
4177  }
4178 #endif
4179 
4180  if (!dev->tx_pkt_prepare)
4181  return nb_pkts;
4182 
4183  return (*dev->tx_pkt_prepare)(dev->data->tx_queues[queue_id],
4184  tx_pkts, nb_pkts);
4185 }
4186 
4187 #else
4188 
4189 /*
4190  * Native NOOP operation for compilation targets which don't require any
4191  * preparation steps, and where a functional NOOP would introduce an
4192  * unnecessary performance drop.
4193  *
4194  * Generally it is not a good idea to turn this on globally, and it should
4195  * not be used if the behavior of tx_preparation can change.
4196  */
4197 
4198 static inline uint16_t
4199 rte_eth_tx_prepare(__rte_unused uint16_t port_id,
4200  __rte_unused uint16_t queue_id,
4201  __rte_unused struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
4202 {
4203  return nb_pkts;
4204 }
4205 
4206 #endif
4207 
4230 static inline uint16_t
4231 rte_eth_tx_buffer_flush(uint16_t port_id, uint16_t queue_id,
4232  struct rte_eth_dev_tx_buffer *buffer)
4233 {
4234  uint16_t sent;
4235  uint16_t to_send = buffer->length;
4236 
4237  if (to_send == 0)
4238  return 0;
4239 
4240  sent = rte_eth_tx_burst(port_id, queue_id, buffer->pkts, to_send);
4241 
4242  buffer->length = 0;
4243 
4244  /* All packets sent, or to be dealt with by callback below */
4245  if (unlikely(sent != to_send))
4246  buffer->error_callback(&buffer->pkts[sent],
4247  (uint16_t)(to_send - sent),
4248  buffer->error_userdata);
4249 
4250  return sent;
4251 }
4252 
4283 static __rte_always_inline uint16_t
4284 rte_eth_tx_buffer(uint16_t port_id, uint16_t queue_id,
4285  struct rte_eth_dev_tx_buffer *buffer, struct rte_mbuf *tx_pkt)
4286 {
4287  buffer->pkts[buffer->length++] = tx_pkt;
4288  if (buffer->length < buffer->size)
4289  return 0;
4290 
4291  return rte_eth_tx_buffer_flush(port_id, queue_id, buffer);
4292 }
4293 
4294 #ifdef __cplusplus
4295 }
4296 #endif
4297 
4298 #endif /* _RTE_ETHDEV_H_ */
uint16_t nb_rx_queues
Definition: rte_ethdev.h:1050
struct rte_eth_vmdq_dcb_tx_conf vmdq_dcb_tx_conf
Definition: rte_ethdev.h:858
int rte_eth_dev_priority_flow_ctrl_set(uint16_t port_id, struct rte_eth_pfc_conf *pfc_conf)
uint8_t tc_bws[ETH_DCB_NUM_TCS]
Definition: rte_ethdev.h:1140
#define ETH_VMDQ_MAX_VLAN_FILTERS
Definition: rte_ethdev.h:471
struct rte_eth_dev_portconf default_rxportconf
Definition: rte_ethdev.h:1053
struct rte_fdir_conf fdir_conf
Definition: rte_ethdev.h:868
int rte_eth_dev_l2_tunnel_eth_type_conf(uint16_t port_id, struct rte_eth_l2_tunnel_conf *l2_tunnel)
uint32_t rmv
Definition: rte_ethdev.h:825
#define __rte_always_inline
Definition: rte_common.h:141
uint16_t tx_rs_thresh
Definition: rte_ethdev.h:688
uint16_t rte_eth_dev_count_avail(void)
int rte_eth_timesync_write_time(uint16_t port_id, const struct timespec *time)
__rte_deprecated int rte_eth_dev_detach(uint16_t port_id, char *devname)
uint16_t nb_desc
Definition: rte_ethdev.h:1082
struct rte_eth_conf::@115 rx_adv_conf
uint64_t rx_offload_capa
Definition: rte_ethdev.h:1028
uint16_t reta[RTE_RETA_GROUP_SIZE]
Definition: rte_ethdev.h:542
const uint32_t * dev_flags
Definition: rte_ethdev.h:1018
int rte_eth_dev_get_reg_info(uint16_t port_id, struct rte_dev_reg_info *info)
int rte_eth_timesync_read_time(uint16_t port_id, struct timespec *time)
uint64_t rte_eth_find_next_owned_by(uint16_t port_id, const uint64_t owner_id)
void rte_eth_dev_stop(uint16_t port_id)
int rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_queue, uint16_t nb_tx_queue, const struct rte_eth_conf *eth_conf)
int rte_eth_dev_get_name_by_port(uint16_t port_id, char *name)
static uint16_t rte_eth_tx_prepare(uint16_t port_id, uint16_t queue_id, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
Definition: rte_ethdev.h:4157
int rte_eth_mirror_rule_reset(uint16_t port_id, uint8_t rule_id)
rte_eth_nb_tcs
Definition: rte_ethdev.h:550
struct rte_eth_vmdq_tx_conf vmdq_tx_conf
Definition: rte_ethdev.h:862
const struct rte_eth_rxtx_callback * rte_eth_add_rx_callback(uint16_t port_id, uint16_t queue_id, rte_rx_callback_fn fn, void *user_param)
void __rte_experimental rte_eth_dev_owner_delete(const uint64_t owner_id)
uint64_t q_errors[RTE_ETHDEV_QUEUE_STAT_CNTRS]
Definition: rte_ethdev.h:195
enum rte_eth_event_ipsec_subtype subtype
Definition: rte_ethdev.h:2549
struct rte_eth_thresh rx_thresh
Definition: rte_ethdev.h:671
uint16_t rte_eth_find_next(uint16_t port_id)
int rte_eth_dev_set_vlan_pvid(uint16_t port_id, uint16_t pvid, int on)
int rte_eth_led_off(uint16_t port_id)
rte_fdir_pballoc_type
Definition: rte_ethdev.h:772
int rte_eth_dev_udp_tunnel_port_delete(uint16_t port_id, struct rte_eth_udp_tunnel *tunnel_udp)
static int rte_eth_rx_descriptor_status(uint16_t port_id, uint16_t queue_id, uint16_t offset)
Definition: rte_ethdev.h:3924
uint64_t imissed
Definition: rte_ethdev.h:180
static int rte_eth_rx_descriptor_done(uint16_t port_id, uint16_t queue_id, uint16_t offset)
Definition: rte_ethdev.h:3877
uint32_t low_water
Definition: rte_ethdev.h:750
const char *__rte_experimental rte_eth_dev_tx_offload_name(uint64_t offload)
uint32_t max_rx_pkt_len
Definition: rte_ethdev.h:329
uint8_t rss_key_len
Definition: rte_ethdev.h:377
void rte_eth_dev_close(uint16_t port_id)
int rte_eth_dev_rss_reta_update(uint16_t port_id, struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)
int rte_eth_set_queue_rate_limit(uint16_t port_id, uint16_t queue_idx, uint16_t tx_rate)
uint8_t hthresh
Definition: rte_ethdev.h:261
int rte_eth_dev_rss_hash_update(uint16_t port_id, struct rte_eth_rss_conf *rss_conf)
uint64_t rx_queue_offload_capa
Definition: rte_ethdev.h:1032
int rte_eth_xstats_get(uint16_t port_id, struct rte_eth_xstat *xstats, unsigned int n)
uint16_t reta_size
Definition: rte_ethdev.h:1036
int rte_eth_dev_rx_intr_ctl_q(uint16_t port_id, uint16_t queue_id, int epfd, int op, void *data)
void * userdata
Definition: rte_mbuf.h:552
int rte_eth_remove_rx_callback(uint16_t port_id, uint16_t queue_id, const struct rte_eth_rxtx_callback *user_cb)
uint32_t lpbk_mode
Definition: rte_ethdev.h:843
enum rte_fdir_status_mode status
Definition: rte_ethdev.h:796
enum rte_eth_tx_mq_mode mq_mode
Definition: rte_ethdev.h:648
struct rte_eth_dcb_tc_queue_mapping::@118 tc_txq[ETH_MAX_VMDQ_POOL][ETH_DCB_NUM_TCS]
#define rte_errno
Definition: rte_errno.h:29
int rte_eth_dev_uc_all_hash_table_set(uint16_t port_id, uint8_t on)
uint32_t link_speeds
Definition: rte_ethdev.h:834
uint64_t tx_queue_offload_capa
Definition: rte_ethdev.h:1034
rte_eth_tx_mq_mode
Definition: rte_ethdev.h:309
rte_eth_fc_mode
Definition: rte_ethdev.h:736
int __rte_experimental rte_eth_dev_is_removed(uint16_t port_id)
int rte_eth_mirror_rule_set(uint16_t port_id, struct rte_eth_mirror_conf *mirror_conf, uint8_t rule_id, uint8_t on)
uint8_t enable_default_pool
Definition: rte_ethdev.h:602
uint16_t nb_mtu_seg_max
Definition: rte_ethdev.h:730
#define __rte_unused
Definition: rte_common.h:76
uint64_t q_obytes[RTE_ETHDEV_QUEUE_STAT_CNTRS]
Definition: rte_ethdev.h:193
int rte_eth_dev_get_eeprom_length(uint16_t port_id)
uint64_t opackets
Definition: rte_ethdev.h:177
rte_filter_op
Definition: rte_eth_ctrl.h:81
struct rte_eth_rss_conf rss_conf
Definition: rte_ethdev.h:849
uint8_t hash_key_size
Definition: rte_ethdev.h:1038
int rte_eth_dev_rx_intr_enable(uint16_t port_id, uint16_t queue_id)
int rte_eth_dev_set_rx_queue_stats_mapping(uint16_t port_id, uint16_t rx_queue_id, uint8_t stat_idx)
int rte_eth_tx_queue_info_get(uint16_t port_id, uint16_t queue_id, struct rte_eth_txq_info *qinfo)
uint16_t split_hdr_size
Definition: rte_ethdev.h:330
struct rte_mempool * mp
Definition: rte_ethdev.h:1070
uint32_t dcb_capability_en
Definition: rte_ethdev.h:867
uint64_t q_ibytes[RTE_ETHDEV_QUEUE_STAT_CNTRS]
Definition: rte_ethdev.h:191
const char * name
Definition: rte_ethdev.h:998
struct rte_eth_switch_info switch_info
Definition: rte_ethdev.h:1062
int rte_eth_dev_flow_ctrl_get(uint16_t port_id, struct rte_eth_fc_conf *fc_conf)
int rte_eth_dev_callback_unregister(uint16_t port_id, enum rte_eth_event_type event, rte_eth_dev_cb_fn cb_fn, void *cb_arg)
int rte_eth_dev_rx_queue_stop(uint16_t port_id, uint16_t rx_queue_id)
int rte_eth_dev_pool_ops_supported(uint16_t port_id, const char *pool)
void rte_eth_allmulticast_enable(uint16_t port_id)
uint32_t rxq
Definition: rte_ethdev.h:823
int rte_eth_dev_set_link_up(uint16_t port_id)
int rte_eth_timesync_read_rx_timestamp(uint16_t port_id, struct timespec *timestamp, uint32_t flags)
struct rte_eth_thresh tx_thresh
Definition: rte_ethdev.h:687
struct rte_eth_desc_lim rx_desc_lim
Definition: rte_ethdev.h:1046
struct rte_eth_vmdq_dcb_conf::@113 pool_map[ETH_VMDQ_MAX_VLAN_FILTERS]
uint8_t dcb_tc[ETH_DCB_NUM_USER_PRIORITIES]
Definition: rte_ethdev.h:609
int rte_eth_dev_get_vlan_offload(uint16_t port_id)
struct rte_eth_vmdq_rx_conf::@114 pool_map[ETH_VMDQ_MAX_VLAN_FILTERS]
int rte_eth_tx_queue_setup(uint16_t port_id, uint16_t tx_queue_id, uint16_t nb_tx_desc, unsigned int socket_id, const struct rte_eth_txconf *tx_conf)
uint16_t vmdq_queue_num
Definition: rte_ethdev.h:1044
uint8_t rx_deferred_start
Definition: rte_ethdev.h:674
struct rte_mbuf * pkts[]
Definition: rte_ethdev.h:2392
int(* rte_eth_dev_cb_fn)(uint16_t port_id, enum rte_eth_event_type event, void *cb_arg, void *ret_param)
Definition: rte_ethdev.h:2586
struct rte_eth_rxmode rxmode
Definition: rte_ethdev.h:841
uint64_t q_ipackets[RTE_ETHDEV_QUEUE_STAT_CNTRS]
Definition: rte_ethdev.h:187
uint32_t high_water
Definition: rte_ethdev.h:749
enum rte_eth_nb_pools nb_queue_pools
Definition: rte_ethdev.h:601
struct rte_eth_txconf conf
Definition: rte_ethdev.h:1081
uint32_t rte_eth_speed_bitflag(uint32_t speed, int duplex)
struct rte_intr_conf intr_conf
Definition: rte_ethdev.h:869
#define RTE_ETH_XSTATS_NAME_SIZE
Definition: rte_ethdev.h:1086
int rte_eth_dev_rss_hash_conf_get(uint16_t port_id, struct rte_eth_rss_conf *rss_conf)
struct rte_eth_desc_lim tx_desc_lim
Definition: rte_ethdev.h:1047
int rte_eth_timesync_disable(uint16_t port_id)
uint64_t offloads
Definition: rte_ethdev.h:680
int rte_eth_dev_mac_addr_remove(uint16_t port_id, struct ether_addr *mac_addr)
void * rte_eth_dev_get_sec_ctx(uint16_t port_id)
int rte_eth_dev_filter_ctrl(uint16_t port_id, enum rte_filter_type filter_type, enum rte_filter_op filter_op, void *arg)
uint16_t send_xon
Definition: rte_ethdev.h:752
int rte_eth_stats_reset(uint16_t port_id)
struct rte_eth_txconf default_txconf
Definition: rte_ethdev.h:1042
#define unlikely(x)
uint16_t nb_max
Definition: rte_ethdev.h:705
uint64_t ibytes
Definition: rte_ethdev.h:178
uint64_t offloads
Definition: rte_ethdev.h:698
uint64_t oerrors
Definition: rte_ethdev.h:185
void rte_eth_tx_buffer_drop_callback(struct rte_mbuf **pkts, uint16_t unsent, void *userdata)
int rte_eth_dev_udp_tunnel_port_add(uint16_t port_id, struct rte_eth_udp_tunnel *tunnel_udp)
struct rte_eth_dcb_rx_conf dcb_rx_conf
Definition: rte_ethdev.h:852
int rte_eth_dev_set_mc_addr_list(uint16_t port_id, struct ether_addr *mc_addr_set, uint32_t nb_mc_addr)
struct rte_eth_vmdq_rx_conf vmdq_rx_conf
Definition: rte_ethdev.h:854
int rte_eth_timesync_adjust_time(uint16_t port_id, int64_t delta)
char name[RTE_ETH_XSTATS_NAME_SIZE]
Definition: rte_ethdev.h:1110
void rte_eth_promiscuous_enable(uint16_t port_id)
uint64_t offloads
Definition: rte_ethdev.h:336
enum rte_eth_rx_mq_mode mq_mode
Definition: rte_ethdev.h:328
int rte_eth_dev_callback_register(uint16_t port_id, enum rte_eth_event_type event, rte_eth_dev_cb_fn cb_fn, void *cb_arg)
enum rte_eth_nb_pools nb_queue_pools
Definition: rte_ethdev.h:632
int rte_eth_dev_tx_queue_stop(uint16_t port_id, uint16_t tx_queue_id)
uint16_t tx_free_thresh
Definition: rte_ethdev.h:689
uint16_t nb_desc
Definition: rte_ethdev.h:1073
void rte_eth_macaddr_get(uint16_t port_id, struct ether_addr *mac_addr)
static uint16_t rte_eth_rx_burst(uint16_t port_id, uint16_t queue_id, struct rte_mbuf **rx_pkts, const uint16_t nb_pkts)
Definition: rte_ethdev.h:3801
struct rte_eth_dcb_tc_queue_mapping::@117 tc_rxq[ETH_MAX_VMDQ_POOL][ETH_DCB_NUM_TCS]
int rte_eth_dev_set_vlan_strip_on_queue(uint16_t port_id, uint16_t rx_queue_id, int on)
int rte_eth_dev_start(uint16_t port_id)
uint16_t max_vmdq_pools
Definition: rte_ethdev.h:1027
uint8_t scattered_rx
Definition: rte_ethdev.h:1072
int rte_eth_dev_reset(uint16_t port_id)
int rte_eth_xstats_get_by_id(uint16_t port_id, const uint64_t *ids, uint64_t *values, unsigned int size)
struct rte_eth_dcb_tx_conf dcb_tx_conf
Definition: rte_ethdev.h:860
uint64_t offloads
Definition: rte_ethdev.h:654
uint16_t vmdq_pool_base
Definition: rte_ethdev.h:1045
uint64_t tx_offload_capa
Definition: rte_ethdev.h:1030
int rte_eth_dev_rx_intr_disable(uint16_t port_id, uint16_t queue_id)
int rte_eth_tx_buffer_set_err_callback(struct rte_eth_dev_tx_buffer *buffer, buffer_tx_error_fn callback, void *userdata)
void rte_eth_tx_buffer_count_callback(struct rte_mbuf **pkts, uint16_t unsent, void *userdata)
uint64_t q_opackets[RTE_ETHDEV_QUEUE_STAT_CNTRS]
Definition: rte_ethdev.h:189
int rte_eth_remove_tx_callback(uint16_t port_id, uint16_t queue_id, const struct rte_eth_rxtx_callback *user_cb)
int rte_eth_dev_uc_hash_table_set(uint16_t port_id, struct ether_addr *addr, uint8_t on)
uint16_t(* rte_tx_callback_fn)(uint16_t port_id, uint16_t queue, struct rte_mbuf *pkts[], uint16_t nb_pkts, void *user_param)
Definition: rte_ethdev.h:1227
int rte_eth_dev_fw_version_get(uint16_t port_id, char *fw_version, size_t fw_size)
uint16_t __rte_experimental rte_eth_dev_count_total(void)
uint64_t obytes
Definition: rte_ethdev.h:179
uint8_t enable_loop_back
Definition: rte_ethdev.h:635
int rte_eth_dev_adjust_nb_rx_tx_desc(uint16_t port_id, uint16_t *nb_rx_desc, uint16_t *nb_tx_desc)
struct rte_eth_rxconf conf
Definition: rte_ethdev.h:1071
void rte_eth_link_get(uint16_t port_id, struct rte_eth_link *link)
#define ETH_DCB_NUM_USER_PRIORITIES
Definition: rte_ethdev.h:472
void rte_eth_promiscuous_disable(uint16_t port_id)
uint16_t max_tx_queues
Definition: rte_ethdev.h:1022
int rte_eth_dev_tx_queue_start(uint16_t port_id, uint16_t tx_queue_id)
rte_eth_dev_state
Definition: rte_ethdev.h:1233
uint16_t rx_free_thresh
Definition: rte_ethdev.h:672
struct rte_eth_vlan_mirror vlan
Definition: rte_ethdev.h:530
const char *__rte_experimental rte_eth_dev_rx_offload_name(uint64_t offload)
uint64_t dev_capa
Definition: rte_ethdev.h:1057
uint64_t ierrors
Definition: rte_ethdev.h:184
int rte_eth_xstats_get_id_by_name(uint16_t port_id, const char *xstat_name, uint64_t *id)
int rte_eth_dev_get_port_by_name(const char *name, uint16_t *port_id)
__extension__ uint8_t hw_vlan_insert_pvid
Definition: rte_ethdev.h:660
uint8_t priority
Definition: rte_ethdev.h:765
const struct rte_eth_rxtx_callback * rte_eth_add_tx_callback(uint16_t port_id, uint16_t queue_id, rte_tx_callback_fn fn, void *user_param)
uint64_t flow_type_rss_offloads
Definition: rte_ethdev.h:1040
int __rte_experimental rte_eth_dev_owner_set(const uint16_t port_id, const struct rte_eth_dev_owner *owner)
int rte_eth_dev_rx_intr_ctl(uint16_t port_id, int epfd, int op, void *data)
struct rte_eth_dev_portconf default_txportconf
Definition: rte_ethdev.h:1055
rte_vlan_type
Definition: rte_ethdev.h:343
uint16_t nb_seg_max
Definition: rte_ethdev.h:717
uint8_t prio_tc[ETH_DCB_NUM_USER_PRIORITIES]
Definition: rte_ethdev.h:1139
uint64_t ipackets
Definition: rte_ethdev.h:176
uint16_t max_vfs
Definition: rte_ethdev.h:1026
uint16_t pause_time
Definition: rte_ethdev.h:751
struct rte_eth_dcb_tc_queue_mapping tc_queue
Definition: rte_ethdev.h:1142
int __rte_experimental rte_eth_dev_get_module_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info)
rte_filter_type
Definition: rte_eth_ctrl.h:63
int rte_eth_dev_l2_tunnel_offload_set(uint16_t port_id, struct rte_eth_l2_tunnel_conf *l2_tunnel, uint32_t mask, uint8_t en)
uint64_t rx_nombuf
Definition: rte_ethdev.h:186
static __rte_always_inline uint16_t rte_eth_tx_buffer(uint16_t port_id, uint16_t queue_id, struct rte_eth_dev_tx_buffer *buffer, struct rte_mbuf *tx_pkt)
Definition: rte_ethdev.h:4284
__extension__ uint8_t hw_vlan_reject_untagged
Definition: rte_ethdev.h:660
int rte_eth_dev_get_dcb_info(uint16_t port_id, struct rte_eth_dcb_info *dcb_info)
#define __rte_cache_min_aligned
Definition: rte_memory.h:71
int rte_eth_dev_mac_addr_add(uint16_t port_id, struct ether_addr *mac_addr, uint32_t pool)
#define ETH_MQ_RX_RSS_FLAG
Definition: rte_ethdev.h:268
uint16_t vmdq_queue_base
Definition: rte_ethdev.h:1043
rte_eth_event_ipsec_subtype
Definition: rte_ethdev.h:2531
rte_eth_nb_pools
Definition: rte_ethdev.h:559
void rte_eth_xstats_reset(uint16_t port_id)
#define ETH_MIRROR_MAX_VLANS
Definition: rte_ethdev.h:505
uint16_t nb_align
Definition: rte_ethdev.h:707
int rte_eth_dev_default_mac_addr_set(uint16_t port_id, struct ether_addr *mac_addr)
int rte_eth_dev_set_tx_queue_stats_mapping(uint16_t port_id, uint16_t tx_queue_id, uint8_t stat_idx)
int rte_eth_dev_set_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info)
rte_eth_rx_mq_mode
Definition: rte_ethdev.h:276
const char * driver_name
Definition: rte_ethdev.h:1015
static int rte_eth_rx_queue_count(uint16_t port_id, uint16_t queue_id)
Definition: rte_ethdev.h:3848
int rte_eth_rx_queue_info_get(uint16_t port_id, uint16_t queue_id, struct rte_eth_rxq_info *qinfo)
int rte_eth_dev_socket_id(uint16_t port_id)
uint8_t enable_default_pool
Definition: rte_ethdev.h:633
int rte_eth_dev_vlan_filter(uint16_t port_id, uint16_t vlan_id, int on)
uint16_t nb_tx_queues
Definition: rte_ethdev.h:1051
struct rte_eth_fdir_flex_conf flex_conf
Definition: rte_ethdev.h:800
int rte_eth_dev_set_mtu(uint16_t port_id, uint16_t mtu)
int rte_eth_dev_set_vlan_ether_type(uint16_t port_id, enum rte_vlan_type vlan_type, uint16_t tag_type)
int __rte_experimental rte_eth_dev_get_module_info(uint16_t port_id, struct rte_eth_dev_module_info *modinfo)
int rte_eth_dev_filter_supported(uint16_t port_id, enum rte_filter_type filter_type)
int __rte_experimental rte_eth_dev_owner_get(const uint16_t port_id, struct rte_eth_dev_owner *owner)
uint32_t max_mac_addrs
Definition: rte_ethdev.h:1023
int rte_eth_xstats_get_names_by_id(uint16_t port_id, struct rte_eth_xstat_name *xstats_names, unsigned int size, uint64_t *ids)
uint64_t value
Definition: rte_ethdev.h:1099
int rte_eth_dev_rss_reta_query(uint16_t port_id, struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)
int rte_eth_promiscuous_get(uint16_t port_id)
int rte_eth_led_on(uint16_t port_id)
enum rte_fdir_pballoc_type pballoc
Definition: rte_ethdev.h:795
int rte_eth_timesync_read_tx_timestamp(uint16_t port_id, struct timespec *timestamp)
uint32_t max_rx_pktlen
Definition: rte_ethdev.h:1020
int rte_eth_dev_get_supported_ptypes(uint16_t port_id, uint32_t ptype_mask, uint32_t *ptypes, int num)
uint64_t rss_hf
Definition: rte_ethdev.h:378
int __rte_experimental rte_eth_dev_owner_new(uint64_t *owner_id)
__rte_deprecated int rte_eth_dev_attach(const char *devargs, uint16_t *port_id)
uint64_t id
Definition: rte_ethdev.h:1098
__extension__ uint8_t hw_vlan_reject_tagged
Definition: rte_ethdev.h:660
enum rte_fdir_mode mode
Definition: rte_ethdev.h:794
int rte_eth_stats_get(uint16_t port_id, struct rte_eth_stats *stats)
unsigned int if_index
Definition: rte_ethdev.h:1016
void rte_eth_allmulticast_disable(uint16_t port_id)
uint8_t mac_ctrl_frame_fwd
Definition: rte_ethdev.h:754
uint16_t(* rte_rx_callback_fn)(uint16_t port_id, uint16_t queue, struct rte_mbuf *pkts[], uint16_t nb_pkts, uint16_t max_pkts, void *user_param)
Definition: rte_ethdev.h:1203
enum rte_eth_fc_mode mode
Definition: rte_ethdev.h:753
int rte_eth_dev_get_mtu(uint16_t port_id, uint16_t *mtu)
rte_fdir_mode
Definition: rte_eth_ctrl.h:658
struct rte_eth_vmdq_dcb_conf vmdq_dcb_conf
Definition: rte_ethdev.h:850
uint8_t * rss_key
Definition: rte_ethdev.h:376
rte_fdir_status_mode
Definition: rte_ethdev.h:781
uint8_t tx_deferred_start
Definition: rte_ethdev.h:692
uint8_t wthresh
Definition: rte_ethdev.h:262
uint16_t max_rx_queues
Definition: rte_ethdev.h:1021
union rte_eth_conf::@116 tx_adv_conf
int rte_eth_tx_buffer_init(struct rte_eth_dev_tx_buffer *buffer, uint16_t size)
void rte_eth_dev_info_get(uint16_t port_id, struct rte_eth_dev_info *dev_info)
int rte_eth_xstats_get_names(uint16_t port_id, struct rte_eth_xstat_name *xstats_names, unsigned int size)
struct rte_eth_fc_conf fc
Definition: rte_ethdev.h:764
const struct rte_eth_rxtx_callback * rte_eth_add_first_rx_callback(uint16_t port_id, uint16_t queue_id, rte_rx_callback_fn fn, void *user_param)
int rte_eth_tx_done_cleanup(uint16_t port_id, uint16_t queue_id, uint32_t free_cnt)
int rte_eth_dev_set_vlan_offload(uint16_t port_id, int offload_mask)
struct rte_eth_txmode txmode
Definition: rte_ethdev.h:842
int rte_eth_allmulticast_get(uint16_t port_id)
uint8_t rx_drop_en
Definition: rte_ethdev.h:673
int rte_eth_dev_is_valid_port(uint16_t port_id)
uint16_t nb_min
Definition: rte_ethdev.h:706
int rte_eth_timesync_enable(uint16_t port_id)
uint8_t pthresh
Definition: rte_ethdev.h:260
void rte_eth_link_get_nowait(uint16_t port_id, struct rte_eth_link *link)
int __rte_experimental rte_eth_dev_owner_unset(const uint16_t port_id, const uint64_t owner_id)
struct rte_eth_rxconf default_rxconf
Definition: rte_ethdev.h:1041
uint32_t speed_capa
Definition: rte_ethdev.h:1048
static uint16_t rte_eth_tx_burst(uint16_t port_id, uint16_t queue_id, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
Definition: rte_ethdev.h:4068
uint8_t drop_queue
Definition: rte_ethdev.h:798
int rte_eth_dev_rx_queue_start(uint16_t port_id, uint16_t rx_queue_id)
int rte_eth_dev_set_link_down(uint16_t port_id)
int rte_eth_dev_flow_ctrl_set(uint16_t port_id, struct rte_eth_fc_conf *fc_conf)
__rte_deprecated uint16_t rte_eth_dev_count(void)
uint8_t autoneg
Definition: rte_ethdev.h:755
uint32_t min_rx_bufsize
Definition: rte_ethdev.h:1019
int rte_eth_dev_get_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info)
uint32_t lsc
Definition: rte_ethdev.h:821
static uint16_t rte_eth_tx_buffer_flush(uint16_t port_id, uint16_t queue_id, struct rte_eth_dev_tx_buffer *buffer)
Definition: rte_ethdev.h:4231
int rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id, uint16_t nb_rx_desc, unsigned int socket_id, const struct rte_eth_rxconf *rx_conf, struct rte_mempool *mb_pool)
rte_eth_event_type
Definition: rte_ethdev.h:2570