DPDK  18.08.0
rte_mbuf.h
Go to the documentation of this file.
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2014 Intel Corporation.
3  * Copyright 2014 6WIND S.A.
4  */
5 
6 #ifndef _RTE_MBUF_H_
7 #define _RTE_MBUF_H_
8 
34 #include <stdint.h>
35 #include <rte_compat.h>
36 #include <rte_common.h>
37 #include <rte_config.h>
38 #include <rte_mempool.h>
39 #include <rte_memory.h>
40 #include <rte_atomic.h>
41 #include <rte_prefetch.h>
42 #include <rte_branch_prediction.h>
43 #include <rte_mbuf_ptype.h>
44 
45 #ifdef __cplusplus
46 extern "C" {
47 #endif
48 
49 /*
50  * Packet Offload Features Flags. It also carries packet type information.
51  * Critical resources. Both RX and TX share these bits. Be cautious on any change.
52  *
53  * - RX flags start at bit position zero, and get added to the left of previous
54  * flags.
55  * - The most-significant 3 bits are reserved for generic mbuf flags
56  * - TX flags therefore start at bit position 60 (i.e. 63-3), and new flags get
57  * added to the right of the previously defined flags i.e. they should count
58  * downwards, not upwards.
59  *
60  * Keep these flags synchronized with rte_get_rx_ol_flag_name() and
61  * rte_get_tx_ol_flag_name().
62  */
63 
71 #define PKT_RX_VLAN (1ULL << 0)
72 
73 #define PKT_RX_RSS_HASH (1ULL << 1)
74 #define PKT_RX_FDIR (1ULL << 2)
83 #define PKT_RX_L4_CKSUM_BAD (1ULL << 3)
84 
92 #define PKT_RX_IP_CKSUM_BAD (1ULL << 4)
93 
94 #define PKT_RX_EIP_CKSUM_BAD (1ULL << 5)
102 #define PKT_RX_VLAN_STRIPPED (1ULL << 6)
103 
112 #define PKT_RX_IP_CKSUM_MASK ((1ULL << 4) | (1ULL << 7))
113 
114 #define PKT_RX_IP_CKSUM_UNKNOWN 0
115 #define PKT_RX_IP_CKSUM_BAD (1ULL << 4)
116 #define PKT_RX_IP_CKSUM_GOOD (1ULL << 7)
117 #define PKT_RX_IP_CKSUM_NONE ((1ULL << 4) | (1ULL << 7))
118 
127 #define PKT_RX_L4_CKSUM_MASK ((1ULL << 3) | (1ULL << 8))
128 
129 #define PKT_RX_L4_CKSUM_UNKNOWN 0
130 #define PKT_RX_L4_CKSUM_BAD (1ULL << 3)
131 #define PKT_RX_L4_CKSUM_GOOD (1ULL << 8)
132 #define PKT_RX_L4_CKSUM_NONE ((1ULL << 3) | (1ULL << 8))
133 
134 #define PKT_RX_IEEE1588_PTP (1ULL << 9)
135 #define PKT_RX_IEEE1588_TMST (1ULL << 10)
136 #define PKT_RX_FDIR_ID (1ULL << 13)
137 #define PKT_RX_FDIR_FLX (1ULL << 14)
147 #define PKT_RX_QINQ_STRIPPED (1ULL << 15)
148 
154 #define PKT_RX_LRO (1ULL << 16)
155 
159 #define PKT_RX_TIMESTAMP (1ULL << 17)
160 
164 #define PKT_RX_SEC_OFFLOAD (1ULL << 18)
165 
169 #define PKT_RX_SEC_OFFLOAD_FAILED (1ULL << 19)
170 
178 #define PKT_RX_QINQ (1ULL << 20)
179 
180 /* add new RX flags here */
181 
182 /* add new TX flags here */
183 
189 #define PKT_TX_UDP_SEG (1ULL << 42)
190 
194 #define PKT_TX_SEC_OFFLOAD (1ULL << 43)
195 
200 #define PKT_TX_MACSEC (1ULL << 44)
201 
210 #define PKT_TX_TUNNEL_VXLAN (0x1ULL << 45)
211 #define PKT_TX_TUNNEL_GRE (0x2ULL << 45)
212 #define PKT_TX_TUNNEL_IPIP (0x3ULL << 45)
213 #define PKT_TX_TUNNEL_GENEVE (0x4ULL << 45)
214 
215 #define PKT_TX_TUNNEL_MPLSINUDP (0x5ULL << 45)
216 #define PKT_TX_TUNNEL_VXLAN_GPE (0x6ULL << 45)
217 
228 #define PKT_TX_TUNNEL_IP (0xDULL << 45)
229 
241 #define PKT_TX_TUNNEL_UDP (0xEULL << 45)
242 /* add new TX TUNNEL type here */
243 #define PKT_TX_TUNNEL_MASK (0xFULL << 45)
244 
248 #define PKT_TX_QINQ (1ULL << 49)
249 /* this old name is deprecated */
250 #define PKT_TX_QINQ_PKT PKT_TX_QINQ
251 
261 #define PKT_TX_TCP_SEG (1ULL << 50)
262 
263 #define PKT_TX_IEEE1588_TMST (1ULL << 51)
273 #define PKT_TX_L4_NO_CKSUM (0ULL << 52)
274 #define PKT_TX_TCP_CKSUM (1ULL << 52)
275 #define PKT_TX_SCTP_CKSUM (2ULL << 52)
276 #define PKT_TX_UDP_CKSUM (3ULL << 52)
277 #define PKT_TX_L4_MASK (3ULL << 52)
285 #define PKT_TX_IP_CKSUM (1ULL << 54)
286 
293 #define PKT_TX_IPV4 (1ULL << 55)
294 
301 #define PKT_TX_IPV6 (1ULL << 56)
302 
306 #define PKT_TX_VLAN (1ULL << 57)
307 /* this old name is deprecated */
308 #define PKT_TX_VLAN_PKT PKT_TX_VLAN
309 
316 #define PKT_TX_OUTER_IP_CKSUM (1ULL << 58)
317 
323 #define PKT_TX_OUTER_IPV4 (1ULL << 59)
324 
330 #define PKT_TX_OUTER_IPV6 (1ULL << 60)
331 
336 #define PKT_TX_OFFLOAD_MASK ( \
337  PKT_TX_IP_CKSUM | \
338  PKT_TX_L4_MASK | \
339  PKT_TX_OUTER_IP_CKSUM | \
340  PKT_TX_TCP_SEG | \
341  PKT_TX_IEEE1588_TMST | \
342  PKT_TX_QINQ_PKT | \
343  PKT_TX_VLAN_PKT | \
344  PKT_TX_TUNNEL_MASK | \
345  PKT_TX_MACSEC | \
346  PKT_TX_SEC_OFFLOAD)
347 
351 #define EXT_ATTACHED_MBUF (1ULL << 61)
352 
353 #define IND_ATTACHED_MBUF (1ULL << 62)
356 #define RTE_MBUF_PRIV_ALIGN 8
357 
366 const char *rte_get_rx_ol_flag_name(uint64_t mask);
367 
380 int rte_get_rx_ol_flag_list(uint64_t mask, char *buf, size_t buflen);
381 
392 const char *rte_get_tx_ol_flag_name(uint64_t mask);
393 
406 int rte_get_tx_ol_flag_list(uint64_t mask, char *buf, size_t buflen);
407 
414 #define RTE_MBUF_DEFAULT_DATAROOM 2048
415 #define RTE_MBUF_DEFAULT_BUF_SIZE \
416  (RTE_MBUF_DEFAULT_DATAROOM + RTE_PKTMBUF_HEADROOM)
417 
418 /* define a set of marker types that can be used to refer to set points in the
419  * mbuf */
420 __extension__
421 typedef void *MARKER[0];
422 __extension__
423 typedef uint8_t MARKER8[0];
424 __extension__
425 typedef uint64_t MARKER64[0];
431 struct rte_mbuf {
432  MARKER cacheline0;
433 
434  void *buf_addr;
442  union {
443  rte_iova_t buf_iova;
445  } __rte_aligned(sizeof(rte_iova_t));
446 
447  /* next 8 bytes are initialised on RX descriptor rearm */
448  MARKER64 rearm_data;
449  uint16_t data_off;
450 
461  union {
463  uint16_t refcnt;
464  };
465  uint16_t nb_segs;
468  uint16_t port;
469 
470  uint64_t ol_flags;
472  /* remaining bytes are set on RX when pulling packet from descriptor */
473  MARKER rx_descriptor_fields1;
474 
475  /*
476  * The packet type, which is the combination of outer/inner L2, L3, L4
477  * and tunnel types. The packet_type is about data really present in the
478  * mbuf. Example: if vlan stripping is enabled, a received vlan packet
479  * would have RTE_PTYPE_L2_ETHER and not RTE_PTYPE_L2_VLAN because the
480  * vlan is stripped from the data.
481  */
483  union {
484  uint32_t packet_type;
485  struct {
486  uint32_t l2_type:4;
487  uint32_t l3_type:4;
488  uint32_t l4_type:4;
489  uint32_t tun_type:4;
491  union {
497  __extension__
498  struct {
499  uint8_t inner_l2_type:4;
501  uint8_t inner_l3_type:4;
503  };
504  };
505  uint32_t inner_l4_type:4;
506  };
507  };
508 
509  uint32_t pkt_len;
510  uint16_t data_len;
512  uint16_t vlan_tci;
513 
514  union {
515  uint32_t rss;
516  struct {
518  union {
519  struct {
520  uint16_t hash;
521  uint16_t id;
522  };
523  uint32_t lo;
525  };
526  uint32_t hi;
529  } fdir;
530  struct {
531  uint32_t lo;
532  uint32_t hi;
533  } sched;
534  uint32_t usr;
535  } hash;
538  uint16_t vlan_tci_outer;
539 
540  uint16_t buf_len;
545  uint64_t timestamp;
546 
547  /* second cache line - fields only used in slow path or on TX */
548  MARKER cacheline1 __rte_cache_min_aligned;
549 
551  union {
552  void *userdata;
553  uint64_t udata64;
554  };
555 
556  struct rte_mempool *pool;
557  struct rte_mbuf *next;
559  /* fields to support TX offloads */
561  union {
562  uint64_t tx_offload;
563  __extension__
564  struct {
565  uint64_t l2_len:7;
569  uint64_t l3_len:9;
570  uint64_t l4_len:8;
571  uint64_t tso_segsz:16;
573  /* fields for TX offloading of tunnels */
574  uint64_t outer_l3_len:9;
575  uint64_t outer_l2_len:7;
577  /* uint64_t unused:8; */
578  };
579  };
580 
583  uint16_t priv_size;
584 
586  uint16_t timesync;
587 
589  uint32_t seqn;
590 
595 
597 
601 typedef void (*rte_mbuf_extbuf_free_callback_t)(void *addr, void *opaque);
602 
608  void *fcb_opaque;
610 };
611 
613 #define RTE_MBUF_MAX_NB_SEGS UINT16_MAX
614 
625 static inline void
627 {
628  rte_prefetch0(&m->cacheline0);
629 }
630 
642 static inline void
644 {
645 #if RTE_CACHE_LINE_SIZE == 64
646  rte_prefetch0(&m->cacheline1);
647 #else
648  RTE_SET_USED(m);
649 #endif
650 }
651 
652 
653 static inline uint16_t rte_pktmbuf_priv_size(struct rte_mempool *mp);
654 
663 static inline rte_iova_t
664 rte_mbuf_data_iova(const struct rte_mbuf *mb)
665 {
666  return mb->buf_iova + mb->data_off;
667 }
668 
669 __rte_deprecated
670 static inline phys_addr_t
671 rte_mbuf_data_dma_addr(const struct rte_mbuf *mb)
672 {
673  return rte_mbuf_data_iova(mb);
674 }
675 
688 static inline rte_iova_t
690 {
691  return mb->buf_iova + RTE_PKTMBUF_HEADROOM;
692 }
693 
694 __rte_deprecated
695 static inline phys_addr_t
696 rte_mbuf_data_dma_addr_default(const struct rte_mbuf *mb)
697 {
698  return rte_mbuf_data_iova_default(mb);
699 }
700 
709 static inline struct rte_mbuf *
711 {
712  return (struct rte_mbuf *)RTE_PTR_SUB(mi->buf_addr, sizeof(*mi) + mi->priv_size);
713 }
714 
723 static inline char *
725 {
726  char *buffer_addr;
727  buffer_addr = (char *)md + sizeof(*md) + rte_pktmbuf_priv_size(md->pool);
728  return buffer_addr;
729 }
730 
743 static inline void * __rte_experimental
745 {
746  return RTE_PTR_ADD(m, sizeof(struct rte_mbuf));
747 }
748 
756 #define RTE_MBUF_CLONED(mb) ((mb)->ol_flags & IND_ATTACHED_MBUF)
757 
762 #define RTE_MBUF_INDIRECT(mb) RTE_MBUF_CLONED(mb)
763 
769 #define RTE_MBUF_HAS_EXTBUF(mb) ((mb)->ol_flags & EXT_ATTACHED_MBUF)
770 
777 #define RTE_MBUF_DIRECT(mb) \
778  (!((mb)->ol_flags & (IND_ATTACHED_MBUF | EXT_ATTACHED_MBUF)))
779 
788  uint16_t mbuf_priv_size;
789 };
790 
791 #ifdef RTE_LIBRTE_MBUF_DEBUG
792 
794 #define __rte_mbuf_sanity_check(m, is_h) rte_mbuf_sanity_check(m, is_h)
795 
796 #else /* RTE_LIBRTE_MBUF_DEBUG */
797 
799 #define __rte_mbuf_sanity_check(m, is_h) do { } while (0)
800 
801 #endif /* RTE_LIBRTE_MBUF_DEBUG */
802 
803 #ifdef RTE_MBUF_REFCNT_ATOMIC
804 
812 static inline uint16_t
813 rte_mbuf_refcnt_read(const struct rte_mbuf *m)
814 {
815  return (uint16_t)(rte_atomic16_read(&m->refcnt_atomic));
816 }
817 
825 static inline void
826 rte_mbuf_refcnt_set(struct rte_mbuf *m, uint16_t new_value)
827 {
828  rte_atomic16_set(&m->refcnt_atomic, (int16_t)new_value);
829 }
830 
831 /* internal */
832 static inline uint16_t
833 __rte_mbuf_refcnt_update(struct rte_mbuf *m, int16_t value)
834 {
835  return (uint16_t)(rte_atomic16_add_return(&m->refcnt_atomic, value));
836 }
837 
847 static inline uint16_t
848 rte_mbuf_refcnt_update(struct rte_mbuf *m, int16_t value)
849 {
850  /*
851  * The atomic_add is an expensive operation, so we don't want to
852  * call it in the case where we know we are the uniq holder of
853  * this mbuf (i.e. ref_cnt == 1). Otherwise, an atomic
854  * operation has to be used because concurrent accesses on the
855  * reference counter can occur.
856  */
857  if (likely(rte_mbuf_refcnt_read(m) == 1)) {
858  ++value;
859  rte_mbuf_refcnt_set(m, (uint16_t)value);
860  return (uint16_t)value;
861  }
862 
863  return __rte_mbuf_refcnt_update(m, value);
864 }
865 
866 #else /* ! RTE_MBUF_REFCNT_ATOMIC */
867 
868 /* internal */
869 static inline uint16_t
870 __rte_mbuf_refcnt_update(struct rte_mbuf *m, int16_t value)
871 {
872  m->refcnt = (uint16_t)(m->refcnt + value);
873  return m->refcnt;
874 }
875 
879 static inline uint16_t
880 rte_mbuf_refcnt_update(struct rte_mbuf *m, int16_t value)
881 {
882  return __rte_mbuf_refcnt_update(m, value);
883 }
884 
888 static inline uint16_t
890 {
891  return m->refcnt;
892 }
893 
897 static inline void
898 rte_mbuf_refcnt_set(struct rte_mbuf *m, uint16_t new_value)
899 {
900  m->refcnt = new_value;
901 }
902 
903 #endif /* RTE_MBUF_REFCNT_ATOMIC */
904 
913 static inline uint16_t
915 {
916  return (uint16_t)(rte_atomic16_read(&shinfo->refcnt_atomic));
917 }
918 
927 static inline void
929  uint16_t new_value)
930 {
931  rte_atomic16_set(&shinfo->refcnt_atomic, (int16_t)new_value);
932 }
933 
945 static inline uint16_t
947  int16_t value)
948 {
949  if (likely(rte_mbuf_ext_refcnt_read(shinfo) == 1)) {
950  ++value;
951  rte_mbuf_ext_refcnt_set(shinfo, (uint16_t)value);
952  return (uint16_t)value;
953  }
954 
955  return (uint16_t)rte_atomic16_add_return(&shinfo->refcnt_atomic, value);
956 }
957 
959 #define RTE_MBUF_PREFETCH_TO_FREE(m) do { \
960  if ((m) != NULL) \
961  rte_prefetch0(m); \
962 } while (0)
963 
964 
977 void
978 rte_mbuf_sanity_check(const struct rte_mbuf *m, int is_header);
979 
980 #define MBUF_RAW_ALLOC_CHECK(m) do { \
981  RTE_ASSERT(rte_mbuf_refcnt_read(m) == 1); \
982  RTE_ASSERT((m)->next == NULL); \
983  RTE_ASSERT((m)->nb_segs == 1); \
984  __rte_mbuf_sanity_check(m, 0); \
985 } while (0)
986 
1006 static inline struct rte_mbuf *rte_mbuf_raw_alloc(struct rte_mempool *mp)
1007 {
1008  struct rte_mbuf *m;
1009 
1010  if (rte_mempool_get(mp, (void **)&m) < 0)
1011  return NULL;
1012  MBUF_RAW_ALLOC_CHECK(m);
1013  return m;
1014 }
1015 
1030 static __rte_always_inline void
1032 {
1033  RTE_ASSERT(RTE_MBUF_DIRECT(m));
1034  RTE_ASSERT(rte_mbuf_refcnt_read(m) == 1);
1035  RTE_ASSERT(m->next == NULL);
1036  RTE_ASSERT(m->nb_segs == 1);
1038  rte_mempool_put(m->pool, m);
1039 }
1040 
1041 /* compat with older versions */
1042 __rte_deprecated
1043 static inline void
1044 __rte_mbuf_raw_free(struct rte_mbuf *m)
1045 {
1046  rte_mbuf_raw_free(m);
1047 }
1048 
1068 void rte_pktmbuf_init(struct rte_mempool *mp, void *opaque_arg,
1069  void *m, unsigned i);
1070 
1071 
1089 void rte_pktmbuf_pool_init(struct rte_mempool *mp, void *opaque_arg);
1090 
1125 struct rte_mempool *
1126 rte_pktmbuf_pool_create(const char *name, unsigned n,
1127  unsigned cache_size, uint16_t priv_size, uint16_t data_room_size,
1128  int socket_id);
1129 
1167 struct rte_mempool *
1168 rte_pktmbuf_pool_create_by_ops(const char *name, unsigned int n,
1169  unsigned int cache_size, uint16_t priv_size, uint16_t data_room_size,
1170  int socket_id, const char *ops_name);
1171 
1183 static inline uint16_t
1185 {
1186  struct rte_pktmbuf_pool_private *mbp_priv;
1187 
1188  mbp_priv = (struct rte_pktmbuf_pool_private *)rte_mempool_get_priv(mp);
1189  return mbp_priv->mbuf_data_room_size;
1190 }
1191 
1204 static inline uint16_t
1206 {
1207  struct rte_pktmbuf_pool_private *mbp_priv;
1208 
1209  mbp_priv = (struct rte_pktmbuf_pool_private *)rte_mempool_get_priv(mp);
1210  return mbp_priv->mbuf_priv_size;
1211 }
1212 
1221 static inline void rte_pktmbuf_reset_headroom(struct rte_mbuf *m)
1222 {
1223  m->data_off = (uint16_t)RTE_MIN((uint16_t)RTE_PKTMBUF_HEADROOM,
1224  (uint16_t)m->buf_len);
1225 }
1226 
1235 #define MBUF_INVALID_PORT UINT16_MAX
1236 
1237 static inline void rte_pktmbuf_reset(struct rte_mbuf *m)
1238 {
1239  m->next = NULL;
1240  m->pkt_len = 0;
1241  m->tx_offload = 0;
1242  m->vlan_tci = 0;
1243  m->vlan_tci_outer = 0;
1244  m->nb_segs = 1;
1245  m->port = MBUF_INVALID_PORT;
1246 
1247  m->ol_flags = 0;
1248  m->packet_type = 0;
1250 
1251  m->data_len = 0;
1253 }
1254 
1268 static inline struct rte_mbuf *rte_pktmbuf_alloc(struct rte_mempool *mp)
1269 {
1270  struct rte_mbuf *m;
1271  if ((m = rte_mbuf_raw_alloc(mp)) != NULL)
1272  rte_pktmbuf_reset(m);
1273  return m;
1274 }
1275 
1290 static inline int rte_pktmbuf_alloc_bulk(struct rte_mempool *pool,
1291  struct rte_mbuf **mbufs, unsigned count)
1292 {
1293  unsigned idx = 0;
1294  int rc;
1295 
1296  rc = rte_mempool_get_bulk(pool, (void **)mbufs, count);
1297  if (unlikely(rc))
1298  return rc;
1299 
1300  /* To understand duff's device on loop unwinding optimization, see
1301  * https://en.wikipedia.org/wiki/Duff's_device.
1302  * Here while() loop is used rather than do() while{} to avoid extra
1303  * check if count is zero.
1304  */
1305  switch (count % 4) {
1306  case 0:
1307  while (idx != count) {
1308  MBUF_RAW_ALLOC_CHECK(mbufs[idx]);
1309  rte_pktmbuf_reset(mbufs[idx]);
1310  idx++;
1311  /* fall-through */
1312  case 3:
1313  MBUF_RAW_ALLOC_CHECK(mbufs[idx]);
1314  rte_pktmbuf_reset(mbufs[idx]);
1315  idx++;
1316  /* fall-through */
1317  case 2:
1318  MBUF_RAW_ALLOC_CHECK(mbufs[idx]);
1319  rte_pktmbuf_reset(mbufs[idx]);
1320  idx++;
1321  /* fall-through */
1322  case 1:
1323  MBUF_RAW_ALLOC_CHECK(mbufs[idx]);
1324  rte_pktmbuf_reset(mbufs[idx]);
1325  idx++;
1326  /* fall-through */
1327  }
1328  }
1329  return 0;
1330 }
1331 
1364 static inline struct rte_mbuf_ext_shared_info *
1365 rte_pktmbuf_ext_shinfo_init_helper(void *buf_addr, uint16_t *buf_len,
1367 {
1368  struct rte_mbuf_ext_shared_info *shinfo;
1369  void *buf_end = RTE_PTR_ADD(buf_addr, *buf_len);
1370  void *addr;
1371 
1372  addr = RTE_PTR_ALIGN_FLOOR(RTE_PTR_SUB(buf_end, sizeof(*shinfo)),
1373  sizeof(uintptr_t));
1374  if (addr <= buf_addr)
1375  return NULL;
1376 
1377  shinfo = (struct rte_mbuf_ext_shared_info *)addr;
1378  shinfo->free_cb = free_cb;
1379  shinfo->fcb_opaque = fcb_opaque;
1380  rte_mbuf_ext_refcnt_set(shinfo, 1);
1381 
1382  *buf_len = (uint16_t)RTE_PTR_DIFF(shinfo, buf_addr);
1383  return shinfo;
1384 }
1385 
1449 static inline void __rte_experimental
1450 rte_pktmbuf_attach_extbuf(struct rte_mbuf *m, void *buf_addr,
1451  rte_iova_t buf_iova, uint16_t buf_len,
1452  struct rte_mbuf_ext_shared_info *shinfo)
1453 {
1454  /* mbuf should not be read-only */
1455  RTE_ASSERT(RTE_MBUF_DIRECT(m) && rte_mbuf_refcnt_read(m) == 1);
1456  RTE_ASSERT(shinfo->free_cb != NULL);
1457 
1458  m->buf_addr = buf_addr;
1459  m->buf_iova = buf_iova;
1460  m->buf_len = buf_len;
1461 
1462  m->data_len = 0;
1463  m->data_off = 0;
1464 
1466  m->shinfo = shinfo;
1467 }
1468 
1476 #define rte_pktmbuf_detach_extbuf(m) rte_pktmbuf_detach(m)
1477 
1499 static inline void rte_pktmbuf_attach(struct rte_mbuf *mi, struct rte_mbuf *m)
1500 {
1501  RTE_ASSERT(RTE_MBUF_DIRECT(mi) &&
1502  rte_mbuf_refcnt_read(mi) == 1);
1503 
1504  if (RTE_MBUF_HAS_EXTBUF(m)) {
1506  mi->ol_flags = m->ol_flags;
1507  mi->shinfo = m->shinfo;
1508  } else {
1509  /* if m is not direct, get the mbuf that embeds the data */
1511  mi->priv_size = m->priv_size;
1512  mi->ol_flags = m->ol_flags | IND_ATTACHED_MBUF;
1513  }
1514 
1515  mi->buf_iova = m->buf_iova;
1516  mi->buf_addr = m->buf_addr;
1517  mi->buf_len = m->buf_len;
1518 
1519  mi->data_off = m->data_off;
1520  mi->data_len = m->data_len;
1521  mi->port = m->port;
1522  mi->vlan_tci = m->vlan_tci;
1523  mi->vlan_tci_outer = m->vlan_tci_outer;
1524  mi->tx_offload = m->tx_offload;
1525  mi->hash = m->hash;
1526 
1527  mi->next = NULL;
1528  mi->pkt_len = mi->data_len;
1529  mi->nb_segs = 1;
1530  mi->packet_type = m->packet_type;
1531  mi->timestamp = m->timestamp;
1532 
1533  __rte_mbuf_sanity_check(mi, 1);
1535 }
1536 
1544 static inline void
1545 __rte_pktmbuf_free_extbuf(struct rte_mbuf *m)
1546 {
1547  RTE_ASSERT(RTE_MBUF_HAS_EXTBUF(m));
1548  RTE_ASSERT(m->shinfo != NULL);
1549 
1550  if (rte_mbuf_ext_refcnt_update(m->shinfo, -1) == 0)
1551  m->shinfo->free_cb(m->buf_addr, m->shinfo->fcb_opaque);
1552 }
1553 
1560 static inline void
1561 __rte_pktmbuf_free_direct(struct rte_mbuf *m)
1562 {
1563  struct rte_mbuf *md;
1564 
1565  RTE_ASSERT(RTE_MBUF_INDIRECT(m));
1566 
1567  md = rte_mbuf_from_indirect(m);
1568 
1569  if (rte_mbuf_refcnt_update(md, -1) == 0) {
1570  md->next = NULL;
1571  md->nb_segs = 1;
1572  rte_mbuf_refcnt_set(md, 1);
1573  rte_mbuf_raw_free(md);
1574  }
1575 }
1576 
1590 static inline void rte_pktmbuf_detach(struct rte_mbuf *m)
1591 {
1592  struct rte_mempool *mp = m->pool;
1593  uint32_t mbuf_size, buf_len;
1594  uint16_t priv_size;
1595 
1596  if (RTE_MBUF_HAS_EXTBUF(m))
1597  __rte_pktmbuf_free_extbuf(m);
1598  else
1599  __rte_pktmbuf_free_direct(m);
1600 
1601  priv_size = rte_pktmbuf_priv_size(mp);
1602  mbuf_size = (uint32_t)(sizeof(struct rte_mbuf) + priv_size);
1603  buf_len = rte_pktmbuf_data_room_size(mp);
1604 
1605  m->priv_size = priv_size;
1606  m->buf_addr = (char *)m + mbuf_size;
1607  m->buf_iova = rte_mempool_virt2iova(m) + mbuf_size;
1608  m->buf_len = (uint16_t)buf_len;
1610  m->data_len = 0;
1611  m->ol_flags = 0;
1612 }
1613 
1628 static __rte_always_inline struct rte_mbuf *
1630 {
1632 
1633  if (likely(rte_mbuf_refcnt_read(m) == 1)) {
1634 
1635  if (!RTE_MBUF_DIRECT(m))
1636  rte_pktmbuf_detach(m);
1637 
1638  if (m->next != NULL) {
1639  m->next = NULL;
1640  m->nb_segs = 1;
1641  }
1642 
1643  return m;
1644 
1645  } else if (__rte_mbuf_refcnt_update(m, -1) == 0) {
1646 
1647  if (!RTE_MBUF_DIRECT(m))
1648  rte_pktmbuf_detach(m);
1649 
1650  if (m->next != NULL) {
1651  m->next = NULL;
1652  m->nb_segs = 1;
1653  }
1654  rte_mbuf_refcnt_set(m, 1);
1655 
1656  return m;
1657  }
1658  return NULL;
1659 }
1660 
1661 /* deprecated, replaced by rte_pktmbuf_prefree_seg() */
1662 __rte_deprecated
1663 static inline struct rte_mbuf *
1664 __rte_pktmbuf_prefree_seg(struct rte_mbuf *m)
1665 {
1666  return rte_pktmbuf_prefree_seg(m);
1667 }
1668 
1678 static __rte_always_inline void
1680 {
1681  m = rte_pktmbuf_prefree_seg(m);
1682  if (likely(m != NULL))
1683  rte_mbuf_raw_free(m);
1684 }
1685 
1695 static inline void rte_pktmbuf_free(struct rte_mbuf *m)
1696 {
1697  struct rte_mbuf *m_next;
1698 
1699  if (m != NULL)
1701 
1702  while (m != NULL) {
1703  m_next = m->next;
1705  m = m_next;
1706  }
1707 }
1708 
1726 static inline struct rte_mbuf *rte_pktmbuf_clone(struct rte_mbuf *md,
1727  struct rte_mempool *mp)
1728 {
1729  struct rte_mbuf *mc, *mi, **prev;
1730  uint32_t pktlen;
1731  uint16_t nseg;
1732 
1733  if (unlikely ((mc = rte_pktmbuf_alloc(mp)) == NULL))
1734  return NULL;
1735 
1736  mi = mc;
1737  prev = &mi->next;
1738  pktlen = md->pkt_len;
1739  nseg = 0;
1740 
1741  do {
1742  nseg++;
1743  rte_pktmbuf_attach(mi, md);
1744  *prev = mi;
1745  prev = &mi->next;
1746  } while ((md = md->next) != NULL &&
1747  (mi = rte_pktmbuf_alloc(mp)) != NULL);
1748 
1749  *prev = NULL;
1750  mc->nb_segs = nseg;
1751  mc->pkt_len = pktlen;
1752 
1753  /* Allocation of new indirect segment failed */
1754  if (unlikely (mi == NULL)) {
1755  rte_pktmbuf_free(mc);
1756  return NULL;
1757  }
1758 
1759  __rte_mbuf_sanity_check(mc, 1);
1760  return mc;
1761 }
1762 
1774 static inline void rte_pktmbuf_refcnt_update(struct rte_mbuf *m, int16_t v)
1775 {
1777 
1778  do {
1779  rte_mbuf_refcnt_update(m, v);
1780  } while ((m = m->next) != NULL);
1781 }
1782 
1791 static inline uint16_t rte_pktmbuf_headroom(const struct rte_mbuf *m)
1792 {
1794  return m->data_off;
1795 }
1796 
1805 static inline uint16_t rte_pktmbuf_tailroom(const struct rte_mbuf *m)
1806 {
1808  return (uint16_t)(m->buf_len - rte_pktmbuf_headroom(m) -
1809  m->data_len);
1810 }
1811 
1820 static inline struct rte_mbuf *rte_pktmbuf_lastseg(struct rte_mbuf *m)
1821 {
1823  while (m->next != NULL)
1824  m = m->next;
1825  return m;
1826 }
1827 
1842 #define rte_pktmbuf_mtod_offset(m, t, o) \
1843  ((t)((char *)(m)->buf_addr + (m)->data_off + (o)))
1844 
1857 #define rte_pktmbuf_mtod(m, t) rte_pktmbuf_mtod_offset(m, t, 0)
1858 
1868 #define rte_pktmbuf_iova_offset(m, o) \
1869  (rte_iova_t)((m)->buf_iova + (m)->data_off + (o))
1870 
1871 /* deprecated */
1872 #define rte_pktmbuf_mtophys_offset(m, o) \
1873  rte_pktmbuf_iova_offset(m, o)
1874 
1882 #define rte_pktmbuf_iova(m) rte_pktmbuf_iova_offset(m, 0)
1883 
1884 /* deprecated */
1885 #define rte_pktmbuf_mtophys(m) rte_pktmbuf_iova(m)
1886 
1895 #define rte_pktmbuf_pkt_len(m) ((m)->pkt_len)
1896 
1905 #define rte_pktmbuf_data_len(m) ((m)->data_len)
1906 
1922 static inline char *rte_pktmbuf_prepend(struct rte_mbuf *m,
1923  uint16_t len)
1924 {
1926 
1927  if (unlikely(len > rte_pktmbuf_headroom(m)))
1928  return NULL;
1929 
1930  /* NB: elaborating the subtraction like this instead of using
1931  * -= allows us to ensure the result type is uint16_t
1932  * avoiding compiler warnings on gcc 8.1 at least */
1933  m->data_off = (uint16_t)(m->data_off - len);
1934  m->data_len = (uint16_t)(m->data_len + len);
1935  m->pkt_len = (m->pkt_len + len);
1936 
1937  return (char *)m->buf_addr + m->data_off;
1938 }
1939 
1955 static inline char *rte_pktmbuf_append(struct rte_mbuf *m, uint16_t len)
1956 {
1957  void *tail;
1958  struct rte_mbuf *m_last;
1959 
1961 
1962  m_last = rte_pktmbuf_lastseg(m);
1963  if (unlikely(len > rte_pktmbuf_tailroom(m_last)))
1964  return NULL;
1965 
1966  tail = (char *)m_last->buf_addr + m_last->data_off + m_last->data_len;
1967  m_last->data_len = (uint16_t)(m_last->data_len + len);
1968  m->pkt_len = (m->pkt_len + len);
1969  return (char*) tail;
1970 }
1971 
1986 static inline char *rte_pktmbuf_adj(struct rte_mbuf *m, uint16_t len)
1987 {
1989 
1990  if (unlikely(len > m->data_len))
1991  return NULL;
1992 
1993  /* NB: elaborating the addition like this instead of using
1994  * += allows us to ensure the result type is uint16_t
1995  * avoiding compiler warnings on gcc 8.1 at least */
1996  m->data_len = (uint16_t)(m->data_len - len);
1997  m->data_off = (uint16_t)(m->data_off + len);
1998  m->pkt_len = (m->pkt_len - len);
1999  return (char *)m->buf_addr + m->data_off;
2000 }
2001 
2016 static inline int rte_pktmbuf_trim(struct rte_mbuf *m, uint16_t len)
2017 {
2018  struct rte_mbuf *m_last;
2019 
2021 
2022  m_last = rte_pktmbuf_lastseg(m);
2023  if (unlikely(len > m_last->data_len))
2024  return -1;
2025 
2026  m_last->data_len = (uint16_t)(m_last->data_len - len);
2027  m->pkt_len = (m->pkt_len - len);
2028  return 0;
2029 }
2030 
2040 static inline int rte_pktmbuf_is_contiguous(const struct rte_mbuf *m)
2041 {
2043  return !!(m->nb_segs == 1);
2044 }
2045 
2049 const void *__rte_pktmbuf_read(const struct rte_mbuf *m, uint32_t off,
2050  uint32_t len, void *buf);
2051 
2072 static inline const void *rte_pktmbuf_read(const struct rte_mbuf *m,
2073  uint32_t off, uint32_t len, void *buf)
2074 {
2075  if (likely(off + len <= rte_pktmbuf_data_len(m)))
2076  return rte_pktmbuf_mtod_offset(m, char *, off);
2077  else
2078  return __rte_pktmbuf_read(m, off, len, buf);
2079 }
2080 
2097 static inline int rte_pktmbuf_chain(struct rte_mbuf *head, struct rte_mbuf *tail)
2098 {
2099  struct rte_mbuf *cur_tail;
2100 
2101  /* Check for number-of-segments-overflow */
2102  if (head->nb_segs + tail->nb_segs > RTE_MBUF_MAX_NB_SEGS)
2103  return -EOVERFLOW;
2104 
2105  /* Chain 'tail' onto the old tail */
2106  cur_tail = rte_pktmbuf_lastseg(head);
2107  cur_tail->next = tail;
2108 
2109  /* accumulate number of segments and total length.
2110  * NB: elaborating the addition like this instead of using
2111  * -= allows us to ensure the result type is uint16_t
2112  * avoiding compiler warnings on gcc 8.1 at least */
2113  head->nb_segs = (uint16_t)(head->nb_segs + tail->nb_segs);
2114  head->pkt_len += tail->pkt_len;
2115 
2116  /* pkt_len is only set in the head */
2117  tail->pkt_len = tail->data_len;
2118 
2119  return 0;
2120 }
2121 
2132 static inline int
2134 {
2135  uint64_t ol_flags = m->ol_flags;
2136  uint64_t inner_l3_offset = m->l2_len;
2137 
2138  /* Does packet set any of available offloads? */
2139  if (!(ol_flags & PKT_TX_OFFLOAD_MASK))
2140  return 0;
2141 
2143  /* NB: elaborating the addition like this instead of using
2144  * += gives the result uint64_t type instead of int,
2145  * avoiding compiler warnings on gcc 8.1 at least */
2146  inner_l3_offset = inner_l3_offset + m->outer_l2_len +
2147  m->outer_l3_len;
2148 
2149  /* Headers are fragmented */
2150  if (rte_pktmbuf_data_len(m) < inner_l3_offset + m->l3_len + m->l4_len)
2151  return -ENOTSUP;
2152 
2153  /* IP checksum can be counted only for IPv4 packet */
2155  return -EINVAL;
2156 
2157  /* IP type not set when required */
2159  if (!(ol_flags & (PKT_TX_IPV4 | PKT_TX_IPV6)))
2160  return -EINVAL;
2161 
2162  /* Check requirements for TSO packet */
2163  if (ol_flags & PKT_TX_TCP_SEG)
2164  if ((m->tso_segsz == 0) ||
2165  ((ol_flags & PKT_TX_IPV4) &&
2166  !(ol_flags & PKT_TX_IP_CKSUM)))
2167  return -EINVAL;
2168 
2169  /* PKT_TX_OUTER_IP_CKSUM set for non outer IPv4 packet. */
2170  if ((ol_flags & PKT_TX_OUTER_IP_CKSUM) &&
2172  return -EINVAL;
2173 
2174  return 0;
2175 }
2176 
2189 static inline int
2191 {
2192  size_t seg_len, copy_len;
2193  struct rte_mbuf *m;
2194  struct rte_mbuf *m_next;
2195  char *buffer;
2196 
2197  if (rte_pktmbuf_is_contiguous(mbuf))
2198  return 0;
2199 
2200  /* Extend first segment to the total packet length */
2201  copy_len = rte_pktmbuf_pkt_len(mbuf) - rte_pktmbuf_data_len(mbuf);
2202 
2203  if (unlikely(copy_len > rte_pktmbuf_tailroom(mbuf)))
2204  return -1;
2205 
2206  buffer = rte_pktmbuf_mtod_offset(mbuf, char *, mbuf->data_len);
2207  mbuf->data_len = (uint16_t)(mbuf->pkt_len);
2208 
2209  /* Append data from next segments to the first one */
2210  m = mbuf->next;
2211  while (m != NULL) {
2212  m_next = m->next;
2213 
2214  seg_len = rte_pktmbuf_data_len(m);
2215  rte_memcpy(buffer, rte_pktmbuf_mtod(m, char *), seg_len);
2216  buffer += seg_len;
2217 
2219  m = m_next;
2220  }
2221 
2222  mbuf->next = NULL;
2223  mbuf->nb_segs = 1;
2224 
2225  return 0;
2226 }
2227 
2242 void rte_pktmbuf_dump(FILE *f, const struct rte_mbuf *m, unsigned dump_len);
2243 
2244 #ifdef __cplusplus
2245 }
2246 #endif
2247 
2248 #endif /* _RTE_MBUF_H_ */
struct rte_mbuf_ext_shared_info * shinfo
Definition: rte_mbuf.h:594
static rte_iova_t rte_mbuf_data_iova(const struct rte_mbuf *mb)
Definition: rte_mbuf.h:664
struct rte_mbuf * next
Definition: rte_mbuf.h:557
uint16_t mbuf_data_room_size
Definition: rte_mbuf.h:787
uint64_t timestamp
Definition: rte_mbuf.h:545
uint16_t vlan_tci_outer
Definition: rte_mbuf.h:538
#define __rte_always_inline
Definition: rte_common.h:141
static int16_t rte_atomic16_read(const rte_atomic16_t *v)
Definition: rte_atomic.h:256
static struct rte_mbuf * rte_pktmbuf_alloc(struct rte_mempool *mp)
Definition: rte_mbuf.h:1268
uint8_t inner_esp_next_proto
Definition: rte_mbuf.h:492
__extension__ typedef void * MARKER[0]
Definition: rte_mbuf.h:421
#define RTE_MBUF_DIRECT(mb)
Definition: rte_mbuf.h:777
#define IND_ATTACHED_MBUF
Definition: rte_mbuf.h:353
rte_iova_t buf_physaddr
Definition: rte_mbuf.h:444
static uint16_t rte_pktmbuf_priv_size(struct rte_mempool *mp)
Definition: rte_mbuf.h:1205
static int rte_validate_tx_offload(const struct rte_mbuf *m)
Definition: rte_mbuf.h:2133
#define likely(x)
static void rte_pktmbuf_free(struct rte_mbuf *m)
Definition: rte_mbuf.h:1695
#define RTE_PTR_ALIGN_FLOOR(ptr, align)
Definition: rte_common.h:176
uint64_t l2_len
Definition: rte_mbuf.h:565
void rte_pktmbuf_pool_init(struct rte_mempool *mp, void *opaque_arg)
static struct rte_mbuf * rte_pktmbuf_clone(struct rte_mbuf *md, struct rte_mempool *mp)
Definition: rte_mbuf.h:1726
struct rte_mbuf::@164::@175 fdir
static __rte_always_inline void rte_pktmbuf_free_seg(struct rte_mbuf *m)
Definition: rte_mbuf.h:1679
void * buf_addr
Definition: rte_mbuf.h:434
uint32_t l2_type
Definition: rte_mbuf.h:486
static struct rte_mbuf * rte_mbuf_from_indirect(struct rte_mbuf *mi)
Definition: rte_mbuf.h:710
uint16_t data_len
Definition: rte_mbuf.h:510
uint32_t lo
Definition: rte_mbuf.h:523
rte_mbuf_extbuf_free_callback_t free_cb
Definition: rte_mbuf.h:607
void * userdata
Definition: rte_mbuf.h:552
struct rte_mbuf::@164::@176 sched
static int rte_pktmbuf_chain(struct rte_mbuf *head, struct rte_mbuf *tail)
Definition: rte_mbuf.h:2097
RTE_STD_C11 union rte_mbuf::@161 __rte_aligned
uint8_t inner_l2_type
Definition: rte_mbuf.h:499
uint64_t tso_segsz
Definition: rte_mbuf.h:571
__extension__ typedef uint8_t MARKER8[0]
Definition: rte_mbuf.h:423
uint64_t l4_len
Definition: rte_mbuf.h:570
void rte_pktmbuf_dump(FILE *f, const struct rte_mbuf *m, unsigned dump_len)
static uint16_t rte_pktmbuf_headroom(const struct rte_mbuf *m)
Definition: rte_mbuf.h:1791
static int rte_pktmbuf_alloc_bulk(struct rte_mempool *pool, struct rte_mbuf **mbufs, unsigned count)
Definition: rte_mbuf.h:1290
static void rte_pktmbuf_reset_headroom(struct rte_mbuf *m)
Definition: rte_mbuf.h:1221
uint32_t cache_size
Definition: rte_mempool.h:230
#define PKT_TX_OUTER_IP_CKSUM
Definition: rte_mbuf.h:316
static void rte_mbuf_prefetch_part2(struct rte_mbuf *m)
Definition: rte_mbuf.h:643
#define PKT_TX_IPV6
Definition: rte_mbuf.h:301
static uint16_t rte_mbuf_ext_refcnt_update(struct rte_mbuf_ext_shared_info *shinfo, int16_t value)
Definition: rte_mbuf.h:946
uint16_t nb_segs
Definition: rte_mbuf.h:465
uint16_t port
Definition: rte_mbuf.h:468
uint64_t outer_l3_len
Definition: rte_mbuf.h:574
static __rte_always_inline struct rte_mbuf * rte_pktmbuf_prefree_seg(struct rte_mbuf *m)
Definition: rte_mbuf.h:1629
static int rte_pktmbuf_is_contiguous(const struct rte_mbuf *m)
Definition: rte_mbuf.h:2040
uint64_t l3_len
Definition: rte_mbuf.h:569
uint32_t l4_type
Definition: rte_mbuf.h:488
#define RTE_PTR_ADD(ptr, x)
Definition: rte_common.h:153
int rte_get_tx_ol_flag_list(uint64_t mask, char *buf, size_t buflen)
#define PKT_TX_OUTER_IPV4
Definition: rte_mbuf.h:323
static uint16_t rte_pktmbuf_tailroom(const struct rte_mbuf *m)
Definition: rte_mbuf.h:1805
static __rte_always_inline void rte_mbuf_raw_free(struct rte_mbuf *m)
Definition: rte_mbuf.h:1031
#define PKT_TX_TCP_SEG
Definition: rte_mbuf.h:261
#define unlikely(x)
uint16_t priv_size
Definition: rte_mbuf.h:583
uint16_t timesync
Definition: rte_mbuf.h:586
uint32_t hi
Definition: rte_mbuf.h:526
__extension__ typedef uint64_t MARKER64[0]
Definition: rte_mbuf.h:425
void rte_mbuf_sanity_check(const struct rte_mbuf *m, int is_header)
#define RTE_MIN(a, b)
Definition: rte_common.h:412
#define PKT_TX_IPV4
Definition: rte_mbuf.h:293
#define __rte_mbuf_sanity_check(m, is_h)
Definition: rte_mbuf.h:799
const char * rte_get_tx_ol_flag_name(uint64_t mask)
static uint16_t rte_mbuf_refcnt_read(const struct rte_mbuf *m)
Definition: rte_mbuf.h:889
static int rte_pktmbuf_linearize(struct rte_mbuf *mbuf)
Definition: rte_mbuf.h:2190
static __rte_always_inline int rte_mempool_get(struct rte_mempool *mp, void **obj_p)
Definition: rte_mempool.h:1483
uint64_t outer_l2_len
Definition: rte_mbuf.h:575
static void rte_atomic16_set(rte_atomic16_t *v, int16_t new_value)
Definition: rte_atomic.h:270
uint16_t refcnt
Definition: rte_mbuf.h:463
static char * rte_pktmbuf_adj(struct rte_mbuf *m, uint16_t len)
Definition: rte_mbuf.h:1986
#define rte_pktmbuf_pkt_len(m)
Definition: rte_mbuf.h:1895
static void rte_pktmbuf_attach(struct rte_mbuf *mi, struct rte_mbuf *m)
Definition: rte_mbuf.h:1499
static __rte_always_inline int rte_mempool_get_bulk(struct rte_mempool *mp, void **obj_table, unsigned int n)
Definition: rte_mempool.h:1455
uint32_t tun_type
Definition: rte_mbuf.h:489
static int16_t rte_atomic16_add_return(rte_atomic16_t *v, int16_t inc)
Definition: rte_atomic.h:351
uint64_t ol_flags
Definition: rte_mbuf.h:470
static void rte_pktmbuf_detach(struct rte_mbuf *m)
Definition: rte_mbuf.h:1590
static uint16_t rte_mbuf_ext_refcnt_read(const struct rte_mbuf_ext_shared_info *shinfo)
Definition: rte_mbuf.h:914
uint32_t pkt_len
Definition: rte_mbuf.h:509
#define PKT_TX_L4_MASK
Definition: rte_mbuf.h:277
uint16_t buf_len
Definition: rte_mbuf.h:540
uint32_t inner_l4_type
Definition: rte_mbuf.h:505
#define rte_pktmbuf_data_len(m)
Definition: rte_mbuf.h:1905
#define rte_pktmbuf_mtod(m, t)
Definition: rte_mbuf.h:1857
static uint16_t rte_mbuf_refcnt_update(struct rte_mbuf *m, int16_t value)
Definition: rte_mbuf.h:880
uint32_t packet_type
Definition: rte_mbuf.h:484
#define MBUF_INVALID_PORT
Definition: rte_mbuf.h:1235
uint32_t seqn
Definition: rte_mbuf.h:589
#define EXT_ATTACHED_MBUF
Definition: rte_mbuf.h:351
static uint16_t rte_pktmbuf_data_room_size(struct rte_mempool *mp)
Definition: rte_mbuf.h:1184
uint8_t inner_l3_type
Definition: rte_mbuf.h:501
const char * rte_get_rx_ol_flag_name(uint64_t mask)
static struct rte_mbuf_ext_shared_info * rte_pktmbuf_ext_shinfo_init_helper(void *buf_addr, uint16_t *buf_len, rte_mbuf_extbuf_free_callback_t free_cb, void *fcb_opaque)
Definition: rte_mbuf.h:1365
#define RTE_MBUF_HAS_EXTBUF(mb)
Definition: rte_mbuf.h:769
#define RTE_STD_C11
Definition: rte_common.h:37
#define PKT_TX_IP_CKSUM
Definition: rte_mbuf.h:285
void rte_pktmbuf_init(struct rte_mempool *mp, void *opaque_arg, void *m, unsigned i)
struct rte_mempool * pool
Definition: rte_mbuf.h:556
static void rte_mbuf_ext_refcnt_set(struct rte_mbuf_ext_shared_info *shinfo, uint16_t new_value)
Definition: rte_mbuf.h:928
static char * rte_pktmbuf_append(struct rte_mbuf *m, uint16_t len)
Definition: rte_mbuf.h:1955
static void rte_mbuf_refcnt_set(struct rte_mbuf *m, uint16_t new_value)
Definition: rte_mbuf.h:898
void(* rte_mbuf_extbuf_free_callback_t)(void *addr, void *opaque)
Definition: rte_mbuf.h:601
struct rte_mempool * rte_pktmbuf_pool_create_by_ops(const char *name, unsigned int n, unsigned int cache_size, uint16_t priv_size, uint16_t data_room_size, int socket_id, const char *ops_name)
static rte_iova_t rte_mbuf_data_iova_default(const struct rte_mbuf *mb)
Definition: rte_mbuf.h:689
uint32_t rss
Definition: rte_mbuf.h:515
static int rte_pktmbuf_trim(struct rte_mbuf *m, uint16_t len)
Definition: rte_mbuf.h:2016
uint64_t rte_iova_t
Definition: rte_memory.h:82
static char * rte_mbuf_to_baddr(struct rte_mbuf *md)
Definition: rte_mbuf.h:724
static const void * rte_pktmbuf_read(const struct rte_mbuf *m, uint32_t off, uint32_t len, void *buf)
Definition: rte_mbuf.h:2072
static char * rte_pktmbuf_prepend(struct rte_mbuf *m, uint16_t len)
Definition: rte_mbuf.h:1922
static struct rte_mbuf * rte_mbuf_raw_alloc(struct rte_mempool *mp)
Definition: rte_mbuf.h:1006
static void rte_pktmbuf_refcnt_update(struct rte_mbuf *m, int16_t v)
Definition: rte_mbuf.h:1774
uint64_t phys_addr_t
Definition: rte_memory.h:73
#define RTE_PTR_SUB(ptr, x)
Definition: rte_common.h:158
#define __rte_cache_aligned
Definition: rte_memory.h:66
static struct rte_mbuf * rte_pktmbuf_lastseg(struct rte_mbuf *m)
Definition: rte_mbuf.h:1820
static void * rte_memcpy(void *dst, const void *src, size_t n)
#define PKT_TX_OFFLOAD_MASK
Definition: rte_mbuf.h:336
static rte_iova_t rte_mempool_virt2iova(const void *elt)
Definition: rte_mempool.h:1609
static __rte_always_inline void rte_mempool_put(struct rte_mempool *mp, void *obj)
Definition: rte_mempool.h:1324
uint64_t udata64
Definition: rte_mbuf.h:553
uint32_t l3_type
Definition: rte_mbuf.h:487
#define RTE_PTR_DIFF(ptr1, ptr2)
Definition: rte_common.h:165
static void rte_mbuf_prefetch_part1(struct rte_mbuf *m)
Definition: rte_mbuf.h:626
rte_atomic16_t refcnt_atomic
Definition: rte_mbuf.h:462
static void * rte_mempool_get_priv(struct rte_mempool *mp)
Definition: rte_mempool.h:1637
uint64_t tx_offload
Definition: rte_mbuf.h:562
char name[RTE_MEMZONE_NAMESIZE]
Definition: rte_mempool.h:219
uint16_t vlan_tci
Definition: rte_mbuf.h:512
static void *__rte_experimental rte_mbuf_to_priv(struct rte_mbuf *m)
Definition: rte_mbuf.h:744
#define RTE_MBUF_INDIRECT(mb)
Definition: rte_mbuf.h:762
rte_atomic16_t refcnt_atomic
Definition: rte_mbuf.h:609
static void __rte_experimental rte_pktmbuf_attach_extbuf(struct rte_mbuf *m, void *buf_addr, rte_iova_t buf_iova, uint16_t buf_len, struct rte_mbuf_ext_shared_info *shinfo)
Definition: rte_mbuf.h:1450
#define RTE_SET_USED(x)
Definition: rte_common.h:82
#define rte_pktmbuf_mtod_offset(m, t, o)
Definition: rte_mbuf.h:1842
int rte_get_rx_ol_flag_list(uint64_t mask, char *buf, size_t buflen)
static void rte_prefetch0(const volatile void *p)
struct rte_mempool * rte_pktmbuf_pool_create(const char *name, unsigned n, unsigned cache_size, uint16_t priv_size, uint16_t data_room_size, int socket_id)
uint32_t usr
Definition: rte_mbuf.h:534