DPDK  17.11.2
rte_mbuf.h
/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 *   Copyright 2014 6WIND S.A.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _RTE_MBUF_H_
#define _RTE_MBUF_H_

/**
 * @file
 * RTE Mbuf
 *
 * The mbuf library provides the ability to allocate and free buffers
 * (mbufs) that are used to carry network packets and, more generally,
 * messages between DPDK components. Mbufs are stored in a mempool,
 * using the RTE mempool library.
 */

#include <stdint.h>
#include <rte_common.h>
#include <rte_config.h>
#include <rte_mempool.h>
#include <rte_memory.h>
#include <rte_atomic.h>
#include <rte_prefetch.h>
#include <rte_branch_prediction.h>
#include <rte_mbuf_ptype.h>

#ifdef __cplusplus
extern "C" {
#endif

/*
 * Packet Offload Features Flags. They also carry packet type information.
 * These bits are a critical resource: RX and TX share them, so be cautious
 * about any change.
 *
 * - RX flags start at bit position zero, and get added to the left of
 *   previous flags.
 * - The most-significant 3 bits are reserved for generic mbuf flags.
 * - TX flags therefore start at bit position 60 (i.e. 63-3), and new flags
 *   get added to the right of the previously defined flags, i.e. they
 *   should count downwards, not upwards.
 *
 * Keep these flags synchronized with rte_get_rx_ol_flag_name() and
 * rte_get_tx_ol_flag_name().
 */

/**
 * The RX packet is a 802.1q VLAN packet, and the tci has been saved in
 * mbuf->vlan_tci. If PKT_RX_VLAN_STRIPPED is also present, the VLAN
 * header has been stripped from the mbuf data, else it is still present.
 */
#define PKT_RX_VLAN          (1ULL << 0)

#define PKT_RX_RSS_HASH      (1ULL << 1)  /**< RX packet with RSS hash result. */
#define PKT_RX_FDIR          (1ULL << 2)  /**< RX packet with FDIR match indicate. */

/**
 * Deprecated.
 * Checking this flag alone is deprecated: check the 2 bits of
 * PKT_RX_L4_CKSUM_MASK instead.
 */
#define PKT_RX_L4_CKSUM_BAD  (1ULL << 3)

/**
 * Deprecated.
 * Checking this flag alone is deprecated: check the 2 bits of
 * PKT_RX_IP_CKSUM_MASK instead.
 */
#define PKT_RX_IP_CKSUM_BAD  (1ULL << 4)

#define PKT_RX_EIP_CKSUM_BAD (1ULL << 5)  /**< External IP header checksum error. */

/**
 * A VLAN has been stripped by the hardware and its tci is saved in
 * mbuf->vlan_tci. This can only happen if VLAN stripping is enabled in
 * the RX configuration of the PMD.
 */
#define PKT_RX_VLAN_STRIPPED (1ULL << 6)

/**
 * Mask of bits used to determine the status of RX IP checksum.
 * - PKT_RX_IP_CKSUM_UNKNOWN: no information about the RX IP checksum
 * - PKT_RX_IP_CKSUM_BAD: the IP checksum in the packet is wrong
 * - PKT_RX_IP_CKSUM_GOOD: the IP checksum in the packet is valid
 * - PKT_RX_IP_CKSUM_NONE: the IP checksum is not correct in the packet
 *   data, but the integrity of the IP header was verified.
 */
#define PKT_RX_IP_CKSUM_MASK ((1ULL << 4) | (1ULL << 7))

#define PKT_RX_IP_CKSUM_UNKNOWN 0
#define PKT_RX_IP_CKSUM_BAD     (1ULL << 4)
#define PKT_RX_IP_CKSUM_GOOD    (1ULL << 7)
#define PKT_RX_IP_CKSUM_NONE    ((1ULL << 4) | (1ULL << 7))

/**
 * Mask of bits used to determine the status of RX L4 checksum.
 * - PKT_RX_L4_CKSUM_UNKNOWN: no information about the RX L4 checksum
 * - PKT_RX_L4_CKSUM_BAD: the L4 checksum in the packet is wrong
 * - PKT_RX_L4_CKSUM_GOOD: the L4 checksum in the packet is valid
 * - PKT_RX_L4_CKSUM_NONE: the L4 checksum is not correct in the packet
 *   data, but the integrity of the L4 data was verified.
 */
#define PKT_RX_L4_CKSUM_MASK ((1ULL << 3) | (1ULL << 8))

#define PKT_RX_L4_CKSUM_UNKNOWN 0
#define PKT_RX_L4_CKSUM_BAD     (1ULL << 3)
#define PKT_RX_L4_CKSUM_GOOD    (1ULL << 8)
#define PKT_RX_L4_CKSUM_NONE    ((1ULL << 3) | (1ULL << 8))

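/*
 * Usage sketch (added illustration, not part of the original header):
 * because the IP and L4 checksum statuses are 2-bit fields, a flag such
 * as PKT_RX_IP_CKSUM_BAD must not be tested as a lone bit; doing so
 * would also match PKT_RX_IP_CKSUM_NONE, which sets both bits. Mask
 * first, then compare:
 *
 * @code
 * uint64_t ip_st = m->ol_flags & PKT_RX_IP_CKSUM_MASK;
 * uint64_t l4_st = m->ol_flags & PKT_RX_L4_CKSUM_MASK;
 *
 * if (ip_st == PKT_RX_IP_CKSUM_BAD || l4_st == PKT_RX_L4_CKSUM_BAD)
 *         rte_pktmbuf_free(m);   // drop corrupted packet
 * else if (ip_st == PKT_RX_IP_CKSUM_UNKNOWN)
 *         ;                      // verify the checksum in software
 * @endcode
 */
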
#define PKT_RX_IEEE1588_PTP  (1ULL << 9)  /**< RX IEEE1588 L2 Ethernet PT Packet. */
#define PKT_RX_IEEE1588_TMST (1ULL << 10) /**< RX IEEE1588 L2/L4 timestamped packet. */
#define PKT_RX_FDIR_ID       (1ULL << 13) /**< FD id reported if FDIR match. */
#define PKT_RX_FDIR_FLX      (1ULL << 14) /**< Flexible bytes reported if FDIR match. */

/**
 * Both outer and inner VLAN headers have been stripped by the hardware;
 * the inner tci is saved in mbuf->vlan_tci and the outer tci in
 * mbuf->vlan_tci_outer.
 */
#define PKT_RX_QINQ_STRIPPED (1ULL << 15)

/**
 * When packets are coalesced by a hardware or virtual driver (LRO),
 * this flag can be set in the RX mbuf, meaning that m->tso_segsz is
 * valid and is set to the segment size of the original packets.
 */
#define PKT_RX_LRO           (1ULL << 16)

/**
 * Indicates that the timestamp field in the mbuf is valid.
 */
#define PKT_RX_TIMESTAMP     (1ULL << 17)

/**
 * Indicates that security offload processing was applied on the RX packet.
 */
#define PKT_RX_SEC_OFFLOAD   (1ULL << 18)

/**
 * Indicates that security offload processing failed on the RX packet.
 */
#define PKT_RX_SEC_OFFLOAD_FAILED (1ULL << 19)

/**
 * The RX packet is a double VLAN, and the outer tci has been saved in
 * mbuf->vlan_tci_outer.
 */
#define PKT_RX_QINQ          (1ULL << 20)

/* add new RX flags here */

/* add new TX flags here */

/**
 * Request security offload processing on the TX packet.
 */
#define PKT_TX_SEC_OFFLOAD   (1ULL << 43)

/**
 * Offload the MACsec. Usable only when the hardware supports MACsec offload.
 */
#define PKT_TX_MACSEC        (1ULL << 44)

/**
 * Bits 45:48 are used for the tunnel type when requesting checksum or
 * TSO offload of tunneled packets.
 */
#define PKT_TX_TUNNEL_VXLAN     (0x1ULL << 45)
#define PKT_TX_TUNNEL_GRE       (0x2ULL << 45)
#define PKT_TX_TUNNEL_IPIP      (0x3ULL << 45)
#define PKT_TX_TUNNEL_GENEVE    (0x4ULL << 45)
/** TX packet with MPLS-in-UDP RFC 7510 header. */
#define PKT_TX_TUNNEL_MPLSINUDP (0x5ULL << 45)
/* add new TX TUNNEL type here */
#define PKT_TX_TUNNEL_MASK      (0xFULL << 45)

/**
 * Second VLAN insertion (QinQ) flag.
 */
#define PKT_TX_QINQ_PKT      (1ULL << 49)

/**
 * TCP segmentation offload. To enable this offload feature for a packet
 * to be transmitted on hardware supporting TSO:
 *  - set the PKT_TX_TCP_SEG flag in mbuf->ol_flags (this flag implies
 *    PKT_TX_TCP_CKSUM)
 *  - set the flag PKT_TX_IPV4 or PKT_TX_IPV6
 *  - if it's IPv4, set the PKT_TX_IP_CKSUM flag and write the IP checksum
 *    to 0 in the packet
 *  - fill the mbuf offload information: l2_len, l3_len, l4_len, tso_segsz
 *  - calculate the pseudo header checksum without taking the IP length
 *    into account, and set it in the TCP header. Refer to
 *    rte_ipv4_phdr_cksum() and rte_ipv6_phdr_cksum() as helpers.
 */
#define PKT_TX_TCP_SEG       (1ULL << 50)

#define PKT_TX_IEEE1588_TMST (1ULL << 51) /**< TX IEEE1588 packet to timestamp. */

/**
 * Bits 52+53 used for L4 packet type with checksum enabled: 00: Reserved,
 * 01: TCP checksum, 10: SCTP checksum, 11: UDP checksum. To use hardware
 * L4 checksum offload, the user needs to:
 *  - fill l2_len and l3_len in the mbuf
 *  - set the flags PKT_TX_TCP_CKSUM, PKT_TX_SCTP_CKSUM or PKT_TX_UDP_CKSUM
 *  - set the flag PKT_TX_IPV4 or PKT_TX_IPV6
 *  - calculate the pseudo header checksum and set it in the L4 header
 *    (only for TCP or UDP). For SCTP, set the crc field to 0.
 */
#define PKT_TX_L4_NO_CKSUM   (0ULL << 52) /**< Disable L4 cksum of TX pkt. */
#define PKT_TX_TCP_CKSUM     (1ULL << 52) /**< TCP cksum of TX pkt. computed by NIC. */
#define PKT_TX_SCTP_CKSUM    (2ULL << 52) /**< SCTP cksum of TX pkt. computed by NIC. */
#define PKT_TX_UDP_CKSUM     (3ULL << 52) /**< UDP cksum of TX pkt. computed by NIC. */
#define PKT_TX_L4_MASK       (3ULL << 52) /**< Mask for L4 cksum offload request. */

/**
 * Offload the IP checksum in the hardware. The flag PKT_TX_IPV4 should
 * also be set by the application, and the IP checksum field in the
 * packet must be set to 0.
 */
#define PKT_TX_IP_CKSUM      (1ULL << 54)

/**
 * The packet is an IPv4 packet. Required for L4 checksum offload or TSO.
 */
#define PKT_TX_IPV4          (1ULL << 55)

/**
 * The packet is an IPv6 packet. Required for L4 checksum offload or TSO.
 */
#define PKT_TX_IPV6          (1ULL << 56)

#define PKT_TX_VLAN_PKT      (1ULL << 57) /**< TX packet is a 802.1q VLAN packet. */

/**
 * Offload the outer IP checksum of a tunneled packet in the hardware.
 */
#define PKT_TX_OUTER_IP_CKSUM (1ULL << 58)

/**
 * The outer header of the tunneled packet is IPv4.
 */
#define PKT_TX_OUTER_IPV4    (1ULL << 59)

/**
 * The outer header of the tunneled packet is IPv6.
 */
#define PKT_TX_OUTER_IPV6    (1ULL << 60)

/**
 * Bitmask of all supported packet TX offload features flags,
 * which can be set for packet.
 */
#define PKT_TX_OFFLOAD_MASK (    \
		PKT_TX_IP_CKSUM |        \
		PKT_TX_L4_MASK |         \
		PKT_TX_OUTER_IP_CKSUM |  \
		PKT_TX_TCP_SEG |         \
		PKT_TX_IEEE1588_TMST |   \
		PKT_TX_QINQ_PKT |        \
		PKT_TX_VLAN_PKT |        \
		PKT_TX_TUNNEL_MASK |     \
		PKT_TX_MACSEC |          \
		PKT_TX_SEC_OFFLOAD)

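/*
 * Usage sketch (added illustration, not part of the original header):
 * requesting IPv4 + TCP checksum offload on TX, assuming an untagged
 * Ethernet/IPv4/TCP packet (struct ipv4_hdr and struct tcp_hdr come
 * from rte_ip.h and rte_tcp.h). Per the flag contracts above, the IPv4
 * checksum field must be zeroed and the TCP checksum seeded with the
 * pseudo-header checksum.
 *
 * @code
 * struct ipv4_hdr *ip = rte_pktmbuf_mtod_offset(m, struct ipv4_hdr *,
 *         m->l2_len);
 * struct tcp_hdr *tcp = rte_pktmbuf_mtod_offset(m, struct tcp_hdr *,
 *         m->l2_len + m->l3_len);
 *
 * m->l2_len = sizeof(struct ether_hdr);
 * m->l3_len = sizeof(struct ipv4_hdr);
 * m->ol_flags |= PKT_TX_IPV4 | PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM;
 *
 * ip->hdr_checksum = 0;
 * tcp->cksum = rte_ipv4_phdr_cksum(ip, m->ol_flags);
 * @endcode
 */
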
#define __RESERVED           (1ULL << 61) /**< reserved for future mbuf use */

#define IND_ATTACHED_MBUF    (1ULL << 62) /**< Indirect attached mbuf */

/* Use final bit of flags to indicate a control mbuf */
#define CTRL_MBUF_FLAG       (1ULL << 63) /**< Mbuf contains control data */

/** Alignment constraint of mbuf private area. */
#define RTE_MBUF_PRIV_ALIGN 8

/**
 * Get the name of a RX offload flag.
 * Returns the name of this flag, or NULL if the mask is not valid.
 */
const char *rte_get_rx_ol_flag_name(uint64_t mask);

/**
 * Dump the list of RX offload flags in a buffer.
 * Returns 0 on success, (-1) on error.
 */
int rte_get_rx_ol_flag_list(uint64_t mask, char *buf, size_t buflen);

/**
 * Get the name of a TX offload flag.
 * Returns the name of this flag, or NULL if the mask is not valid.
 */
const char *rte_get_tx_ol_flag_name(uint64_t mask);

/**
 * Dump the list of TX offload flags in a buffer.
 * Returns 0 on success, (-1) on error.
 */
int rte_get_tx_ol_flag_list(uint64_t mask, char *buf, size_t buflen);

/**
 * Some NICs need at least a 2KB buffer to RX a standard Ethernet frame
 * without splitting it into multiple segments.
 */
#define RTE_MBUF_DEFAULT_DATAROOM 2048
#define RTE_MBUF_DEFAULT_BUF_SIZE \
	(RTE_MBUF_DEFAULT_DATAROOM + RTE_PKTMBUF_HEADROOM)

/* define a set of marker types that can be used to refer to set points in
 * the mbuf */
__extension__
typedef void    *MARKER[0];   /**< generic marker for a point in a structure */
__extension__
typedef uint8_t  MARKER8[0];  /**< generic marker with 1B alignment */
__extension__
typedef uint64_t MARKER64[0]; /**< marker that allows us to overwrite 8 bytes
                               * with a single assignment */
/**
 * The generic rte_mbuf, containing a packet mbuf.
 */
struct rte_mbuf {
	MARKER cacheline0;

	void *buf_addr;           /**< Virtual address of segment buffer. */
	/**
	 * IO address of the segment buffer.
	 */
	RTE_STD_C11
	union {
		rte_iova_t buf_iova;
		rte_iova_t buf_physaddr; /**< deprecated */
	} __rte_aligned(sizeof(rte_iova_t));

	/* next 8 bytes are initialised on RX descriptor rearm */
	MARKER64 rearm_data;
	uint16_t data_off;

	/**
	 * Reference counter. It should only be accessed using the
	 * following functions: rte_mbuf_refcnt_update(),
	 * rte_mbuf_refcnt_read(), and rte_mbuf_refcnt_set(). Whether
	 * these are atomic is controlled by the
	 * CONFIG_RTE_MBUF_REFCNT_ATOMIC config option.
	 */
	RTE_STD_C11
	union {
		rte_atomic16_t refcnt_atomic; /**< Atomically accessed refcnt */
		uint16_t refcnt;              /**< Non-atomically accessed refcnt */
	};
	uint16_t nb_segs;         /**< Number of segments. */

	/** Input port (16 bits to support more than 256 virtual ports). */
	uint16_t port;

	uint64_t ol_flags;        /**< Offload features. */

	/* remaining bytes are set on RX when pulling packet from descriptor */
	MARKER rx_descriptor_fields1;

	/*
	 * The packet type, which is the combination of outer/inner L2, L3, L4
	 * and tunnel types. The packet_type is about data really present in the
	 * mbuf. Example: if vlan stripping is enabled, a received vlan packet
	 * would have RTE_PTYPE_L2_ETHER and not RTE_PTYPE_L2_VLAN because the
	 * vlan is stripped from the data.
	 */
	RTE_STD_C11
	union {
		uint32_t packet_type; /**< L2/L3/L4 and tunnel information. */
		struct {
			uint32_t l2_type:4;  /**< (Outer) L2 type. */
			uint32_t l3_type:4;  /**< (Outer) L3 type. */
			uint32_t l4_type:4;  /**< (Outer) L4 type. */
			uint32_t tun_type:4; /**< Tunnel type. */
			RTE_STD_C11
			union {
				uint8_t inner_esp_next_proto;
				/**< ESP next protocol type, valid if
				 * RTE_PTYPE_TUNNEL_ESP tunnel type is set
				 * on both Tx and Rx.
				 */
				__extension__
				struct {
					uint8_t inner_l2_type:4;
					/**< Inner L2 type. */
					uint8_t inner_l3_type:4;
					/**< Inner L3 type. */
				};
			};
			uint32_t inner_l4_type:4; /**< Inner L4 type. */
		};
	};

	uint32_t pkt_len;         /**< Total pkt len: sum of all segments. */
	uint16_t data_len;        /**< Amount of data in segment buffer. */
	/** VLAN TCI (CPU order), valid if PKT_RX_VLAN_STRIPPED is set. */
	uint16_t vlan_tci;

	union {
		uint32_t rss;     /**< RSS hash result if RSS enabled */
		struct {
			RTE_STD_C11
			union {
				struct {
					uint16_t hash;
					uint16_t id;
				};
				uint32_t lo;
				/**< Second 4 flexible bytes */
			};
			uint32_t hi;
			/**< First 4 flexible bytes or FD ID, dependent
			 * on PKT_RX_FDIR_* flag in ol_flags.
			 */
		} fdir;           /**< Filter identifier if FDIR enabled */
		struct {
			uint32_t lo;
			uint32_t hi;
		} sched;          /**< Hierarchical scheduler */
		uint32_t usr;     /**< User defined tags. See rte_distributor_process() */
	} hash;                   /**< hash information */

	/** Outer VLAN TCI (CPU order), valid if PKT_RX_QINQ_STRIPPED is set. */
	uint16_t vlan_tci_outer;

	uint16_t buf_len;         /**< Length of segment buffer. */

	/** Valid if PKT_RX_TIMESTAMP is set. The unit and time reference
	 * are not normalized but are always the same for a given port.
	 */
	uint64_t timestamp;

	/* second cache line - fields only used in slow path or on TX */
	MARKER cacheline1 __rte_cache_min_aligned;

	RTE_STD_C11
	union {
		void *userdata;   /**< Can be used for external metadata */
		uint64_t udata64; /**< Allow 8-byte userdata on 32-bit */
	};

	struct rte_mempool *pool; /**< Pool from which mbuf was allocated. */
	struct rte_mbuf *next;    /**< Next segment of scattered packet. */

	/* fields to support TX offloads */
	RTE_STD_C11
	union {
		uint64_t tx_offload;       /**< combined for easy fetch */
		__extension__
		struct {
			uint64_t l2_len:7;
			/**< L2 (MAC) Header Length for non-tunneling pkt.
			 * Outer L4, tunnel and inner L2 lengths for
			 * tunneling pkt.
			 */
			uint64_t l3_len:9;  /**< L3 (IP) Header Length. */
			uint64_t l4_len:8;  /**< L4 (TCP/UDP) Header Length. */
			uint64_t tso_segsz:16; /**< TCP TSO segment size */

			/* fields for TX offloading of tunnels */
			uint64_t outer_l3_len:9; /**< Outer L3 (IP) Hdr Length. */
			uint64_t outer_l2_len:7; /**< Outer L2 (MAC) Hdr Length. */

			/* uint64_t unused:8; */
		};
	};

	/** Size of the application private data. In case of an indirect
	 * mbuf, it stores the direct mbuf private data size.
	 */
	uint16_t priv_size;

	/** Timesync flags for use with IEEE1588. */
	uint16_t timesync;

	/** Sequence number. See also rte_reorder_insert(). */
	uint32_t seqn;

} __rte_cache_aligned;

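/*
 * Illustration (added, not part of the original header): a packet is a
 * chain of segments linked by 'next'; 'pkt_len' lives only in the head
 * mbuf and always equals the sum of 'data_len' over the chain. A sketch
 * that checks this invariant:
 *
 * @code
 * uint32_t seen = 0;
 * const struct rte_mbuf *seg;
 *
 * for (seg = m; seg != NULL; seg = seg->next)
 *         seen += seg->data_len;
 * RTE_ASSERT(seen == m->pkt_len);
 * @endcode
 */
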
#define RTE_MBUF_MAX_NB_SEGS	UINT16_MAX

/**
 * Prefetch the first part of the mbuf (the first cache line).
 */
static inline void
rte_mbuf_prefetch_part1(struct rte_mbuf *m)
{
	rte_prefetch0(&m->cacheline0);
}

/**
 * Prefetch the second part of the mbuf (the second cache line). Does
 * nothing on systems with 128-byte cache lines, where the whole mbuf
 * already fits in the first line.
 */
static inline void
rte_mbuf_prefetch_part2(struct rte_mbuf *m)
{
#if RTE_CACHE_LINE_SIZE == 64
	rte_prefetch0(&m->cacheline1);
#else
	RTE_SET_USED(m);
#endif
}


static inline uint16_t rte_pktmbuf_priv_size(struct rte_mempool *mp);

/**
 * Return the IO address of the beginning of the mbuf data.
 */
static inline rte_iova_t
rte_mbuf_data_iova(const struct rte_mbuf *mb)
{
	return mb->buf_iova + mb->data_off;
}

__rte_deprecated
static inline phys_addr_t
rte_mbuf_data_dma_addr(const struct rte_mbuf *mb)
{
	return rte_mbuf_data_iova(mb);
}

/**
 * Return the default IO address of the beginning of the mbuf data:
 * the address used when the mbuf is freshly allocated and the full
 * headroom is still in place.
 */
static inline rte_iova_t
rte_mbuf_data_iova_default(const struct rte_mbuf *mb)
{
	return mb->buf_iova + RTE_PKTMBUF_HEADROOM;
}

__rte_deprecated
static inline phys_addr_t
rte_mbuf_data_dma_addr_default(const struct rte_mbuf *mb)
{
	return rte_mbuf_data_iova_default(mb);
}

/**
 * Return the mbuf owning the data buffer address of an indirect mbuf.
 */
static inline struct rte_mbuf *
rte_mbuf_from_indirect(struct rte_mbuf *mi)
{
	return (struct rte_mbuf *)RTE_PTR_SUB(mi->buf_addr, sizeof(*mi) + mi->priv_size);
}

/**
 * Return the buffer address embedded in the given mbuf.
 */
static inline char *
rte_mbuf_to_baddr(struct rte_mbuf *md)
{
	char *buffer_addr;
	buffer_addr = (char *)md + sizeof(*md) + rte_pktmbuf_priv_size(md->pool);
	return buffer_addr;
}

/**
 * Returns TRUE if the given mbuf is indirect, or FALSE otherwise.
 */
#define RTE_MBUF_INDIRECT(mb)   ((mb)->ol_flags & IND_ATTACHED_MBUF)

/**
 * Returns TRUE if the given mbuf is direct, or FALSE otherwise.
 */
#define RTE_MBUF_DIRECT(mb)     (!RTE_MBUF_INDIRECT(mb))

/** Private data of a pktmbuf pool, stored in the mempool private area. */
struct rte_pktmbuf_pool_private {
	uint16_t mbuf_data_room_size; /**< Size of data space in each mbuf. */
	uint16_t mbuf_priv_size;      /**< Size of private area in each mbuf. */
};

#ifdef RTE_LIBRTE_MBUF_DEBUG

/** check mbuf type in debug mode */
#define __rte_mbuf_sanity_check(m, is_h) rte_mbuf_sanity_check(m, is_h)

#else /*  RTE_LIBRTE_MBUF_DEBUG */

/** check mbuf type in debug mode */
#define __rte_mbuf_sanity_check(m, is_h) do { } while (0)

#endif /*  RTE_LIBRTE_MBUF_DEBUG */

#ifdef RTE_MBUF_REFCNT_ATOMIC

/**
 * Reads the value of an mbuf's refcnt.
 */
static inline uint16_t
rte_mbuf_refcnt_read(const struct rte_mbuf *m)
{
	return (uint16_t)(rte_atomic16_read(&m->refcnt_atomic));
}

/**
 * Sets an mbuf's refcnt to the defined value.
 */
static inline void
rte_mbuf_refcnt_set(struct rte_mbuf *m, uint16_t new_value)
{
	rte_atomic16_set(&m->refcnt_atomic, new_value);
}

/* internal */
static inline uint16_t
__rte_mbuf_refcnt_update(struct rte_mbuf *m, int16_t value)
{
	return (uint16_t)(rte_atomic16_add_return(&m->refcnt_atomic, value));
}

/**
 * Adds the given value to an mbuf's refcnt and returns its new value.
 */
static inline uint16_t
rte_mbuf_refcnt_update(struct rte_mbuf *m, int16_t value)
{
	/*
	 * The atomic_add is an expensive operation, so we don't want to
	 * call it in the case where we know we are the unique holder of
	 * this mbuf (i.e. ref_cnt == 1). Otherwise, an atomic
	 * operation has to be used because concurrent accesses on the
	 * reference counter can occur.
	 */
	if (likely(rte_mbuf_refcnt_read(m) == 1)) {
		rte_mbuf_refcnt_set(m, 1 + value);
		return 1 + value;
	}

	return __rte_mbuf_refcnt_update(m, value);
}

#else /* ! RTE_MBUF_REFCNT_ATOMIC */

/* internal */
static inline uint16_t
__rte_mbuf_refcnt_update(struct rte_mbuf *m, int16_t value)
{
	m->refcnt = (uint16_t)(m->refcnt + value);
	return m->refcnt;
}

/**
 * Adds the given value to an mbuf's refcnt and returns its new value.
 */
static inline uint16_t
rte_mbuf_refcnt_update(struct rte_mbuf *m, int16_t value)
{
	return __rte_mbuf_refcnt_update(m, value);
}

/**
 * Reads the value of an mbuf's refcnt.
 */
static inline uint16_t
rte_mbuf_refcnt_read(const struct rte_mbuf *m)
{
	return m->refcnt;
}

/**
 * Sets an mbuf's refcnt to the defined value.
 */
static inline void
rte_mbuf_refcnt_set(struct rte_mbuf *m, uint16_t new_value)
{
	m->refcnt = new_value;
}

#endif /* RTE_MBUF_REFCNT_ATOMIC */

/** Mbuf prefetch */
#define RTE_MBUF_PREFETCH_TO_FREE(m) do {       \
	if ((m) != NULL)                        \
		rte_prefetch0(m);               \
} while (0)

/**
 * Sanity checks on an mbuf. Panics on corruption.
 */
void
rte_mbuf_sanity_check(const struct rte_mbuf *m, int is_header);

#define MBUF_RAW_ALLOC_CHECK(m) do {                            \
	RTE_ASSERT(rte_mbuf_refcnt_read(m) == 1);               \
	RTE_ASSERT((m)->next == NULL);                          \
	RTE_ASSERT((m)->nb_segs == 1);                          \
	__rte_mbuf_sanity_check(m, 0);                          \
} while (0)

/**
 * Allocate an uninitialized mbuf from mempool *mp*. The caller is
 * responsible for initializing all the required fields (see
 * rte_pktmbuf_reset()); for standard needs, prefer rte_pktmbuf_alloc().
 */
static inline struct rte_mbuf *rte_mbuf_raw_alloc(struct rte_mempool *mp)
{
	struct rte_mbuf *m;
	void *mb = NULL;

	if (rte_mempool_get(mp, &mb) < 0)
		return NULL;
	m = (struct rte_mbuf *)mb;
	MBUF_RAW_ALLOC_CHECK(m);
	return m;
}

/**
 * Put an mbuf back into its original mempool. The mbuf must be direct,
 * have a refcnt of 1, and be a single unattached segment.
 */
static __rte_always_inline void
rte_mbuf_raw_free(struct rte_mbuf *m)
{
	RTE_ASSERT(RTE_MBUF_DIRECT(m));
	RTE_ASSERT(rte_mbuf_refcnt_read(m) == 1);
	RTE_ASSERT(m->next == NULL);
	RTE_ASSERT(m->nb_segs == 1);
	__rte_mbuf_sanity_check(m, 0);
	rte_mempool_put(m->pool, m);
}

/* compat with older versions */
__rte_deprecated
static inline void
__rte_mbuf_raw_free(struct rte_mbuf *m)
{
	rte_mbuf_raw_free(m);
}

/* Operations on ctrl mbuf */

/**
 * The control mbuf constructor. Initializes each mbuf of a mempool as
 * a control mbuf; typically given as a callback to rte_mempool_obj_iter()
 * or rte_mempool_create() at pool creation time.
 */
void rte_ctrlmbuf_init(struct rte_mempool *mp, void *opaque_arg,
		void *m, unsigned i);

/**
 * Allocate a new mbuf (type is ctrl) from mempool *mp*.
 */
#define rte_ctrlmbuf_alloc(mp) rte_pktmbuf_alloc(mp)

/**
 * Free a control mbuf back into its original mempool.
 */
#define rte_ctrlmbuf_free(m) rte_pktmbuf_free(m)

/**
 * A macro that returns the pointer to the carried data.
 */
#define rte_ctrlmbuf_data(m) ((char *)((m)->buf_addr) + (m)->data_off)

/**
 * A macro that returns the length of the carried data.
 */
#define rte_ctrlmbuf_len(m) rte_pktmbuf_data_len(m)

/**
 * Tests if an mbuf is a control mbuf.
 */
static inline int
rte_is_ctrlmbuf(struct rte_mbuf *m)
{
	return !!(m->ol_flags & CTRL_MBUF_FLAG);
}

/* Operations on pkt mbuf */

/**
 * The packet mbuf constructor. Initializes the fields of the mbuf
 * structure that are not modified by the user once created (origin
 * pool, buffer start address, and so on). Typically given as a callback
 * to rte_mempool_obj_iter() or rte_mempool_create() at pool creation.
 */
void rte_pktmbuf_init(struct rte_mempool *mp, void *opaque_arg,
		void *m, unsigned i);


/**
 * A packet mbuf pool constructor. Initializes the mempool private data
 * (a struct rte_pktmbuf_pool_private). Typically given as a callback to
 * rte_mempool_create() at pool creation time.
 */
void rte_pktmbuf_pool_init(struct rte_mempool *mp, void *opaque_arg);

/**
 * Create an mbuf pool: a wrapper around the rte_mempool functions that
 * sets up the mempool, its private data, and the per-mbuf
 * initialization. Returns NULL on error, with rte_errno set
 * appropriately.
 */
struct rte_mempool *
rte_pktmbuf_pool_create(const char *name, unsigned n,
	unsigned cache_size, uint16_t priv_size, uint16_t data_room_size,
	int socket_id);

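/*
 * Usage sketch (added illustration, not part of the original header):
 * creating a pool of 8191 mbufs with a 250-mbuf per-lcore cache, no
 * application private area, and the default buffer size. The pool name
 * and sizes are arbitrary choices for the sketch; rte_socket_id() comes
 * from rte_lcore.h.
 *
 * @code
 * struct rte_mempool *mp;
 *
 * mp = rte_pktmbuf_pool_create("MBUF_POOL", 8191, 250, 0,
 *         RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id());
 * if (mp == NULL)
 *         return;   // creation failed; rte_errno holds the reason
 * @endcode
 */
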

/**
 * Get the data room size of mbufs stored in a pktmbuf pool: the amount
 * of data that can be stored in an mbuf, including the headroom
 * (RTE_PKTMBUF_HEADROOM).
 */
static inline uint16_t
rte_pktmbuf_data_room_size(struct rte_mempool *mp)
{
	struct rte_pktmbuf_pool_private *mbp_priv;

	mbp_priv = (struct rte_pktmbuf_pool_private *)rte_mempool_get_priv(mp);
	return mbp_priv->mbuf_data_room_size;
}

/**
 * Get the application private size of mbufs stored in a pktmbuf pool:
 * the size of the area located between the rte_mbuf structure and the
 * data buffer.
 */
static inline uint16_t
rte_pktmbuf_priv_size(struct rte_mempool *mp)
{
	struct rte_pktmbuf_pool_private *mbp_priv;

	mbp_priv = (struct rte_pktmbuf_pool_private *)rte_mempool_get_priv(mp);
	return mbp_priv->mbuf_priv_size;
}

/**
 * Reset the data_off field of a packet mbuf to its default value:
 * the full headroom if the buffer is large enough, else the whole
 * buffer length.
 */
static inline void rte_pktmbuf_reset_headroom(struct rte_mbuf *m)
{
	m->data_off = RTE_MIN(RTE_PKTMBUF_HEADROOM, (uint16_t)m->buf_len);
}

#define MBUF_INVALID_PORT UINT16_MAX

/**
 * Reset the fields of a packet mbuf to their default values.
 */
static inline void rte_pktmbuf_reset(struct rte_mbuf *m)
{
	m->next = NULL;
	m->pkt_len = 0;
	m->tx_offload = 0;
	m->vlan_tci = 0;
	m->vlan_tci_outer = 0;
	m->nb_segs = 1;
	m->port = MBUF_INVALID_PORT;

	m->ol_flags = 0;
	m->packet_type = 0;
	rte_pktmbuf_reset_headroom(m);

	m->data_len = 0;
	__rte_mbuf_sanity_check(m, 1);
}

/**
 * Allocate a new mbuf from a mempool. The mbuf is reset and its
 * data_off points just after the headroom.
 */
static inline struct rte_mbuf *rte_pktmbuf_alloc(struct rte_mempool *mp)
{
	struct rte_mbuf *m;
	if ((m = rte_mbuf_raw_alloc(mp)) != NULL)
		rte_pktmbuf_reset(m);
	return m;
}

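/*
 * Usage sketch (added illustration, not part of the original header):
 * building a packet by hand. rte_pktmbuf_append() (defined later in
 * this file) reserves room and maintains data_len/pkt_len; the payload
 * size of 64 bytes is an arbitrary choice.
 *
 * @code
 * struct rte_mbuf *m = rte_pktmbuf_alloc(mp);
 * char *p;
 *
 * if (m == NULL)
 *         return;
 * p = rte_pktmbuf_append(m, 64);
 * if (p == NULL) {
 *         rte_pktmbuf_free(m);   // not enough tailroom
 *         return;
 * }
 * memset(p, 0, 64);   // fill the 64-byte payload
 * @endcode
 */
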
/**
 * Allocate a bulk of mbufs, initialize refcnt and reset the fields to
 * default values. Returns 0 on success, or a negative error code if
 * the pool could not supply *count* mbufs.
 */
static inline int rte_pktmbuf_alloc_bulk(struct rte_mempool *pool,
	 struct rte_mbuf **mbufs, unsigned count)
{
	unsigned idx = 0;
	int rc;

	rc = rte_mempool_get_bulk(pool, (void **)mbufs, count);
	if (unlikely(rc))
		return rc;

	/* To understand Duff's device on loop unwinding optimization, see
	 * https://en.wikipedia.org/wiki/Duff's_device.
	 * Here a while() loop is used rather than do {} while() to avoid an
	 * extra check if count is zero.
	 */
	switch (count % 4) {
	case 0:
		while (idx != count) {
			MBUF_RAW_ALLOC_CHECK(mbufs[idx]);
			rte_pktmbuf_reset(mbufs[idx]);
			idx++;
			/* fall-through */
	case 3:
			MBUF_RAW_ALLOC_CHECK(mbufs[idx]);
			rte_pktmbuf_reset(mbufs[idx]);
			idx++;
			/* fall-through */
	case 2:
			MBUF_RAW_ALLOC_CHECK(mbufs[idx]);
			rte_pktmbuf_reset(mbufs[idx]);
			idx++;
			/* fall-through */
	case 1:
			MBUF_RAW_ALLOC_CHECK(mbufs[idx]);
			rte_pktmbuf_reset(mbufs[idx]);
			idx++;
			/* fall-through */
		}
	}
	return 0;
}

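/*
 * Usage sketch (added illustration, not part of the original header):
 * allocating a burst of mbufs in one call; on failure nothing was
 * taken from the pool, so there is nothing to roll back. The burst
 * size of 32 is an arbitrary choice.
 *
 * @code
 * struct rte_mbuf *burst[32];
 *
 * if (rte_pktmbuf_alloc_bulk(mp, burst, 32) != 0)
 *         return;   // pool exhausted; no mbufs were allocated
 * @endcode
 */
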
/**
 * Attach a packet mbuf to another packet mbuf.
 *
 * After attachment, the mbuf we attached is called 'indirect', while
 * the mbuf we attached to is called 'direct'. The direct mbuf's
 * reference counter is incremented.
 *
 * Currently not supported:
 *  - attaching to an already indirect mbuf is allowed (the data owner
 *    is resolved), but *mi* itself must be direct;
 *  - *mi* must not be in use by anyone else, i.e. its reference
 *    counter must equal 1.
 */
static inline void rte_pktmbuf_attach(struct rte_mbuf *mi, struct rte_mbuf *m)
{
	struct rte_mbuf *md;

	RTE_ASSERT(RTE_MBUF_DIRECT(mi) &&
	    rte_mbuf_refcnt_read(mi) == 1);

	/* if m is not direct, get the mbuf that embeds the data */
	if (RTE_MBUF_DIRECT(m))
		md = m;
	else
		md = rte_mbuf_from_indirect(m);

	rte_mbuf_refcnt_update(md, 1);
	mi->priv_size = m->priv_size;
	mi->buf_iova = m->buf_iova;
	mi->buf_addr = m->buf_addr;
	mi->buf_len = m->buf_len;

	mi->data_off = m->data_off;
	mi->data_len = m->data_len;
	mi->port = m->port;
	mi->vlan_tci = m->vlan_tci;
	mi->vlan_tci_outer = m->vlan_tci_outer;
	mi->tx_offload = m->tx_offload;
	mi->hash = m->hash;

	mi->next = NULL;
	mi->pkt_len = mi->data_len;
	mi->nb_segs = 1;
	mi->ol_flags = m->ol_flags | IND_ATTACHED_MBUF;
	mi->packet_type = m->packet_type;
	mi->timestamp = m->timestamp;

	__rte_mbuf_sanity_check(mi, 1);
	__rte_mbuf_sanity_check(m, 0);
}

/**
 * Detach an indirect packet mbuf: restore its original embedded buffer,
 * reset its fields to default values, and decrement the direct mbuf's
 * reference counter (freeing the direct mbuf if the counter drops to
 * zero).
 */
static inline void rte_pktmbuf_detach(struct rte_mbuf *m)
{
	struct rte_mbuf *md = rte_mbuf_from_indirect(m);
	struct rte_mempool *mp = m->pool;
	uint32_t mbuf_size, buf_len, priv_size;

	priv_size = rte_pktmbuf_priv_size(mp);
	mbuf_size = sizeof(struct rte_mbuf) + priv_size;
	buf_len = rte_pktmbuf_data_room_size(mp);

	m->priv_size = priv_size;
	m->buf_addr = (char *)m + mbuf_size;
	m->buf_iova = rte_mempool_virt2iova(m) + mbuf_size;
	m->buf_len = (uint16_t)buf_len;
	rte_pktmbuf_reset_headroom(m);
	m->data_len = 0;
	m->ol_flags = 0;

	if (rte_mbuf_refcnt_update(md, -1) == 0) {
		md->next = NULL;
		md->nb_segs = 1;
		rte_mbuf_refcnt_set(md, 1);
		rte_mbuf_raw_free(md);
	}
}

/**
 * Decrease the reference counter and unlink an mbuf segment.
 *
 * This function does the same as a free, except that it does not
 * return the segment to its pool. It decreases the reference counter
 * and, if it reaches 0, detaches the segment from its parent if it is
 * indirect. Returns the segment if it can be recycled by the caller
 * (or handed to rte_mbuf_raw_free()), or NULL if it is still in use.
 */
static __rte_always_inline struct rte_mbuf *
rte_pktmbuf_prefree_seg(struct rte_mbuf *m)
{
	__rte_mbuf_sanity_check(m, 0);

	if (likely(rte_mbuf_refcnt_read(m) == 1)) {

		if (RTE_MBUF_INDIRECT(m))
			rte_pktmbuf_detach(m);

		if (m->next != NULL) {
			m->next = NULL;
			m->nb_segs = 1;
		}

		return m;

	} else if (__rte_mbuf_refcnt_update(m, -1) == 0) {

		if (RTE_MBUF_INDIRECT(m))
			rte_pktmbuf_detach(m);

		if (m->next != NULL) {
			m->next = NULL;
			m->nb_segs = 1;
		}
		rte_mbuf_refcnt_set(m, 1);

		return m;
	}
	return NULL;
}

/* deprecated, replaced by rte_pktmbuf_prefree_seg() */
__rte_deprecated
static inline struct rte_mbuf *
__rte_pktmbuf_prefree_seg(struct rte_mbuf *m)
{
	return rte_pktmbuf_prefree_seg(m);
}

/**
 * Free a segment of a packet mbuf into its original mempool.
 */
static __rte_always_inline void
rte_pktmbuf_free_seg(struct rte_mbuf *m)
{
	m = rte_pktmbuf_prefree_seg(m);
	if (likely(m != NULL))
		rte_mbuf_raw_free(m);
}

/**
 * Free a packet mbuf back into its original mempool, walking the chain
 * and freeing all the segments.
 */
static inline void rte_pktmbuf_free(struct rte_mbuf *m)
{
	struct rte_mbuf *m_next;

	if (m != NULL)
		__rte_mbuf_sanity_check(m, 1);

	while (m != NULL) {
		m_next = m->next;
		rte_pktmbuf_free_seg(m);
		m = m_next;
	}
}

/**
 * Create a "clone" of the given packet mbuf: allocate a new mbuf from
 * *mp* (plus one per extra segment) and attach the chain to the data
 * of *md*. Returns the cloned chain, or NULL if allocation fails.
 */
static inline struct rte_mbuf *rte_pktmbuf_clone(struct rte_mbuf *md,
		struct rte_mempool *mp)
{
	struct rte_mbuf *mc, *mi, **prev;
	uint32_t pktlen;
	uint16_t nseg;

	if (unlikely ((mc = rte_pktmbuf_alloc(mp)) == NULL))
		return NULL;

	mi = mc;
	prev = &mi->next;
	pktlen = md->pkt_len;
	nseg = 0;

	do {
		nseg++;
		rte_pktmbuf_attach(mi, md);
		*prev = mi;
		prev = &mi->next;
	} while ((md = md->next) != NULL &&
	    (mi = rte_pktmbuf_alloc(mp)) != NULL);

	*prev = NULL;
	mc->nb_segs = nseg;
	mc->pkt_len = pktlen;

	/* Allocation of new indirect segment failed */
	if (unlikely (mi == NULL)) {
		rte_pktmbuf_free(mc);
		return NULL;
	}

	__rte_mbuf_sanity_check(mc, 1);
	return mc;
}

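/*
 * Usage sketch (added illustration, not part of the original header):
 * sending the same payload to two destinations without copying it. The
 * clone's segments are indirect: they reference md's data and bump its
 * refcnt, so md's buffer is not returned to its pool until both the
 * original and the clone have been freed. send_one() is a hypothetical
 * consumer that eventually calls rte_pktmbuf_free().
 *
 * @code
 * struct rte_mbuf *copy = rte_pktmbuf_clone(md, clone_pool);
 *
 * if (copy != NULL) {
 *         send_one(md);
 *         send_one(copy);
 * }
 * @endcode
 */
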
/**
 * Adds the given value to the refcnt of all packet mbuf segments.
 */
static inline void rte_pktmbuf_refcnt_update(struct rte_mbuf *m, int16_t v)
{
	__rte_mbuf_sanity_check(m, 1);

	do {
		rte_mbuf_refcnt_update(m, v);
	} while ((m = m->next) != NULL);
}

/**
 * Get the headroom in a packet mbuf.
 */
static inline uint16_t rte_pktmbuf_headroom(const struct rte_mbuf *m)
{
	__rte_mbuf_sanity_check(m, 0);
	return m->data_off;
}

/**
 * Get the tailroom of a packet mbuf.
 */
static inline uint16_t rte_pktmbuf_tailroom(const struct rte_mbuf *m)
{
	__rte_mbuf_sanity_check(m, 0);
	return (uint16_t)(m->buf_len - rte_pktmbuf_headroom(m) -
			  m->data_len);
}

/**
 * Get the last segment of the packet.
 */
static inline struct rte_mbuf *rte_pktmbuf_lastseg(struct rte_mbuf *m)
{
	struct rte_mbuf *m2 = (struct rte_mbuf *)m;

	__rte_mbuf_sanity_check(m, 1);
	while (m2->next != NULL)
		m2 = m2->next;
	return m2;
}

/**
 * A macro that points to an offset into the data in the mbuf.
 * *t* is the pointer type of the returned value.
 */
#define rte_pktmbuf_mtod_offset(m, t, o)	\
	((t)((char *)(m)->buf_addr + (m)->data_off + (o)))

/**
 * A macro that points to the start of the data in the mbuf.
 */
#define rte_pktmbuf_mtod(m, t) rte_pktmbuf_mtod_offset(m, t, 0)

/**
 * A macro that returns the IO address of an offset into the mbuf data.
 */
#define rte_pktmbuf_iova_offset(m, o) \
	(rte_iova_t)((m)->buf_iova + (m)->data_off + (o))

/* deprecated */
#define rte_pktmbuf_mtophys_offset(m, o) \
	rte_pktmbuf_iova_offset(m, o)

/**
 * A macro that returns the IO address of the start of the mbuf data.
 */
#define rte_pktmbuf_iova(m) rte_pktmbuf_iova_offset(m, 0)

/* deprecated */
#define rte_pktmbuf_mtophys(m) rte_pktmbuf_iova(m)

/**
 * A macro that returns the length of the packet (sum of data_len of
 * all segments).
 */
#define rte_pktmbuf_pkt_len(m) ((m)->pkt_len)

/**
 * A macro that returns the length of the segment.
 */
#define rte_pktmbuf_data_len(m) ((m)->data_len)

/**
 * Prepend *len* bytes to the mbuf data area, returning a pointer to
 * the new data start address, or NULL if there is not enough headroom.
 */
static inline char *rte_pktmbuf_prepend(struct rte_mbuf *m,
					uint16_t len)
{
	__rte_mbuf_sanity_check(m, 1);

	if (unlikely(len > rte_pktmbuf_headroom(m)))
		return NULL;

	m->data_off -= len;
	m->data_len = (uint16_t)(m->data_len + len);
	m->pkt_len  = (m->pkt_len + len);

	return (char *)m->buf_addr + m->data_off;
}

/**
 * Append *len* bytes to the mbuf, returning a pointer to the start of
 * the appended data (the tail of the last segment), or NULL if there
 * is not enough tailroom.
 */
static inline char *rte_pktmbuf_append(struct rte_mbuf *m, uint16_t len)
{
	void *tail;
	struct rte_mbuf *m_last;

	__rte_mbuf_sanity_check(m, 1);

	m_last = rte_pktmbuf_lastseg(m);
	if (unlikely(len > rte_pktmbuf_tailroom(m_last)))
		return NULL;

	tail = (char *)m_last->buf_addr + m_last->data_off + m_last->data_len;
	m_last->data_len = (uint16_t)(m_last->data_len + len);
	m->pkt_len  = (m->pkt_len + len);
	return (char *)tail;
}

/**
 * Remove *len* bytes at the beginning of the mbuf, returning a pointer
 * to the new start of the data, or NULL if *len* is greater than the
 * length of the first segment.
 */
static inline char *rte_pktmbuf_adj(struct rte_mbuf *m, uint16_t len)
{
	__rte_mbuf_sanity_check(m, 1);

	if (unlikely(len > m->data_len))
		return NULL;

	m->data_len = (uint16_t)(m->data_len - len);
	m->data_off += len;
	m->pkt_len  = (m->pkt_len - len);
	return (char *)m->buf_addr + m->data_off;
}

/**
 * Remove *len* bytes of data at the end of the mbuf. Returns 0 on
 * success, -1 if *len* is greater than the length of the last segment.
 */
static inline int rte_pktmbuf_trim(struct rte_mbuf *m, uint16_t len)
{
	struct rte_mbuf *m_last;

	__rte_mbuf_sanity_check(m, 1);

	m_last = rte_pktmbuf_lastseg(m);
	if (unlikely(len > m_last->data_len))
		return -1;

	m_last->data_len = (uint16_t)(m_last->data_len - len);
	m->pkt_len  = (m->pkt_len - len);
	return 0;
}

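/*
 * Usage sketch (added illustration, not part of the original header):
 * decapsulation and encapsulation move the data start pointer instead
 * of copying data. Stripping and later restoring an Ethernet header
 * (struct ether_hdr from rte_ether.h) might look like this:
 *
 * @code
 * // decap: drop the L2 header from the front of the packet
 * if (rte_pktmbuf_adj(m, sizeof(struct ether_hdr)) == NULL)
 *         return;   // packet shorter than an Ethernet header
 *
 * // encap: reclaim headroom for a new L2 header
 * struct ether_hdr *eh =
 *         (struct ether_hdr *)rte_pktmbuf_prepend(m, sizeof(*eh));
 * if (eh == NULL)
 *         return;   // not enough headroom
 * @endcode
 */
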
/**
 * Test if the mbuf data is contiguous (i.e. the mbuf has only one
 * segment).
 */
static inline int rte_pktmbuf_is_contiguous(const struct rte_mbuf *m)
{
	__rte_mbuf_sanity_check(m, 1);
	return !!(m->nb_segs == 1);
}

/**
 * @internal used by rte_pktmbuf_read().
 */
const void *__rte_pktmbuf_read(const struct rte_mbuf *m, uint32_t off,
	uint32_t len, void *buf);

/**
 * Read *len* data bytes in an mbuf at the specified offset. If the data
 * is contiguous, return a pointer to it inside the mbuf data; otherwise
 * copy the requested span into the user-supplied buffer and return a
 * pointer to that buffer. Returns NULL if the offset plus length
 * exceeds the packet length.
 */
static inline const void *rte_pktmbuf_read(const struct rte_mbuf *m,
	uint32_t off, uint32_t len, void *buf)
{
	if (likely(off + len <= rte_pktmbuf_data_len(m)))
		return rte_pktmbuf_mtod_offset(m, char *, off);
	else
		return __rte_pktmbuf_read(m, off, len, buf);
}

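/*
 * Usage sketch (added illustration, not part of the original header):
 * safely reading a header span that may straddle segment boundaries.
 * If the first 40 bytes are contiguous, no copy happens and 'hdr'
 * points directly into the mbuf; otherwise they are copied into the
 * stack buffer. The 40-byte span is an arbitrary choice.
 *
 * @code
 * uint8_t scratch[40];
 * const uint8_t *hdr = rte_pktmbuf_read(m, 0, sizeof(scratch), scratch);
 *
 * if (hdr == NULL)
 *         return;   // packet shorter than 40 bytes
 * @endcode
 */
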
/**
 * Chain an mbuf to another, thereby creating a segmented packet.
 * Returns 0 on success, or -EOVERFLOW if the chain would exceed
 * RTE_MBUF_MAX_NB_SEGS segments.
 */
static inline int rte_pktmbuf_chain(struct rte_mbuf *head, struct rte_mbuf *tail)
{
	struct rte_mbuf *cur_tail;

	/* Check for number-of-segments-overflow */
	if (head->nb_segs + tail->nb_segs > RTE_MBUF_MAX_NB_SEGS)
		return -EOVERFLOW;

	/* Chain 'tail' onto the old tail */
	cur_tail = rte_pktmbuf_lastseg(head);
	cur_tail->next = tail;

	/* accumulate number of segments and total length. */
	head->nb_segs += tail->nb_segs;
	head->pkt_len += tail->pkt_len;

	/* pkt_len is only set in the head */
	tail->pkt_len = tail->data_len;

	return 0;
}

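/*
 * Usage sketch (added illustration, not part of the original header):
 * appending one packet's segments to another. On -EOVERFLOW both mbufs
 * are left untouched and still owned by the caller.
 *
 * @code
 * if (rte_pktmbuf_chain(head, tail) != 0) {
 *         rte_pktmbuf_free(tail);   // could not chain; dispose separately
 *         return;
 * }
 * // 'tail' is now part of 'head' and is freed along with it
 * @endcode
 */
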
/**
 * Validate the general requirements for TX offload set in an mbuf:
 * checks the correctness and completeness of the request parameters.
 * Returns 0 if the request is valid, a negative errno otherwise.
 */
static inline int
rte_validate_tx_offload(const struct rte_mbuf *m)
{
	uint64_t ol_flags = m->ol_flags;
	uint64_t inner_l3_offset = m->l2_len;

	/* Does packet set any of available offloads? */
	if (!(ol_flags & PKT_TX_OFFLOAD_MASK))
		return 0;

	if (ol_flags & PKT_TX_OUTER_IP_CKSUM)
		inner_l3_offset += m->outer_l2_len + m->outer_l3_len;

	/* Headers are fragmented */
	if (rte_pktmbuf_data_len(m) < inner_l3_offset + m->l3_len + m->l4_len)
		return -ENOTSUP;

	/* IP checksum can be counted only for IPv4 packet */
	if ((ol_flags & PKT_TX_IP_CKSUM) && (ol_flags & PKT_TX_IPV6))
		return -EINVAL;

	/* IP type not set when required */
	if (ol_flags & (PKT_TX_L4_MASK | PKT_TX_TCP_SEG))
		if (!(ol_flags & (PKT_TX_IPV4 | PKT_TX_IPV6)))
			return -EINVAL;

	/* Check requirements for TSO packet */
	if (ol_flags & PKT_TX_TCP_SEG)
		if ((m->tso_segsz == 0) ||
				((ol_flags & PKT_TX_IPV4) &&
				!(ol_flags & PKT_TX_IP_CKSUM)))
			return -EINVAL;

	/* PKT_TX_OUTER_IP_CKSUM set for non outer IPv4 packet. */
	if ((ol_flags & PKT_TX_OUTER_IP_CKSUM) &&
			!(ol_flags & PKT_TX_OUTER_IPV4))
		return -EINVAL;

	return 0;
}

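/*
 * Usage sketch (added illustration, not part of the original header):
 * a TX path can validate offload requests before handing packets to a
 * driver that does not tolerate malformed requests.
 *
 * @code
 * if (rte_validate_tx_offload(m) != 0) {
 *         rte_pktmbuf_free(m);   // malformed offload request
 *         return;
 * }
 * @endcode
 */
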
/**
 * Linearize data in the mbuf: pack the data of all segments into the
 * first segment (extending it if needed) and free the other segments.
 * Returns 0 on success, -1 if the first segment's tailroom cannot hold
 * the whole packet.
 */
static inline int
rte_pktmbuf_linearize(struct rte_mbuf *mbuf)
{
	int seg_len, copy_len;
	struct rte_mbuf *m;
	struct rte_mbuf *m_next;
	char *buffer;

	if (rte_pktmbuf_is_contiguous(mbuf))
		return 0;

	/* Extend first segment to the total packet length */
	copy_len = rte_pktmbuf_pkt_len(mbuf) - rte_pktmbuf_data_len(mbuf);

	if (unlikely(copy_len > rte_pktmbuf_tailroom(mbuf)))
		return -1;

	buffer = rte_pktmbuf_mtod_offset(mbuf, char *, mbuf->data_len);
	mbuf->data_len = (uint16_t)(mbuf->pkt_len);

	/* Append data from next segments to the first one */
	m = mbuf->next;
	while (m != NULL) {
		m_next = m->next;

		seg_len = rte_pktmbuf_data_len(m);
		rte_memcpy(buffer, rte_pktmbuf_mtod(m, char *), seg_len);
		buffer += seg_len;

		rte_pktmbuf_free_seg(m);
		m = m_next;
	}

	mbuf->next = NULL;
	mbuf->nb_segs = 1;

	return 0;
}

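/*
 * Usage sketch (added illustration, not part of the original header):
 * some consumers (e.g. a crypto device without scatter-gather support)
 * need contiguous data; linearize first and fall back gracefully.
 *
 * @code
 * if (!rte_pktmbuf_is_contiguous(m) && rte_pktmbuf_linearize(m) != 0) {
 *         rte_pktmbuf_free(m);   // first segment cannot hold the packet
 *         return;
 * }
 * @endcode
 */
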
/**
 * Dump an mbuf structure to a file, including packet data of length
 * *dump_len*.
 */
void rte_pktmbuf_dump(FILE *f, const struct rte_mbuf *m, unsigned dump_len);

#ifdef __cplusplus
}
#endif

#endif /* _RTE_MBUF_H_ */