#define PKT_RX_VLAN          (1ULL << 0)
#define PKT_RX_RSS_HASH      (1ULL << 1)
#define PKT_RX_FDIR          (1ULL << 2)
#define PKT_RX_L4_CKSUM_BAD  (1ULL << 3)
#define PKT_RX_IP_CKSUM_BAD  (1ULL << 4)
#define PKT_RX_EIP_CKSUM_BAD (1ULL << 5)
#define PKT_RX_VLAN_STRIPPED (1ULL << 6)

#define PKT_RX_IP_CKSUM_MASK    ((1ULL << 4) | (1ULL << 7))
#define PKT_RX_IP_CKSUM_UNKNOWN 0
#define PKT_RX_IP_CKSUM_BAD     (1ULL << 4)
#define PKT_RX_IP_CKSUM_GOOD    (1ULL << 7)
#define PKT_RX_IP_CKSUM_NONE    ((1ULL << 4) | (1ULL << 7))

#define PKT_RX_L4_CKSUM_MASK    ((1ULL << 3) | (1ULL << 8))
#define PKT_RX_L4_CKSUM_UNKNOWN 0
#define PKT_RX_L4_CKSUM_BAD     (1ULL << 3)
#define PKT_RX_L4_CKSUM_GOOD    (1ULL << 8)
#define PKT_RX_L4_CKSUM_NONE    ((1ULL << 3) | (1ULL << 8))

#define PKT_RX_IEEE1588_PTP       (1ULL << 9)
#define PKT_RX_IEEE1588_TMST      (1ULL << 10)
#define PKT_RX_FDIR_ID            (1ULL << 13)
#define PKT_RX_FDIR_FLX           (1ULL << 14)
#define PKT_RX_QINQ_STRIPPED      (1ULL << 15)
#define PKT_RX_LRO                (1ULL << 16)
#define PKT_RX_TIMESTAMP          (1ULL << 17)
#define PKT_RX_SEC_OFFLOAD        (1ULL << 18)
#define PKT_RX_SEC_OFFLOAD_FAILED (1ULL << 19)
#define PKT_RX_QINQ               (1ULL << 20)

#define PKT_TX_SEC_OFFLOAD       (1ULL << 43)
#define PKT_TX_MACSEC            (1ULL << 44)

#define PKT_TX_TUNNEL_VXLAN      (0x1ULL << 45)
#define PKT_TX_TUNNEL_GRE        (0x2ULL << 45)
#define PKT_TX_TUNNEL_IPIP       (0x3ULL << 45)
#define PKT_TX_TUNNEL_GENEVE     (0x4ULL << 45)
#define PKT_TX_TUNNEL_MPLSINUDP  (0x5ULL << 45)
#define PKT_TX_TUNNEL_MASK       (0xFULL << 45)

#define PKT_TX_QINQ_PKT          (1ULL << 49)
#define PKT_TX_TCP_SEG           (1ULL << 50)
#define PKT_TX_IEEE1588_TMST     (1ULL << 51)

#define PKT_TX_L4_NO_CKSUM       (0ULL << 52)
#define PKT_TX_TCP_CKSUM         (1ULL << 52)
#define PKT_TX_SCTP_CKSUM        (2ULL << 52)
#define PKT_TX_UDP_CKSUM         (3ULL << 52)
#define PKT_TX_L4_MASK           (3ULL << 52)

#define PKT_TX_IP_CKSUM          (1ULL << 54)
#define PKT_TX_IPV4              (1ULL << 55)
#define PKT_TX_IPV6              (1ULL << 56)
#define PKT_TX_VLAN_PKT          (1ULL << 57)
#define PKT_TX_OUTER_IP_CKSUM    (1ULL << 58)
#define PKT_TX_OUTER_IPV4        (1ULL << 59)
#define PKT_TX_OUTER_IPV6        (1ULL << 60)

/* PKT_TX_OFFLOAD_MASK is truncated in the excerpt (only three members
 * survive); the member list below is reconstructed from the flags above. */
#define PKT_TX_OFFLOAD_MASK (		\
		PKT_TX_IP_CKSUM |	\
		PKT_TX_L4_MASK |	\
		PKT_TX_OUTER_IP_CKSUM |	\
		PKT_TX_TCP_SEG |	\
		PKT_TX_IEEE1588_TMST |	\
		PKT_TX_QINQ_PKT |	\
		PKT_TX_VLAN_PKT |	\
		PKT_TX_TUNNEL_MASK |	\
		PKT_TX_MACSEC |		\
		PKT_TX_SEC_OFFLOAD)

#define __RESERVED        (1ULL << 61)
#define IND_ATTACHED_MBUF (1ULL << 62)
#define CTRL_MBUF_FLAG    (1ULL << 63)

#define RTE_MBUF_PRIV_ALIGN 8

#define RTE_MBUF_DEFAULT_DATAROOM 2048
#define RTE_MBUF_DEFAULT_BUF_SIZE \
	(RTE_MBUF_DEFAULT_DATAROOM + RTE_PKTMBUF_HEADROOM)

/* Marker fields inside struct rte_mbuf (only these appear in the excerpt): */
	MARKER rx_descriptor_fields1;
	MARKER cacheline1 __rte_cache_min_aligned;

#define RTE_MBUF_MAX_NB_SEGS UINT16_MAX

/* rte_mbuf_prefetch_part2() only prefetches the second cache line when it exists: */
#if RTE_CACHE_LINE_SIZE == 64

/* Body of rte_mbuf_data_iova(): */
	return mb->buf_iova + mb->data_off;
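/*
 * Illustrative sketch, not part of rte_mbuf.h: decoding the two-bit RX IP
 * checksum field defined above on a received mbuf. The helper name
 * example_ip_cksum_ok() is made up for this example.
 */
#include <rte_mbuf.h>

static inline int
example_ip_cksum_ok(const struct rte_mbuf *m)
{
	switch (m->ol_flags & PKT_RX_IP_CKSUM_MASK) {
	case PKT_RX_IP_CKSUM_GOOD:
		return 1;	/* hardware verified the IP checksum */
	case PKT_RX_IP_CKSUM_BAD:
		return 0;	/* checksum in the packet is wrong */
	case PKT_RX_IP_CKSUM_NONE:
		return -1;	/* checksum not valid, header integrity OK: decide in software */
	default:		/* PKT_RX_IP_CKSUM_UNKNOWN */
		return -1;	/* no information from the device: verify in software */
	}
}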
/* Deprecated alias for rte_mbuf_data_iova(): */
rte_mbuf_data_dma_addr(const struct rte_mbuf *mb)

/* Body of rte_mbuf_data_iova_default(): */
	return mb->buf_iova + RTE_PKTMBUF_HEADROOM;

/* Deprecated alias for rte_mbuf_data_iova_default(): */
rte_mbuf_data_dma_addr_default(const struct rte_mbuf *mb)
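/*
 * Sketch, not part of the header: a driver-style use of rte_mbuf_data_iova().
 * The descriptor pointer "rxd_addr" is a made-up stand-in for a device
 * descriptor field.
 */
#include <stdint.h>
#include <rte_mbuf.h>

static inline void
example_fill_rx_descriptor(const struct rte_mbuf *m, volatile uint64_t *rxd_addr)
{
	/* Bus address of the current start of data (buf_iova + data_off). */
	*rxd_addr = rte_mbuf_data_iova(m);
}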
#define RTE_MBUF_INDIRECT(mb) ((mb)->ol_flags & IND_ATTACHED_MBUF)
#define RTE_MBUF_DIRECT(mb)   (!RTE_MBUF_INDIRECT(mb))

#ifdef RTE_LIBRTE_MBUF_DEBUG
#define __rte_mbuf_sanity_check(m, is_h) rte_mbuf_sanity_check(m, is_h)
#else
#define __rte_mbuf_sanity_check(m, is_h) do { } while (0)
#endif

/* Only the return types of the reference-count helpers survive in the
 * excerpt: rte_mbuf_refcnt_read()/rte_mbuf_refcnt_update(), with atomic and
 * plain variants selected by RTE_MBUF_REFCNT_ATOMIC. */
#ifdef RTE_MBUF_REFCNT_ATOMIC
static inline uint16_t
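/*
 * Sketch, not part of the header: using the reference-count helpers when one
 * segment is queued on two paths at once. example_share_segment() is a
 * made-up name.
 */
#include <rte_mbuf.h>

static inline void
example_share_segment(struct rte_mbuf *m)
{
	/* Take one extra reference: the segment now survives one extra free. */
	rte_mbuf_refcnt_update(m, 1);

	if (rte_mbuf_refcnt_read(m) > 1) {
		/* Shared segment: treat its data as read-only. */
	}
}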
/* RTE_MBUF_PREFETCH_TO_FREE(m): body truncated in the excerpt. */
#define RTE_MBUF_PREFETCH_TO_FREE(m) do { \

#define MBUF_RAW_ALLOC_CHECK(m) do {			\
	RTE_ASSERT(rte_mbuf_refcnt_read(m) == 1);	\
	RTE_ASSERT((m)->next == NULL);			\
	RTE_ASSERT((m)->nb_segs == 1);			\
	__rte_mbuf_sanity_check(m, 0);			\
} while (0)

/* rte_mbuf_raw_alloc() checks each mbuf it dequeues from the pool: */
	MBUF_RAW_ALLOC_CHECK(m);

/* rte_mbuf_raw_free() requires a single segment with a reference count of 1: */
	RTE_ASSERT(m->next == NULL);

/* Deprecated wrapper around rte_mbuf_raw_free(): */
__rte_mbuf_raw_free(struct rte_mbuf *m)
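/*
 * Sketch, not part of the header: the raw allocate/free pair used by drivers.
 * No field is reset by rte_mbuf_raw_alloc(), so this is only safe when the
 * caller initializes the mbuf itself. example_raw_cycle() is a made-up name.
 */
#include <rte_mbuf.h>
#include <rte_mempool.h>

static inline void
example_raw_cycle(struct rte_mempool *mp)
{
	struct rte_mbuf *m = rte_mbuf_raw_alloc(mp);

	if (m == NULL)
		return;
	/* ... a PMD would fill descriptor-related fields here ... */
	rte_mbuf_raw_free(m);	/* refcnt must still be 1 and m->next NULL */
}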
/* Tail of the rte_ctrlmbuf_init() prototype: */
		void *m, unsigned i);

#define rte_ctrlmbuf_alloc(mp) rte_pktmbuf_alloc(mp)
#define rte_ctrlmbuf_free(m)   rte_pktmbuf_free(m)
#define rte_ctrlmbuf_data(m)   ((char *)((m)->buf_addr) + (m)->data_off)
#define rte_ctrlmbuf_len(m)    rte_pktmbuf_data_len(m)

/* Tail of the rte_pktmbuf_init() prototype: */
		void *m, unsigned i);

/* Return types of rte_pktmbuf_data_room_size() and rte_pktmbuf_priv_size(): */
static inline uint16_t
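/*
 * Sketch, not part of the header: creating a packet mbuf pool and reading
 * back the sizes it was configured with. The pool name "example_pool" and
 * the element counts are arbitrary.
 */
#include <rte_mbuf.h>

static struct rte_mempool *
example_create_pool(int socket_id)
{
	struct rte_mempool *mp;

	mp = rte_pktmbuf_pool_create("example_pool", 8192, 256, 0,
				     RTE_MBUF_DEFAULT_BUF_SIZE, socket_id);
	if (mp != NULL) {
		/* Here data room == RTE_MBUF_DEFAULT_BUF_SIZE and priv size == 0. */
		(void)rte_pktmbuf_data_room_size(mp);
		(void)rte_pktmbuf_priv_size(mp);
	}
	return mp;
}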
/* rte_pktmbuf_reset_headroom(): */
	m->data_off = RTE_MIN(RTE_PKTMBUF_HEADROOM, (uint16_t)m->buf_len);

#define MBUF_INVALID_PORT UINT16_MAX

static inline void rte_pktmbuf_reset(struct rte_mbuf *m)

/* rte_pktmbuf_alloc() resets the mbuf it gets from rte_mbuf_raw_alloc(): */
	rte_pktmbuf_reset(m);
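/*
 * Sketch, not part of the header: building a packet from scratch.
 * rte_pktmbuf_alloc() hands out a reset, empty mbuf; rte_pktmbuf_append()
 * then reserves room for the payload. example_build_packet() is a made-up name.
 */
#include <string.h>
#include <rte_mbuf.h>

static struct rte_mbuf *
example_build_packet(struct rte_mempool *mp, const void *payload, uint16_t len)
{
	struct rte_mbuf *m = rte_pktmbuf_alloc(mp);
	char *dst;

	if (m == NULL)
		return NULL;
	dst = rte_pktmbuf_append(m, len);
	if (dst == NULL) {		/* not enough tailroom */
		rte_pktmbuf_free(m);
		return NULL;
	}
	memcpy(dst, payload, len);	/* pkt_len and data_len were already updated */
	return m;
}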
/* Parameter list of rte_pktmbuf_alloc_bulk(): */
		struct rte_mbuf **mbufs, unsigned count)

	/* The bulk-allocated mbufs are checked and reset in a Duff's-device
	 * style unrolled loop; the case labels and index increments are not
	 * part of this excerpt. */
	switch (count % 4) {
		while (idx != count) {
			MBUF_RAW_ALLOC_CHECK(mbufs[idx]);
			rte_pktmbuf_reset(mbufs[idx]);

			MBUF_RAW_ALLOC_CHECK(mbufs[idx]);
			rte_pktmbuf_reset(mbufs[idx]);

			MBUF_RAW_ALLOC_CHECK(mbufs[idx]);
			rte_pktmbuf_reset(mbufs[idx]);

			MBUF_RAW_ALLOC_CHECK(mbufs[idx]);
			rte_pktmbuf_reset(mbufs[idx]);
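/*
 * Sketch, not part of the header: allocating a burst of mbufs in one call.
 * EXAMPLE_BURST is an arbitrary size chosen for the example.
 */
#include <rte_mbuf.h>

#define EXAMPLE_BURST 32

static inline int
example_alloc_burst(struct rte_mempool *mp, struct rte_mbuf *pkts[EXAMPLE_BURST])
{
	/* Either all EXAMPLE_BURST mbufs are allocated and reset, or none are. */
	return rte_pktmbuf_alloc_bulk(mp, pkts, EXAMPLE_BURST);
}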
/* rte_pktmbuf_attach(): the indirect mbuf "mi" points into the donor's buffer: */
	mi->buf_iova = m->buf_iova;
	mi->data_off = m->data_off;

/* rte_pktmbuf_detach(): the mbuf is re-pointed at its own embedded buffer: */
	mbuf_size = sizeof(struct rte_mbuf) + priv_size;
	m->buf_addr = (char *)m + mbuf_size;
	m->buf_len = (uint16_t)buf_len;
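/*
 * Sketch, not part of the header: zero-copy duplication with
 * rte_pktmbuf_clone(), which relies on rte_pktmbuf_attach()/_detach()
 * internally. "clone_pool" is a pool used only for the indirect headers;
 * example_duplicate() is a made-up name.
 */
#include <rte_mbuf.h>

static struct rte_mbuf *
example_duplicate(struct rte_mbuf *pkt, struct rte_mempool *clone_pool)
{
	struct rte_mbuf *dup = rte_pktmbuf_clone(pkt, clone_pool);

	if (dup != NULL && RTE_MBUF_INDIRECT(dup)) {
		/* dup shares pkt's data buffers; freeing dup later detaches it
		 * and drops the extra references taken on the direct mbufs. */
	}
	return dup;
}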
/* rte_pktmbuf_prefree_seg(): on both reference-count paths a stale chain
 * pointer is cleared before the segment can be reused: */
	if (m->next != NULL) {

	if (m->next != NULL) {

/* Deprecated wrapper around rte_pktmbuf_prefree_seg(): */
__rte_pktmbuf_prefree_seg(struct rte_mbuf *m)
/* rte_pktmbuf_clone() attaches one freshly allocated indirect mbuf per
 * source segment: */
	} while ((md = md->next) != NULL &&

/* rte_pktmbuf_refcnt_update() walks every segment of the chain: */
	} while ((m = m->next) != NULL);

/* rte_pktmbuf_lastseg() follows the next pointers to the final segment: */
	while (m2->next != NULL)
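/*
 * Sketch, not part of the header: inspecting a segmented packet and releasing
 * the whole chain. A single rte_pktmbuf_free() walks and frees every segment.
 */
#include <rte_mbuf.h>

static void
example_inspect_and_free(struct rte_mbuf *pkt)
{
	struct rte_mbuf *last = rte_pktmbuf_lastseg(pkt);

	/* Room left after the data of the final segment. */
	(void)rte_pktmbuf_tailroom(last);

	rte_pktmbuf_free(pkt);
}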
#define rte_pktmbuf_mtod_offset(m, t, o) \
	((t)((char *)(m)->buf_addr + (m)->data_off + (o)))
#define rte_pktmbuf_mtod(m, t) rte_pktmbuf_mtod_offset(m, t, 0)

#define rte_pktmbuf_iova_offset(m, o) \
	(rte_iova_t)((m)->buf_iova + (m)->data_off + (o))
#define rte_pktmbuf_mtophys_offset(m, o) \
	rte_pktmbuf_iova_offset(m, o)
#define rte_pktmbuf_iova(m) rte_pktmbuf_iova_offset(m, 0)
#define rte_pktmbuf_mtophys(m) rte_pktmbuf_iova(m)

#define rte_pktmbuf_pkt_len(m)  ((m)->pkt_len)
#define rte_pktmbuf_data_len(m) ((m)->data_len)

/* rte_pktmbuf_prepend() returns the new start of data: */
	return (char *)m->buf_addr + m->data_off;

/* rte_pktmbuf_append() returns the previous tail, i.e. the start of the
 * newly appended area: */
	return (char *)tail;

/* rte_pktmbuf_adj() also returns the new start of data: */
	return (char *)m->buf_addr + m->data_off;
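/*
 * Sketch, not part of the header: typical header manipulation with the macros
 * and helpers above. "struct example_hdr" and the 4-byte trailer are made up.
 */
#include <stdint.h>
#include <rte_mbuf.h>

struct example_hdr { uint8_t bytes[8]; };

static int
example_strip_and_trim(struct rte_mbuf *m)
{
	struct example_hdr *h;

	if (rte_pktmbuf_data_len(m) < sizeof(*h))
		return -1;

	/* Point at the current start of data in the first segment. */
	h = rte_pktmbuf_mtod(m, struct example_hdr *);
	(void)h;	/* parse the header here */

	/* Strip the header from the front; pkt_len and data_len shrink. */
	if (rte_pktmbuf_adj(m, (uint16_t)sizeof(*h)) == NULL)
		return -1;

	/* Remove a 4-byte trailer (e.g. a CRC) from the end of the packet. */
	return rte_pktmbuf_trim(m, 4);
}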
const void *__rte_pktmbuf_read(const struct rte_mbuf *m, uint32_t off,
	uint32_t len, void *buf);

/* rte_pktmbuf_read(): tail of the parameter list and the slow-path call that
 * copies data spanning several segments into the caller's buffer: */
		uint32_t off, uint32_t len, void *buf)

	return __rte_pktmbuf_read(m, off, len, buf);
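/*
 * Sketch, not part of the header: rte_pktmbuf_read() returns a pointer
 * straight into the first segment when the requested range fits there, and
 * otherwise copies the bytes into the supplied buffer. example_peek() is a
 * made-up name.
 */
#include <stdint.h>
#include <rte_mbuf.h>

static int
example_peek(const struct rte_mbuf *m, uint32_t off)
{
	uint8_t tmp[16];
	const uint8_t *p = rte_pktmbuf_read(m, off, sizeof(tmp), tmp);

	if (p == NULL)		/* off + 16 exceeds pkt_len */
		return -1;
	return p[0];
}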
/* rte_pktmbuf_chain() links the tail chain after the last segment of head: */
	cur_tail->next = tail;
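/*
 * Sketch, not part of the header: joining two packets. On success "tail"
 * becomes part of head's segment list and must not be freed separately.
 */
#include <rte_mbuf.h>

static int
example_join(struct rte_mbuf *head, struct rte_mbuf *tail)
{
	/* Fails with -EOVERFLOW if the chain would exceed RTE_MBUF_MAX_NB_SEGS. */
	return rte_pktmbuf_chain(head, tail);
}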
/* rte_validate_tx_offload(): the inner L3 header starts after the L2 header
 * (plus the outer headers when PKT_TX_OUTER_IP_CKSUM is requested): */
	uint64_t inner_l3_offset = m->l2_len;

	/* TSO over IPv4 also requires PKT_TX_IP_CKSUM: */
				!(ol_flags & PKT_TX_IP_CKSUM)))

	/* PKT_TX_OUTER_IP_CKSUM is only valid together with PKT_TX_OUTER_IPV4: */
	if ((ol_flags & PKT_TX_OUTER_IP_CKSUM) &&
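/*
 * Sketch, not part of the header: requesting IPv4 and TCP checksum offload on
 * transmit. The L2/L3 lengths must be filled in so the driver can locate the
 * headers; rte_validate_tx_offload() checks this kind of consistency.
 * example_request_tx_cksum() is a made-up name.
 */
#include <rte_mbuf.h>

static void
example_request_tx_cksum(struct rte_mbuf *m, uint16_t l2_len, uint16_t l3_len)
{
	m->l2_len = l2_len;	/* e.g. 14 for plain Ethernet */
	m->l3_len = l3_len;	/* e.g. 20 for an IPv4 header without options */
	m->ol_flags |= PKT_TX_IPV4 | PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM;
}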
/* Locals of rte_pktmbuf_linearize(): */
	int seg_len, copy_len;
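/*
 * Sketch, not part of the header: forcing a packet into one contiguous
 * segment before an operation that cannot handle chains.
 */
#include <rte_mbuf.h>

static int
example_make_contiguous(struct rte_mbuf *m)
{
	if (rte_pktmbuf_is_contiguous(m))
		return 0;
	/* Copies all following segments into the first one; fails (-1) if the
	 * first segment lacks the tailroom to hold the whole packet. */
	return rte_pktmbuf_linearize(m);
}

/* The lines below are the flat declaration/member index that accompanies
 * this header excerpt. */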
static rte_iova_t rte_mbuf_data_iova(const struct rte_mbuf *mb)
uint16_t mbuf_data_room_size
#define __rte_always_inline
static int16_t rte_atomic16_read(const rte_atomic16_t *v)
static struct rte_mbuf * rte_pktmbuf_alloc(struct rte_mempool *mp)
uint8_t inner_esp_next_proto
__extension__ typedef void * MARKER[0]
#define RTE_MBUF_DIRECT(mb)
#define IND_ATTACHED_MBUF
static uint16_t rte_pktmbuf_priv_size(struct rte_mempool *mp)
static int rte_validate_tx_offload(const struct rte_mbuf *m)
static void rte_pktmbuf_free(struct rte_mbuf *m)
void rte_pktmbuf_pool_init(struct rte_mempool *mp, void *opaque_arg)
static struct rte_mbuf * rte_pktmbuf_clone(struct rte_mbuf *md, struct rte_mempool *mp)
static __rte_always_inline void rte_pktmbuf_free_seg(struct rte_mbuf *m)
static struct rte_mbuf * rte_mbuf_from_indirect(struct rte_mbuf *mi)
static int rte_pktmbuf_chain(struct rte_mbuf *head, struct rte_mbuf *tail)
struct rte_mbuf::@113::@124 fdir
__extension__ typedef uint8_t MARKER8[0]
struct rte_mbuf::@113::@125 sched
void rte_pktmbuf_dump(FILE *f, const struct rte_mbuf *m, unsigned dump_len)
static uint16_t rte_pktmbuf_headroom(const struct rte_mbuf *m)
struct rte_mbuf __rte_cache_aligned
static int rte_pktmbuf_alloc_bulk(struct rte_mempool *pool, struct rte_mbuf **mbufs, unsigned count)
static void rte_pktmbuf_reset_headroom(struct rte_mbuf *m)
#define PKT_TX_OUTER_IP_CKSUM
static void rte_mbuf_prefetch_part2(struct rte_mbuf *m)
static __rte_always_inline struct rte_mbuf * rte_pktmbuf_prefree_seg(struct rte_mbuf *m)
static int rte_pktmbuf_is_contiguous(const struct rte_mbuf *m)
int rte_get_tx_ol_flag_list(uint64_t mask, char *buf, size_t buflen)
#define PKT_TX_OUTER_IPV4
static uint16_t rte_pktmbuf_tailroom(const struct rte_mbuf *m)
static __rte_always_inline void rte_mbuf_raw_free(struct rte_mbuf *m)
__extension__ typedef uint64_t MARKER64[0]
void rte_mbuf_sanity_check(const struct rte_mbuf *m, int is_header)
#define __rte_mbuf_sanity_check(m, is_h)
const char * rte_get_tx_ol_flag_name(uint64_t mask)
static uint16_t rte_mbuf_refcnt_read(const struct rte_mbuf *m)
static int rte_pktmbuf_linearize(struct rte_mbuf *mbuf)
static __rte_always_inline int rte_mempool_get(struct rte_mempool *mp, void **obj_p)
static void rte_atomic16_set(rte_atomic16_t *v, int16_t new_value)
static char * rte_pktmbuf_adj(struct rte_mbuf *m, uint16_t len)
#define rte_pktmbuf_pkt_len(m)
static void rte_pktmbuf_attach(struct rte_mbuf *mi, struct rte_mbuf *m)
static __rte_always_inline int rte_mempool_get_bulk(struct rte_mempool *mp, void **obj_table, unsigned int n)
static int16_t rte_atomic16_add_return(rte_atomic16_t *v, int16_t inc)
static int rte_is_ctrlmbuf(struct rte_mbuf *m)
static void rte_pktmbuf_detach(struct rte_mbuf *m)
#define rte_pktmbuf_data_len(m)
#define rte_pktmbuf_mtod(m, t)
static uint16_t rte_mbuf_refcnt_update(struct rte_mbuf *m, int16_t value)
#define MBUF_INVALID_PORT
static uint16_t rte_pktmbuf_data_room_size(struct rte_mempool *mp)
const char * rte_get_rx_ol_flag_name(uint64_t mask)
void rte_pktmbuf_init(struct rte_mempool *mp, void *opaque_arg, void *m, unsigned i)
struct rte_mempool * pool
static char * rte_pktmbuf_append(struct rte_mbuf *m, uint16_t len)
static void rte_mbuf_refcnt_set(struct rte_mbuf *m, uint16_t new_value)
static rte_iova_t rte_mbuf_data_iova_default(const struct rte_mbuf *mb)
static int rte_pktmbuf_trim(struct rte_mbuf *m, uint16_t len)
static char * rte_mbuf_to_baddr(struct rte_mbuf *md)
static const void * rte_pktmbuf_read(const struct rte_mbuf *m, uint32_t off, uint32_t len, void *buf)
static char * rte_pktmbuf_prepend(struct rte_mbuf *m, uint16_t len)
static struct rte_mbuf * rte_mbuf_raw_alloc(struct rte_mempool *mp)
static void rte_pktmbuf_refcnt_update(struct rte_mbuf *m, int16_t v)
#define RTE_PTR_SUB(ptr, x)
static struct rte_mbuf * rte_pktmbuf_lastseg(struct rte_mbuf *m)
RTE_STD_C11 union rte_mbuf::@110 __rte_aligned
static void * rte_memcpy(void *dst, const void *src, size_t n)
#define PKT_TX_OFFLOAD_MASK
static rte_iova_t rte_mempool_virt2iova(const void *elt)
static __rte_always_inline void rte_mempool_put(struct rte_mempool *mp, void *obj)
static void rte_mbuf_prefetch_part1(struct rte_mbuf *m)
rte_atomic16_t refcnt_atomic
static void * rte_mempool_get_priv(struct rte_mempool *mp)
char name[RTE_MEMZONE_NAMESIZE]
void rte_ctrlmbuf_init(struct rte_mempool *mp, void *opaque_arg, void *m, unsigned i)
#define RTE_MBUF_INDIRECT(mb)
#define rte_pktmbuf_mtod_offset(m, t, o)
int rte_get_rx_ol_flag_list(uint64_t mask, char *buf, size_t buflen)
static void rte_prefetch0(const volatile void *p)
struct rte_mempool * rte_pktmbuf_pool_create(const char *name, unsigned n, unsigned cache_size, uint16_t priv_size, uint16_t data_room_size, int socket_id)