aboutsummaryrefslogtreecommitdiff
path: root/hw/net/net_tx_pkt.h
blob: 4ec8bbe9bd93eaa17a87c401b439d5a94e909ee8 (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
/*
 * QEMU TX packets abstraction
 *
 * Copyright (c) 2012 Ravello Systems LTD (http://ravellosystems.com)
 *
 * Developed by Daynix Computing LTD (http://www.daynix.com)
 *
 * Authors:
 * Dmitry Fleytman <dmitry@daynix.com>
 * Tamir Shomer <tamirs@daynix.com>
 * Yan Vugenfirer <yan@daynix.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#ifndef NET_TX_PKT_H
#define NET_TX_PKT_H

#include "net/eth.h"
#include "exec/hwaddr.h"

/* define to enable packet dump functions */
/*#define NET_TX_PKT_DEBUG*/

struct NetTxPkt;

/**
 * Init function for tx packet functionality
 *
 * @pkt:            packet pointer
 * @pci_dev:        PCI device processing this packet
 * @max_frags:      max tx ip fragments
 * @has_virt_hdr:   device uses virtio header.
 */
void net_tx_pkt_init(struct NetTxPkt **pkt, PCIDevice *pci_dev,
    uint32_t max_frags, bool has_virt_hdr);

/**
 * Clean all tx packet resources.
 *
 * @pkt:            packet.
 */
void net_tx_pkt_uninit(struct NetTxPkt *pkt);

/**
 * get virtio header
 *
 * @pkt:            packet
 * @ret:            virtio header
 */
struct virtio_net_hdr *net_tx_pkt_get_vhdr(struct NetTxPkt *pkt);

/**
 * build virtio header (will be stored in module context)
 *
 * @pkt:            packet
 * @tso_enable:     TSO enabled
 * @csum_enable:    CSO enabled
 * @gso_size:       MSS size for TSO
 *
 */
void net_tx_pkt_build_vheader(struct NetTxPkt *pkt, bool tso_enable,
    bool csum_enable, uint32_t gso_size);

/**
* updates vlan tag, and adds vlan header with custom ethernet type
* in case it is missing.
*
* @pkt:            packet
* @vlan:           VLAN tag
* @vlan_ethtype:   VLAN header Ethernet type
*
*/
void net_tx_pkt_setup_vlan_header_ex(struct NetTxPkt *pkt,
    uint16_t vlan, uint16_t vlan_ethtype);

/**
* updates vlan tag, and adds vlan header in case it is missing
*
* @pkt:            packet
* @vlan:           VLAN tag
*
*/
static inline void
net_tx_pkt_setup_vlan_header(struct NetTxPkt *pkt, uint16_t vlan)
{
    net_tx_pkt_setup_vlan_header_ex(pkt, vlan, ETH_P_VLAN);
}

/**
 * populate data fragment into pkt context.
 *
 * @pkt:            packet
 * @pa:             physical address of fragment
 * @len:            length of fragment
 *
 */
bool net_tx_pkt_add_raw_fragment(struct NetTxPkt *pkt, hwaddr pa,
    size_t len);

/**
 * Fix ip header fields and calculate IP header and pseudo header checksums.
 *
 * @pkt:            packet
 *
 */
void net_tx_pkt_update_ip_checksums(struct NetTxPkt *pkt);

/**
 * Calculate the IP header checksum.
 *
 * @pkt:            packet
 *
 */
void net_tx_pkt_update_ip_hdr_checksum(struct NetTxPkt *pkt);

/**
 * get length of all populated data.
 *
 * @pkt:            packet
 * @ret:            total data length
 *
 */
size_t net_tx_pkt_get_total_len(struct NetTxPkt *pkt);

/**
 * get packet type
 *
 * @pkt:            packet
 * @ret:            packet type
 *
 */
eth_pkt_types_e net_tx_pkt_get_packet_type(struct NetTxPkt *pkt);

/**
 * prints packet data if debug is enabled
 *
 * @pkt:            packet
 *
 */
void net_tx_pkt_dump(struct NetTxPkt *pkt);

/**
 * reset tx packet private context (needed to be called between packets)
 *
 * @pkt:            packet
 *
 */
void net_tx_pkt_reset(struct NetTxPkt *pkt);

/**
 * Send packet to qemu. handles sw offloads if vhdr is not supported.
 *
 * @pkt:            packet
 * @nc:             NetClientState
 * @ret:            operation result
 *
 */
bool net_tx_pkt_send(struct NetTxPkt *pkt, NetClientState *nc);

/**
* Redirect packet directly to receive path (emulate loopback phy).
* Handles sw offloads if vhdr is not supported.
*
* @pkt:            packet
* @nc:             NetClientState
* @ret:            operation result
*
*/
bool net_tx_pkt_send_loopback(struct NetTxPkt *pkt, NetClientState *nc);

/**
 * parse raw packet data and analyze offload requirements.
 *
 * @pkt:            packet
 *
 */
bool net_tx_pkt_parse(struct NetTxPkt *pkt);

/**
* indicates if there are data fragments held by this packet object.
*
* @pkt:            packet
*
*/
bool net_tx_pkt_has_fragments(struct NetTxPkt *pkt);

/**
 * Fix IPv6 'plen' field.
 * If ipv6 payload length field is 0 - then there should be Hop-by-Hop
 * option for packets greater than 65,535.
 * For packets with a payload less than 65,535: fix 'plen' field.
 * For backends with vheader, we need just one packet with proper
 * payload size. For now, qemu drops every packet with size greater 64K
 * (see net_tx_pkt_send()) so, there is no reason to add jumbo option to ip6
 * hop-by-hop extension if it's missed
 *
 * @pkt            packet
 */
void net_tx_pkt_fix_ip6_payload_len(struct NetTxPkt *pkt);

#endif