author    | Anthony Liguori <aliguori@us.ibm.com> | 2012-03-13 13:56:13 -0500
committer | Anthony Liguori <aliguori@us.ibm.com> | 2012-03-13 13:56:13 -0500
commit    | 3e7ecd976b06fc9054a34bda093a70efae99588b
tree      | bd4aa371069fb6709f3f8e38c157b3c3185b45b5
parent    | 684e1e047950938be259e7d02033f44c427e6ba5
parent    | 2d26512b45b5236fa521c4492608fe9fb5bedf46
Merge remote-tracking branch 'kiszka/queues/slirp' into staging
* kiszka/queues/slirp:
slirp: Fix compiler warning for w64
slirp: Cleanup resources on instance removal
slirp: Remove unneeded if_queued
slirp: Fix queue walking in if_start
slirp: Prevent recursion of if_start
slirp: Keep next_m always valid
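The two structural ideas behind the if_start() rework in this queue (a busy flag that keeps the routine from re-entering itself, and a walk that resolves the next queue element before the current one can be freed or requeued) can be sketched independently of the slirp data structures. The sketch below is illustrative only: flush_queue(), send_one() and struct pkt are invented names for this example, not part of the patches above.

/* Illustrative sketch only -- simplified from the ideas in this series, not
 * the actual QEMU code.  A "busy" flag keeps the flush routine from being
 * re-entered when sending a packet loops back into it, and the walker picks
 * the successor before the current element can be freed or requeued. */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct pkt {
    struct pkt *next;
    int id;
};

struct queue {
    struct pkt *head;
    bool flush_busy;            /* guards against recursive flushing */
};

/* send_one() stands in for the actual transmit step; in the real code that
 * step may trigger another flush attempt, hence the guard. */
static bool send_one(struct queue *q, struct pkt *p);

static void flush_queue(struct queue *q)
{
    struct pkt *p, *next;

    if (q->flush_busy) {
        return;                 /* already flushing further up the stack */
    }
    q->flush_busy = true;

    for (p = q->head; p != NULL; p = next) {
        /* Grab the successor first so the current packet could be freed
         * or requeued without breaking the walk. */
        next = p->next;
        if (!send_one(q, p)) {
            break;              /* device not ready, try again later */
        }
        q->head = next;
    }

    q->flush_busy = false;
}

static bool send_one(struct queue *q, struct pkt *p)
{
    printf("sending packet %d\n", p->id);
    flush_queue(q);             /* recursion is harmless: the guard returns early */
    return true;
}

int main(void)
{
    struct pkt c = { NULL, 3 };
    struct pkt b = { &c, 2 };
    struct pkt a = { &b, 1 };
    struct queue q = { &a, false };

    flush_queue(&q);
    return 0;
}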
-rw-r--r-- | slirp/cksum.c    |  2
-rw-r--r-- | slirp/if.c       | 86
-rw-r--r-- | slirp/ip_icmp.c  |  7
-rw-r--r-- | slirp/ip_icmp.h  |  1
-rw-r--r-- | slirp/ip_input.c |  7
-rw-r--r-- | slirp/mbuf.c     | 21
-rw-r--r-- | slirp/mbuf.h     |  1
-rw-r--r-- | slirp/slirp.c    | 10
-rw-r--r-- | slirp/slirp.h    |  4
-rw-r--r-- | slirp/tcp_subr.c |  7
-rw-r--r-- | slirp/udp.c      |  8
-rw-r--r-- | slirp/udp.h      |  1
12 files changed, 116 insertions, 39 deletions
diff --git a/slirp/cksum.c b/slirp/cksum.c
index e43867d..6328660 100644
--- a/slirp/cksum.c
+++ b/slirp/cksum.c
@@ -75,7 +75,7 @@ int cksum(struct mbuf *m, int len)
     /*
      * Force to even boundary.
      */
-    if ((1 & (long) w) && (mlen > 0)) {
+    if ((1 & (uintptr_t)w) && (mlen > 0)) {
         REDUCE;
         sum <<= 8;
         s_util.c[0] = *(uint8_t *)w;
diff --git a/slirp/if.c b/slirp/if.c
--- a/slirp/if.c
+++ b/slirp/if.c
@@ -96,8 +96,13 @@ if_output(struct socket *so, struct mbuf *ifm)
             ifs_insque(ifm, ifq->ifs_prev);
             goto diddit;
         }
-    } else
+    } else {
         ifq = slirp->if_batchq.ifq_prev;
+        /* Set next_m if the queue was empty so far */
+        if (slirp->next_m == &slirp->if_batchq) {
+            slirp->next_m = ifm;
+        }
+    }
 
     /* Create a new doubly linked list for this session */
     ifm->ifq_so = so;
@@ -105,8 +110,6 @@ if_output(struct socket *so, struct mbuf *ifm)
     insque(ifm, ifq);
 
 diddit:
-    slirp->if_queued++;
-
     if (so) {
         /* Update *_queued */
         so->so_queued++;
@@ -152,44 +155,54 @@ diddit:
 void if_start(Slirp *slirp)
 {
     uint64_t now = qemu_get_clock_ns(rt_clock);
-    int requeued = 0;
-    bool from_batchq = false;
-    struct mbuf *ifm, *ifqt;
+    bool from_batchq, next_from_batchq;
+    struct mbuf *ifm, *ifm_next, *ifqt;
 
     DEBUG_CALL("if_start");
 
-    while (slirp->if_queued) {
-        /* check if we can really output */
-        if (!slirp_can_output(slirp->opaque))
-            return;
-
-        /*
-         * See which queue to get next packet from
-         * If there's something in the fastq, select it immediately
-         */
-        if (slirp->if_fastq.ifq_next != &slirp->if_fastq) {
-            ifm = slirp->if_fastq.ifq_next;
-        } else {
-            /* Nothing on fastq, see if next_m is valid */
-            if (slirp->next_m != &slirp->if_batchq) {
-                ifm = slirp->next_m;
-            } else {
-                ifm = slirp->if_batchq.ifq_next;
-            }
+    if (slirp->if_start_busy) {
+        return;
+    }
+    slirp->if_start_busy = true;
+
+    if (slirp->if_fastq.ifq_next != &slirp->if_fastq) {
+        ifm_next = slirp->if_fastq.ifq_next;
+        next_from_batchq = false;
+    } else if (slirp->next_m != &slirp->if_batchq) {
+        /* Nothing on fastq, pick up from batchq via next_m */
+        ifm_next = slirp->next_m;
+        next_from_batchq = true;
+    } else {
+        ifm_next = NULL;
+    }
 
-            from_batchq = true;
+    while (ifm_next) {
+        /* check if we can really output */
+        if (!slirp_can_output(slirp->opaque)) {
+            break;
         }
 
-        slirp->if_queued--;
+        ifm = ifm_next;
+        from_batchq = next_from_batchq;
+
+        ifm_next = ifm->ifq_next;
+        if (ifm_next == &slirp->if_fastq) {
+            /* No more packets in fastq, switch to batchq */
+            ifm_next = slirp->next_m;
+            next_from_batchq = true;
+        }
+        if (ifm_next == &slirp->if_batchq) {
+            /* end of batchq */
+            ifm_next = NULL;
+        }
 
         /* Try to send packet unless it already expired */
         if (ifm->expiration_date >= now && !if_encap(slirp, ifm)) {
             /* Packet is delayed due to pending ARP resolution */
-            requeued++;
             continue;
         }
 
-        if (from_batchq) {
+        if (ifm == slirp->next_m) {
             /* Set which packet to send on next iteration */
             slirp->next_m = ifm->ifq_next;
         }
@@ -200,8 +213,20 @@ void if_start(Slirp *slirp)
 
         /* If there are more packets for this session, re-queue them */
         if (ifm->ifs_next != ifm) {
-            insque(ifm->ifs_next, ifqt);
+            struct mbuf *next = ifm->ifs_next;
+
+            insque(next, ifqt);
             ifs_remque(ifm);
+
+            if (!from_batchq) {
+                /* Next packet in fastq is from the same session */
+                ifm_next = next;
+                next_from_batchq = false;
+            } else if (slirp->next_m == &slirp->if_batchq) {
+                /* Set next_m and ifm_next if the session packet is now the
+                 * only one on batchq */
+                slirp->next_m = ifm_next = next;
+            }
         }
 
         /* Update so_queued */
@@ -211,8 +236,7 @@ void if_start(Slirp *slirp)
         }
 
         m_free(ifm);
-
     }
 
-    slirp->if_queued = requeued;
+    slirp->if_start_busy = false;
 }
diff --git a/slirp/ip_icmp.c b/slirp/ip_icmp.c
index 5dbf21d..d571fd0 100644
--- a/slirp/ip_icmp.c
+++ b/slirp/ip_icmp.c
@@ -66,6 +66,13 @@ void icmp_init(Slirp *slirp)
     slirp->icmp_last_so = &slirp->icmp;
 }
 
+void icmp_cleanup(Slirp *slirp)
+{
+    while (slirp->icmp.so_next != &slirp->icmp) {
+        icmp_detach(slirp->icmp.so_next);
+    }
+}
+
 static int icmp_send(struct socket *so, struct mbuf *m, int hlen)
 {
     struct ip *ip = mtod(m, struct ip *);
diff --git a/slirp/ip_icmp.h b/slirp/ip_icmp.h
index b3da1f2..1a1af91 100644
--- a/slirp/ip_icmp.h
+++ b/slirp/ip_icmp.h
@@ -154,6 +154,7 @@ struct icmp {
     (type) == ICMP_MASKREQ || (type) == ICMP_MASKREPLY)
 
 void icmp_init(Slirp *slirp);
+void icmp_cleanup(Slirp *slirp);
 void icmp_input(struct mbuf *, int);
 void icmp_error(struct mbuf *msrc, u_char type, u_char code, int minsize,
                 const char *message);
diff --git a/slirp/ip_input.c b/slirp/ip_input.c
index c7b3eb4..ce24faf 100644
--- a/slirp/ip_input.c
+++ b/slirp/ip_input.c
@@ -61,6 +61,13 @@ ip_init(Slirp *slirp)
     icmp_init(slirp);
 }
 
+void ip_cleanup(Slirp *slirp)
+{
+    udp_cleanup(slirp);
+    tcp_cleanup(slirp);
+    icmp_cleanup(slirp);
+}
+
 /*
  * Ip input routine. Checksum and byte swap header. If fragmented
  * try to reassemble. Process options. Pass to next level.
diff --git a/slirp/mbuf.c b/slirp/mbuf.c
index c699c75..4fefb04 100644
--- a/slirp/mbuf.c
+++ b/slirp/mbuf.c
@@ -32,6 +32,27 @@ m_init(Slirp *slirp)
     slirp->m_usedlist.m_next = slirp->m_usedlist.m_prev = &slirp->m_usedlist;
 }
 
+void m_cleanup(Slirp *slirp)
+{
+    struct mbuf *m, *next;
+
+    m = slirp->m_usedlist.m_next;
+    while (m != &slirp->m_usedlist) {
+        next = m->m_next;
+        if (m->m_flags & M_EXT) {
+            free(m->m_ext);
+        }
+        free(m);
+        m = next;
+    }
+    m = slirp->m_freelist.m_next;
+    while (m != &slirp->m_freelist) {
+        next = m->m_next;
+        free(m);
+        m = next;
+    }
+}
+
 /*
  * Get an mbuf from the free list, if there are none
  * malloc one
diff --git a/slirp/mbuf.h b/slirp/mbuf.h
index 8d7951f..3f3ab09 100644
--- a/slirp/mbuf.h
+++ b/slirp/mbuf.h
@@ -116,6 +116,7 @@ struct mbuf {
  * it rather than putting it on the free list */
 
 void m_init(Slirp *);
+void m_cleanup(Slirp *slirp);
 struct mbuf * m_get(Slirp *);
 void m_free(struct mbuf *);
 void m_cat(register struct mbuf *, register struct mbuf *);
diff --git a/slirp/slirp.c b/slirp/slirp.c
index 19d69eb..1502830 100644
--- a/slirp/slirp.c
+++ b/slirp/slirp.c
@@ -246,6 +246,9 @@ void slirp_cleanup(Slirp *slirp)
 
     unregister_savevm(NULL, "slirp", slirp);
 
+    ip_cleanup(slirp);
+    m_cleanup(slirp);
+
     g_free(slirp->tftp_prefix);
     g_free(slirp->bootp_filename);
     g_free(slirp);
@@ -581,12 +584,7 @@ void slirp_select_poll(fd_set *readfds, fd_set *writefds, fd_set *xfds,
         }
     }
 
-    /*
-     * See if we can start outputting
-     */
-    if (slirp->if_queued) {
-        if_start(slirp);
-    }
+    if_start(slirp);
 }
 
 /* clear global file descriptor sets.
diff --git a/slirp/slirp.h b/slirp/slirp.h
index 28a5c037..5033ee3 100644
--- a/slirp/slirp.h
+++ b/slirp/slirp.h
@@ -235,10 +235,10 @@ struct Slirp {
     int mbuf_alloced;
 
     /* if states */
-    int if_queued;          /* number of packets queued so far */
     struct mbuf if_fastq;   /* fast queue (for interactive data) */
     struct mbuf if_batchq;  /* queue for non-interactive data */
     struct mbuf *next_m;    /* pointer to next mbuf to output */
+    bool if_start_busy;     /* avoid if_start recursion */
 
     /* ip states */
     struct ipq ipq;         /* ip reass. queue */
@@ -315,6 +315,7 @@ void if_output(struct socket *, struct mbuf *);
 
 /* ip_input.c */
 void ip_init(Slirp *);
+void ip_cleanup(Slirp *);
 void ip_input(struct mbuf *);
 void ip_slowtimo(Slirp *);
 void ip_stripoptions(register struct mbuf *, struct mbuf *);
@@ -332,6 +333,7 @@ void tcp_setpersist(register struct tcpcb *);
 
 /* tcp_subr.c */
 void tcp_init(Slirp *);
+void tcp_cleanup(Slirp *);
 void tcp_template(struct tcpcb *);
 void tcp_respond(struct tcpcb *, register struct tcpiphdr *, register struct mbuf *, tcp_seq, tcp_seq, int);
 struct tcpcb * tcp_newtcpcb(struct socket *);
diff --git a/slirp/tcp_subr.c b/slirp/tcp_subr.c
index 143a238..6f6585a 100644
--- a/slirp/tcp_subr.c
+++ b/slirp/tcp_subr.c
@@ -55,6 +55,13 @@ tcp_init(Slirp *slirp)
     slirp->tcp_last_so = &slirp->tcb;
 }
 
+void tcp_cleanup(Slirp *slirp)
+{
+    while (slirp->tcb.so_next != &slirp->tcb) {
+        tcp_close(sototcpcb(slirp->tcb.so_next));
+    }
+}
+
 /*
  * Create template to be used to send tcp packets on a connection.
  * Call after host entry created, fills
diff --git a/slirp/udp.c b/slirp/udp.c
index 5b060f3..ced5096 100644
--- a/slirp/udp.c
+++ b/slirp/udp.c
@@ -49,6 +49,14 @@ udp_init(Slirp *slirp)
     slirp->udb.so_next = slirp->udb.so_prev = &slirp->udb;
     slirp->udp_last_so = &slirp->udb;
 }
+
+void udp_cleanup(Slirp *slirp)
+{
+    while (slirp->udb.so_next != &slirp->udb) {
+        udp_detach(slirp->udb.so_next);
+    }
+}
+
 /* m->m_data  points at ip packet header
  * m->m_len   length ip packet
  * ip->ip_len length data (IPDU)
diff --git a/slirp/udp.h b/slirp/udp.h
index 9b5c3cf..9bf31fe 100644
--- a/slirp/udp.h
+++ b/slirp/udp.h
@@ -74,6 +74,7 @@ struct udpiphdr {
 struct mbuf;
 
 void udp_init(Slirp *);
+void udp_cleanup(Slirp *);
 void udp_input(register struct mbuf *, int);
 int udp_output(struct socket *, struct mbuf *, struct sockaddr_in *);
 int udp_attach(struct socket *);
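The icmp_cleanup(), tcp_cleanup() and udp_cleanup() helpers added above all follow the same idiom: keep detaching the element right after the head of an intrusive circular list until only the head remains, so each detach both releases the resource and unlinks it. A generic version of that loop is sketched below; node, detach() and list_cleanup() are invented names for the example, while the real code operates on slirp's struct socket lists via icmp_detach(), tcp_close() and udp_detach().

/* Illustrative only: a generic form of the "detach until the list is empty"
 * loops used by the cleanup helpers in this series. */
#include <stdio.h>

struct node {
    struct node *next;
    struct node *prev;
    int id;
};

/* Unlink an element from its circular list (the real detach functions also
 * release whatever the element owns). */
static void detach(struct node *elem)
{
    elem->prev->next = elem->next;
    elem->next->prev = elem->prev;
    elem->next = elem->prev = elem;
    printf("detached %d\n", elem->id);
}

static void list_cleanup(struct node *head)
{
    /* A circular list is empty once the head points back to itself. */
    while (head->next != head) {
        detach(head->next);
    }
}

int main(void)
{
    struct node head = { &head, &head, 0 };
    struct node a = { NULL, NULL, 1 };
    struct node b = { NULL, NULL, 2 };

    /* Insert a and b after the head. */
    a.next = &b;    a.prev = &head;
    b.next = &head; b.prev = &a;
    head.next = &a; head.prev = &b;

    list_cleanup(&head);
    return 0;
}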