author     Jason Wang <jasowang@redhat.com>        2013-02-22 23:15:06 +0800
committer  Stefan Hajnoczi <stefanha@redhat.com>   2013-02-27 16:10:47 +0100
commit     f6b26cf257232e5854c0e5c98a8685c625bf986e (patch)
tree       68334c52f1cd34d9ffeaff5be764f1735c011da1 /net/net.c
parent     d26e445c80fddcc7483b83f3115e5067fef28fe6 (diff)
net: reduce the unnecessary memory allocation of multiqueue
Edivaldo reports that the static array of NetClientState in NICState is too
large - MAX_QUEUE_NUM (1024) entries - which wastes memory even when
multiqueue is not used.
Solve this by allocating the queues on demand (sketched below) instead of
using static arrays, for both the NetClientState array in NICState and the
VirtIONetQueue array in VirtIONet.
Tested by myself with a single virtio-net-pci device; memory allocation is
almost the same as before multiqueue was merged.
Cc: Edivaldo de Araujo Pereira <edivaldoapereira@yahoo.com.br>
Cc: qemu-stable@nongnu.org
Signed-off-by: Jason Wang <jasowang@redhat.com>
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
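
For illustration, here is a minimal standalone sketch of the co-allocation
scheme the patch adopts. The types are simplified, nic_alloc is a hypothetical
name, and info_size stands in for info->size from the patch:

#include <glib.h>
#include <stddef.h>

typedef struct NetClientState {
    int queue_index;
    /* ... per-queue state ... */
} NetClientState;

typedef struct NICState {
    NetClientState *ncs;   /* points just past the NICState itself */
    /* ... device state ... */
} NICState;

/* One allocation holds the NICState (or a larger subclass of size
 * info_size) followed by exactly `queues` NetClientState slots,
 * instead of a fixed MAX_QUEUE_NUM array. */
static NICState *nic_alloc(size_t info_size, int queues)
{
    NICState *nic = g_malloc0(info_size + sizeof(NetClientState) * queues);
    nic->ncs = (void *)nic + info_size;  /* GCC void* arithmetic, as in QEMU */
    return nic;
}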
Diffstat (limited to 'net/net.c')
-rw-r--r--  net/net.c | 19 +++++++++----------
1 file changed, 9 insertions(+), 10 deletions(-)
@@ -235,23 +235,20 @@ NICState *qemu_new_nic(NetClientInfo *info,
                        const char *name,
                        void *opaque)
 {
-    NetClientState *nc;
     NetClientState **peers = conf->peers.ncs;
     NICState *nic;
-    int i;
+    int i, queues = MAX(1, conf->queues);
 
     assert(info->type == NET_CLIENT_OPTIONS_KIND_NIC);
     assert(info->size >= sizeof(NICState));
 
-    nc = qemu_new_net_client(info, peers[0], model, name);
-    nc->queue_index = 0;
-
-    nic = qemu_get_nic(nc);
+    nic = g_malloc0(info->size + sizeof(NetClientState) * queues);
+    nic->ncs = (void *)nic + info->size;
     nic->conf = conf;
     nic->opaque = opaque;
 
-    for (i = 1; i < conf->queues; i++) {
-        qemu_net_client_setup(&nic->ncs[i], info, peers[i], model, nc->name,
+    for (i = 0; i < queues; i++) {
+        qemu_net_client_setup(&nic->ncs[i], info, peers[i], model, name,
                               NULL);
         nic->ncs[i].queue_index = i;
     }
@@ -261,7 +258,7 @@ NICState *qemu_new_nic(NetClientInfo *info,
 
 NetClientState *qemu_get_subqueue(NICState *nic, int queue_index)
 {
-    return &nic->ncs[queue_index];
+    return nic->ncs + queue_index;
 }
 
 NetClientState *qemu_get_queue(NICState *nic)
@@ -273,7 +270,7 @@ NICState *qemu_get_nic(NetClientState *nc)
 {
     NetClientState *nc0 = nc - nc->queue_index;
 
-    return DO_UPCAST(NICState, ncs[0], nc0);
+    return (NICState *)((void *)nc0 - nc->info->size);
 }
 
 void *qemu_get_nic_opaque(NetClientState *nc)
@@ -368,6 +365,8 @@ void qemu_del_nic(NICState *nic)
         qemu_cleanup_net_client(nc);
         qemu_free_net_client(nc);
     }
+
+    g_free(nic);
 }
 
 void qemu_foreach_nic(qemu_nic_foreach func, void *opaque)
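
The DO_UPCAST replacement in qemu_get_nic depends on this layout. Continuing
the simplified sketch above (nic_from_queue is a hypothetical name, with
info_size standing in for nc->info->size):

/* Recover the owning NICState from any queue's NetClientState: step
 * back queue_index slots to ncs[0], then back past the header. */
static NICState *nic_from_queue(NetClientState *nc, size_t info_size)
{
    NetClientState *nc0 = nc - nc->queue_index;
    return (NICState *)((void *)nc0 - info_size);
}

Because the queues live in the same allocation as the NICState, the single
g_free(nic) added to qemu_del_nic releases the device state and every queue
at once.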