hw/rdma/rdma_utils.c
/*
 * QEMU paravirtual RDMA - Generic RDMA backend
 *
 * Copyright (C) 2018 Oracle
 * Copyright (C) 2018 Red Hat Inc
 *
 * Authors:
 *     Yuval Shaia <yuval.shaia@oracle.com>
 *     Marcel Apfelbaum <marcel@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "hw/pci/pci_device.h"
#include "trace.h"
#include "rdma_utils.h"

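/*
 * Map a guest physical range for device access and return a host pointer.
 * Returns NULL on a zero address, on a mapping failure, or when only part
 * of the requested length could be mapped (a partial mapping is released
 * before returning).
 */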
void *rdma_pci_dma_map(PCIDevice *dev, dma_addr_t addr, dma_addr_t len)
{
    void *p;
    dma_addr_t pci_len = len;

    if (!addr) {
        rdma_error_report("addr is NULL");
        return NULL;
    }

    p = pci_dma_map(dev, addr, &pci_len, DMA_DIRECTION_TO_DEVICE);
    if (!p) {
        rdma_error_report("pci_dma_map fail, addr=0x%"PRIx64", len=%"PRId64,
                          addr, pci_len);
        return NULL;
    }

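    /*
     * pci_dma_map() may shrink pci_len when the guest range is not backed
     * by a single contiguous host region; callers expect the whole buffer,
     * so treat a partial mapping as a failure and release it.
     */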
    if (pci_len != len) {
        rdma_pci_dma_unmap(dev, p, pci_len);
        return NULL;
    }

    trace_rdma_pci_dma_map(addr, p, pci_len);

    return p;
}

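/*
 * Release a mapping returned by rdma_pci_dma_map(). A NULL buffer is
 * ignored, so this is safe to call on the failure path as well.
 */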
void rdma_pci_dma_unmap(PCIDevice *dev, void *buffer, dma_addr_t len)
{
    trace_rdma_pci_dma_unmap(buffer);
    if (buffer) {
        pci_dma_unmap(dev, buffer, len, DMA_DIRECTION_TO_DEVICE, 0);
    }
}

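/*
 * RdmaProtectedGQueue: a GQueue guarded by a mutex so int64 values can be
 * shared safely across threads. Values are heap-copied on append and freed
 * on pop or destroy.
 */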
void rdma_protected_gqueue_init(RdmaProtectedGQueue *list)
{
    qemu_mutex_init(&list->lock);
    list->list = g_queue_new();
}

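/* Free any queued values and the queue itself; safe to call more than once. */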
void rdma_protected_gqueue_destroy(RdmaProtectedGQueue *list)
{
    if (list->list) {
        g_queue_free_full(list->list, g_free);
        qemu_mutex_destroy(&list->lock);
        list->list = NULL;
    }
}

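/* Push a copy of @value to the tail of the queue under the list lock. */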
void rdma_protected_gqueue_append_int64(RdmaProtectedGQueue *list,
                                        int64_t value)
{
    qemu_mutex_lock(&list->lock);
    g_queue_push_tail(list->list, g_memdup(&value, sizeof(value)));
    qemu_mutex_unlock(&list->lock);
}

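/*
 * Pop the head of the queue and return its value, or -ENOENT if the queue
 * is empty. Note that a stored value of -ENOENT is indistinguishable from
 * an empty queue.
 */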
int64_t rdma_protected_gqueue_pop_int64(RdmaProtectedGQueue *list)
{
    int64_t *valp;
    int64_t val;

    qemu_mutex_lock(&list->lock);

    valp = g_queue_pop_head(list->list);
    qemu_mutex_unlock(&list->lock);

    if (!valp) {
        return -ENOENT;
    }

    val = *valp;
    g_free(valp);
    return val;
}

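/*
 * RdmaProtectedGSList: a GSList guarded by a mutex, storing int32 values
 * packed directly into the list pointers (no per-element allocation).
 */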
void rdma_protected_gslist_init(RdmaProtectedGSList *list)
{
    qemu_mutex_init(&list->lock);
}

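/* Free the list links (there is no per-element data) and destroy the lock. */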
void rdma_protected_gslist_destroy(RdmaProtectedGSList *list)
{
    if (list->list) {
        g_slist_free(list->list);
        qemu_mutex_destroy(&list->lock);
        list->list = NULL;
    }
}

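/*
 * Add @value to the list under the lock. Despite the name, the element is
 * prepended, so insertion order is not preserved.
 */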
void rdma_protected_gslist_append_int32(RdmaProtectedGSList *list,
                                        int32_t value)
{
    qemu_mutex_lock(&list->lock);
    list->list = g_slist_prepend(list->list, GINT_TO_POINTER(value));
    qemu_mutex_unlock(&list->lock);
}

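/* Remove the first element matching @value, if present, under the lock. */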
void rdma_protected_gslist_remove_int32(RdmaProtectedGSList *list,
                                        int32_t value)
{
    qemu_mutex_lock(&list->lock);
    list->list = g_slist_remove(list->list, GINT_TO_POINTER(value));
    qemu_mutex_unlock(&list->lock);
}