path: root/tests/tcg/multiarch/sigbus.c

#define _GNU_SOURCE 1
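
/*
 * Exercise SIGBUS for a misaligned access: install a handler, perform
 * a load through a deliberately misaligned pointer, and check the
 * siginfo_t that is delivered (or, on the fallback path, the value
 * the unaligned load produced).
 */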

#include <assert.h>
#include <stdlib.h>
#include <signal.h>
#include <endian.h>


unsigned long long x = 0x8877665544332211ull;
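/* Point one byte into x so that any 32-bit access through p is misaligned. */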
void * volatile p = (void *)&x + 1;

void sigbus(int sig, siginfo_t *info, void *uc)
{
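    /*
     * Expected path when the chosen instruction faults on the misaligned
     * address: verify the delivered siginfo, then report success.
     */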
    assert(sig == SIGBUS);
    assert(info->si_signo == SIGBUS);
#ifdef BUS_ADRALN
    assert(info->si_code == BUS_ADRALN);
#endif
    assert(info->si_addr == p);
    exit(EXIT_SUCCESS);
}

int main()
{
    struct sigaction sa = {
        .sa_sigaction = sigbus,
        .sa_flags = SA_SIGINFO
    };
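    /*
     * allow_fail is set only on the fallback path, where a plain
     * unaligned load may legitimately succeed without raising SIGBUS;
     * tmp doubles as the sigaction() status and the loaded word.
     */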
    int allow_fail = 0;
    int tmp;

    tmp = sigaction(SIGBUS, &sa, NULL);
    assert(tmp == 0);

    /*
     * Select an operation that's likely to enforce alignment.
     * Even on guests that accept unaligned plain loads by default,
     * the atomic/exclusive load instructions generally still require
     * natural alignment, so use one of those where we know how.
     */
#if defined(__aarch64__)
    asm volatile("ldxr %w0,[%1]" : "=r"(tmp) : "r"(p) : "memory");
#elif defined(__alpha__)
    asm volatile("ldl_l %0,0(%1)" : "=r"(tmp) : "r"(p) : "memory");
#elif defined(__arm__)
    asm volatile("ldrex %0,[%1]" : "=r"(tmp) : "r"(p) : "memory");
#elif defined(__powerpc__)
    asm volatile("lwarx %0,0,%1" : "=r"(tmp) : "r"(p) : "memory");
#elif defined(__riscv_atomic)
    asm volatile("lr.w %0,(%1)" : "=r"(tmp) : "r"(p) : "memory");
#else
    /* No insn known to fault unaligned -- try for a straight load. */
    allow_fail = 1;
    tmp = *(volatile int *)p;
#endif

    assert(allow_fail);

    /*
     * We did not see a signal, which the assert above permits only on
     * the fallback path with a plain load.  Validate that the unaligned
     * load at least produced the expected value.
     */
    if (BYTE_ORDER == LITTLE_ENDIAN) {
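        /* Bytes 1..4 of x are 22 33 44 55; as little-endian that is 0x55443322. */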
        assert(tmp == 0x55443322);
    } else {
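        /* Big-endian layout puts 77 66 55 44 at offset 1, i.e. 0x77665544. */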
        assert(tmp == 0x77665544);
    }
    return EXIT_SUCCESS;
}