author    Paolo Bonzini <pbonzini@redhat.com>  2021-07-27 17:55:41 +0200
committer Paolo Bonzini <pbonzini@redhat.com>  2021-07-30 12:04:01 +0200
commit    05ad6857a57238c27df84f6c0c1943dd162a82ad (patch)
tree      d7dc6046c498ac8486b8eb769335fbc19058c511 /scripts/coverity-scan
parent    96915d638cb83aa139e39096815b8dd9832f264b (diff)
coverity-model: clean up the models for array allocation functions
sz is only used in one place, so replace it with nmemb * size in that one place.

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
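For orientation, here is how the simplified g_malloc_n() model reads after this change (taken from the diff below); the comments are an informal gloss on what each Coverity modeling primitive expresses, not part of the patch, and the other two allocators follow the same pattern:

void *g_malloc_n(size_t nmemb, size_t size)
{
    void *ptr;

    /* tell Coverity the element count and size must not be negative */
    __coverity_negative_sink__(nmemb);
    __coverity_negative_sink__(size);
    /* model an allocation of nmemb * size bytes */
    ptr = __coverity_alloc__(nmemb * size);
    /* the new buffer starts out uninitialized */
    __coverity_mark_as_uninitialized_buffer__(ptr);
    /* it must eventually be released by a free-class function (AFM_free) */
    __coverity_mark_as_afm_allocated__(ptr, AFM_free);
    return ptr;
}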
Diffstat (limited to 'scripts/coverity-scan')
-rw-r--r--  scripts/coverity-scan/model.c | 13
1 file changed, 3 insertions(+), 10 deletions(-)
diff --git a/scripts/coverity-scan/model.c b/scripts/coverity-scan/model.c
index 1a5f39d..2d384bd 100644
--- a/scripts/coverity-scan/model.c
+++ b/scripts/coverity-scan/model.c
@@ -178,13 +178,11 @@ uint8_t replay_get_byte(void)

 void *g_malloc_n(size_t nmemb, size_t size)
 {
-    size_t sz;
     void *ptr;

     __coverity_negative_sink__(nmemb);
     __coverity_negative_sink__(size);
-    sz = nmemb * size;
-    ptr = __coverity_alloc__(sz);
+    ptr = __coverity_alloc__(nmemb * size);
     __coverity_mark_as_uninitialized_buffer__(ptr);
     __coverity_mark_as_afm_allocated__(ptr, AFM_free);
     return ptr;
@@ -192,13 +190,11 @@ void *g_malloc_n(size_t nmemb, size_t size)

 void *g_malloc0_n(size_t nmemb, size_t size)
 {
-    size_t sz;
     void *ptr;

     __coverity_negative_sink__(nmemb);
     __coverity_negative_sink__(size);
-    sz = nmemb * size;
-    ptr = __coverity_alloc__(sz);
+    ptr = __coverity_alloc__(nmemb * size);
     __coverity_writeall0__(ptr);
     __coverity_mark_as_afm_allocated__(ptr, AFM_free);
     return ptr;
@@ -206,13 +202,10 @@ void *g_malloc0_n(size_t nmemb, size_t size)

 void *g_realloc_n(void *ptr, size_t nmemb, size_t size)
 {
-    size_t sz;
-
     __coverity_negative_sink__(nmemb);
     __coverity_negative_sink__(size);
-    sz = nmemb * size;
     __coverity_escape__(ptr);
-    ptr = __coverity_alloc__(sz);
+    ptr = __coverity_alloc__(nmemb * size);
     /*
      * Memory beyond the old size isn't actually initialized. Can't
      * model that. See Coverity's realloc() model