author    | Emanuele Giuseppe Esposito <eesposit@redhat.com> | 2022-09-26 05:31:58 -0400
committer | Kevin Wolf <kwolf@redhat.com>                    | 2022-10-07 12:11:41 +0200
commit    | afe1e8a7b3e671993cf55e2321408650c7620999 (patch)
tree      | 8ee15874f7c49500ca8bcb4267fe52dae152ccc5 /include
parent    | fd4b14e299d7def753f2d753e61d125cd5e1cbd8 (diff)
job.c: add job_lock/unlock while keeping job.h intact
By "intact" we mean that all job.h functions implicitly
take the lock, so API callers are left unmodified.
This means that:
- many static functions that will always be called with the job lock held
become _locked, and call _locked functions
- all public functions take the lock internally if needed, and call _locked
functions (see the sketch after this list)
- all public functions called internally by other functions in job.c will have a
_locked counterpart (sometimes public), to avoid deadlocks (job lock already taken).
These functions are not used for now.
- some public functions called only from external files (not job.c) do not
have a _locked() counterpart and take the lock inside. Others won't need
the lock at all because they only use fields that are set at initialization and
never modified.
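The pattern boils down to a thin public wrapper around a _locked worker.
A rough sketch (simplified, not the literal job.c code; the bodies here are
illustrative only):

    /* Does the real work; the caller must already hold the job mutex. */
    void job_pause_locked(Job *job)
    {
        job->pause_count++;
        if (!job->paused) {
            job_enter_cond_locked(job, NULL);
        }
    }

    /* Public API keeps its old signature and takes the lock itself. */
    void job_pause(Job *job)
    {
        JOB_LOCK_GUARD();
        job_pause_locked(job);
    }

Callers inside job.c that already hold the lock call job_pause_locked()
directly, so the lock is never taken twice.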
job_{lock/unlock} is independent of real_job_{lock/unlock}.
Note: at this stage, job_{lock/unlock} and the job lock guard macros
are still *nops*.
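For reference, the lock primitives themselves are still placeholders at this
point in the series, roughly along these lines (a sketch, not the exact
definitions in job.c):

    QemuMutex job_mutex;

    void job_lock(void)
    {
        /* nop for now; a later patch is expected to turn this into
         * qemu_mutex_lock(&job_mutex). */
    }

    void job_unlock(void)
    {
        /* nop for now; later qemu_mutex_unlock(&job_mutex). */
    }

This lets the whole _locked conversion be merged without changing runtime
behaviour until the lock is actually enabled.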
Signed-off-by: Emanuele Giuseppe Esposito <eesposit@redhat.com>
Reviewed-by: Kevin Wolf <kwolf@redhat.com>
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
Reviewed-by: Vladimir Sementsov-Ogievskiy <vsementsov@yandex-team.ru>
Message-Id: <20220926093214.506243-6-eesposit@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
Diffstat (limited to 'include')
-rw-r--r-- | include/qemu/job.h | 138 |
1 file changed, 131 insertions, 7 deletions
diff --git a/include/qemu/job.h b/include/qemu/job.h
index e3e31e2..870dce1 100644
--- a/include/qemu/job.h
+++ b/include/qemu/job.h
@@ -358,8 +358,15 @@ JobTxn *job_txn_new(void);
  */
 void job_txn_unref(JobTxn *txn);
 
+/*
+ * Same as job_txn_unref(), but called with job lock held.
+ * Might release the lock temporarily.
+ */
+void job_txn_unref_locked(JobTxn *txn);
+
 /**
  * Create a new long-running job and return it.
+ * Called with job_mutex *not* held.
  *
  * @job_id: The id of the newly-created job, or %NULL for internal jobs
  * @driver: The class object for the newly-created job.
@@ -380,17 +387,25 @@ void *job_create(const char *job_id, const JobDriver *driver, JobTxn *txn,
  */
 void job_ref(Job *job);
 
+/* Same as job_ref(), but called with job lock held. */
+void job_ref_locked(Job *job);
+
 /**
  * Release a reference that was previously acquired with job_ref() or
  * job_create(). If it's the last reference to the object, it will be freed.
  */
 void job_unref(Job *job);
 
+/* Same as job_unref(), but called with job lock held. */
+void job_unref_locked(Job *job);
+
 /**
  * @job: The job that has made progress
  * @done: How much progress the job made since the last call
  *
  * Updates the progress counter of the job.
+ *
+ * May be called with mutex held or not held.
  */
 void job_progress_update(Job *job, uint64_t done);
 
@@ -401,6 +416,8 @@ void job_progress_update(Job *job, uint64_t done);
  *
  * Sets the expected end value of the progress counter of a job so that a
  * completion percentage can be calculated when the progress is updated.
+ *
+ * May be called with mutex held or not held.
  */
 void job_progress_set_remaining(Job *job, uint64_t remaining);
 
@@ -416,6 +433,8 @@ void job_progress_set_remaining(Job *job, uint64_t remaining);
  * length before, and job_progress_update() afterwards.
  * (So the operation acts as a parenthesis in regards to the main job
  * operation running in background.)
+ *
+ * May be called with mutex held or not held.
  */
 void job_progress_increase_remaining(Job *job, uint64_t delta);
 
@@ -426,11 +445,19 @@ void job_progress_increase_remaining(Job *job, uint64_t delta);
  */
 void job_enter_cond(Job *job, bool(*fn)(Job *job));
 
+/*
+ * Same as job_enter_cond(), but called with job lock held.
+ * Might release the lock temporarily.
+ */
+void job_enter_cond_locked(Job *job, bool(*fn)(Job *job));
+
 /**
  * @job: A job that has not yet been started.
  *
  * Begins execution of a job.
  * Takes ownership of one reference to the job object.
+ *
+ * Called with job_mutex *not* held.
  */
 void job_start(Job *job);
 
@@ -438,6 +465,7 @@ void job_start(Job *job);
  * @job: The job to enter.
  *
  * Continue the specified job by entering the coroutine.
+ * Called with job_mutex *not* held.
  */
 void job_enter(Job *job);
 
@@ -446,6 +474,8 @@ void job_enter(Job *job);
  *
  * Pause now if job_pause() has been called. Jobs that perform lots of I/O
  * must call this between requests so that the job can be paused.
+ *
+ * Called with job_mutex *not* held.
  */
 void coroutine_fn job_pause_point(Job *job);
 
@@ -453,6 +483,7 @@ void coroutine_fn job_pause_point(Job *job);
  * @job: The job that calls the function.
  *
  * Yield the job coroutine.
+ * Called with job_mutex *not* held.
  */
 void coroutine_fn job_yield(Job *job);
 
@@ -463,6 +494,8 @@ void coroutine_fn job_yield(Job *job);
  * Put the job to sleep (assuming that it wasn't canceled) for @ns
  * %QEMU_CLOCK_REALTIME nanoseconds. Canceling the job will immediately
  * interrupt the wait.
+ *
+ * Called with job_mutex *not* held.
  */
 void coroutine_fn job_sleep_ns(Job *job, int64_t ns);
 
@@ -475,21 +508,40 @@ const char *job_type_str(const Job *job);
 /** Returns true if the job should not be visible to the management layer. */
 bool job_is_internal(Job *job);
 
-/** Returns whether the job is being cancelled. */
+/**
+ * Returns whether the job is being cancelled.
+ * Called with job_mutex *not* held.
+ */
 bool job_is_cancelled(Job *job);
 
+/* Same as job_is_cancelled(), but called with job lock held. */
+bool job_is_cancelled_locked(Job *job);
+
 /**
  * Returns whether the job is scheduled for cancellation (at an
  * indefinite point).
+ * Called with job_mutex *not* held.
  */
 bool job_cancel_requested(Job *job);
 
-/** Returns whether the job is in a completed state. */
+/**
+ * Returns whether the job is in a completed state.
+ * Called with job_mutex *not* held.
+ */
 bool job_is_completed(Job *job);
 
-/** Returns whether the job is ready to be completed. */
+/* Same as job_is_completed(), but called with job lock held. */
+bool job_is_completed_locked(Job *job);
+
+/**
+ * Returns whether the job is ready to be completed.
+ * Called with job_mutex *not* held.
+ */
 bool job_is_ready(Job *job);
 
+/* Same as job_is_ready(), but called with job lock held. */
+bool job_is_ready_locked(Job *job);
+
 /**
  * Request @job to pause at the next pause point. Must be paired with
  * job_resume(). If the job is supposed to be resumed by user action, call
@@ -497,24 +549,45 @@ bool job_is_ready(Job *job);
  */
 void job_pause(Job *job);
 
+/* Same as job_pause(), but called with job lock held. */
+void job_pause_locked(Job *job);
+
 /** Resumes a @job paused with job_pause. */
 void job_resume(Job *job);
 
+/*
+ * Same as job_resume(), but called with job lock held.
+ * Might release the lock temporarily.
+ */
+void job_resume_locked(Job *job);
+
 /**
  * Asynchronously pause the specified @job.
  * Do not allow a resume until a matching call to job_user_resume.
  */
 void job_user_pause(Job *job, Error **errp);
 
+/* Same as job_user_pause(), but called with job lock held. */
+void job_user_pause_locked(Job *job, Error **errp);
+
 /** Returns true if the job is user-paused. */
 bool job_user_paused(Job *job);
 
+/* Same as job_user_paused(), but called with job lock held. */
+bool job_user_paused_locked(Job *job);
+
 /**
  * Resume the specified @job.
  * Must be paired with a preceding job_user_pause.
  */
 void job_user_resume(Job *job, Error **errp);
 
+/*
+ * Same as job_user_resume(), but called with job lock held.
+ * Might release the lock temporarily.
+ */
+void job_user_resume_locked(Job *job, Error **errp);
+
 /**
  * Get the next element from the list of block jobs after @job, or the
  * first one if @job is %NULL.
@@ -523,6 +596,9 @@ void job_user_resume(Job *job, Error **errp);
  */
 Job *job_next(Job *job);
 
+/* Same as job_next(), but called with job lock held. */
+Job *job_next_locked(Job *job);
+
 /**
  * Get the job identified by @id (which must not be %NULL).
  *
@@ -530,6 +606,9 @@ Job *job_next(Job *job);
 Job *job_get(const char *id);
 
+/* Same as job_get(), but called with job lock held. */
+Job *job_get_locked(const char *id);
+
 /**
  * Check whether the verb @verb can be applied to @job in its current state.
  * Returns 0 if the verb can be applied; otherwise errp is set and -EPERM
  * returned.
@@ -537,27 +616,48 @@ Job *job_get(const char *id);
 int job_apply_verb(Job *job, JobVerb verb, Error **errp);
 
-/** The @job could not be started, free it. */
+/* Same as job_apply_verb, but called with job lock held. */
+int job_apply_verb_locked(Job *job, JobVerb verb, Error **errp);
+
+/**
+ * The @job could not be started, free it.
+ * Called with job_mutex *not* held.
+ */
 void job_early_fail(Job *job);
 
-/** Moves the @job from RUNNING to READY */
+/**
+ * Moves the @job from RUNNING to READY.
+ * Called with job_mutex *not* held.
+ */
 void job_transition_to_ready(Job *job);
 
 /** Asynchronously complete the specified @job. */
 void job_complete(Job *job, Error **errp);
 
+/*
+ * Same as job_complete(), but called with job lock held.
+ * Might release the lock temporarily.
+ */
+void job_complete_locked(Job *job, Error **errp);
+
 /**
  * Asynchronously cancel the specified @job. If @force is true, the job should
  * be cancelled immediately without waiting for a consistent state.
  */
 void job_cancel(Job *job, bool force);
 
+/* Same as job_cancel(), but called with job lock held. */
+void job_cancel_locked(Job *job, bool force);
+
 /**
  * Cancels the specified job like job_cancel(), but may refuse to do so if the
  * operation isn't meaningful in the current state of the job.
  */
 void job_user_cancel(Job *job, bool force, Error **errp);
 
+/* Same as job_user_cancel(), but called with job lock held. */
+void job_user_cancel_locked(Job *job, bool force, Error **errp);
+
 /**
  * Synchronously cancel the @job. The completion callback is called
  * before the function returns. If @force is false, the job may
@@ -571,7 +671,14 @@ void job_user_cancel(Job *job, bool force, Error **errp);
  */
 int job_cancel_sync(Job *job, bool force);
 
-/** Synchronously force-cancels all jobs using job_cancel_sync(). */
+/* Same as job_cancel_sync, but called with job lock held. */
+int job_cancel_sync_locked(Job *job, bool force);
+
+/**
+ * Synchronously force-cancels all jobs using job_cancel_sync_locked().
+ *
+ * Called with job_lock *not* held.
+ */
 void job_cancel_sync_all(void);
 
 /**
@@ -590,6 +697,9 @@ void job_cancel_sync_all(void);
  */
 int job_complete_sync(Job *job, Error **errp);
 
+/* Same as job_complete_sync, but called with job lock held. */
+int job_complete_sync_locked(Job *job, Error **errp);
+
 /**
  * For a @job that has finished its work and is pending awaiting explicit
  * acknowledgement to commit its work, this will commit that work.
@@ -600,12 +710,18 @@ int job_complete_sync(Job *job, Error **errp);
  */
 void job_finalize(Job *job, Error **errp);
 
+/* Same as job_finalize(), but called with job lock held. */
+void job_finalize_locked(Job *job, Error **errp);
+
 /**
  * Remove the concluded @job from the query list and resets the passed pointer
  * to %NULL. Returns an error if the job is not actually concluded.
  */
 void job_dismiss(Job **job, Error **errp);
 
+/* Same as job_dismiss(), but called with job lock held. */
+void job_dismiss_locked(Job **job, Error **errp);
+
 /**
  * Synchronously finishes the given @job. If @finish is given, it is called to
  * trigger completion or cancellation of the job.
@@ -615,6 +731,14 @@ void job_dismiss(Job **job, Error **errp);
  *
  * Callers must hold the AioContext lock of job->aio_context.
  */
-int job_finish_sync(Job *job, void (*finish)(Job *, Error **errp), Error **errp);
+int job_finish_sync(Job *job, void (*finish)(Job *, Error **errp),
+                    Error **errp);
+
+/*
+ * Same as job_finish_sync(), but called with job lock held.
+ * Might release the lock temporarily.
+ */
+int job_finish_sync_locked(Job *job, void (*finish)(Job *, Error **errp),
+                           Error **errp);
 
 #endif
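For code outside job.c the public API is unchanged: job_pause(job) keeps
working exactly as before. Once the lock becomes real, a caller that needs
several operations to happen atomically can hold the lock across them and use
the _locked variants declared above. A hypothetical example (the job id and
the surrounding logic are invented for illustration):

    WITH_JOB_LOCK_GUARD() {
        Job *job = job_get_locked("backup0");   /* hypothetical job id */

        if (job && !job_is_completed_locked(job)) {
            job_pause_locked(job);
        }
    }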