mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
synced 2026-05-08 16:22:41 -04:00
KVM: s390: Use try_cmpxchg() instead of cmpxchg() loops
Convert all cmpxchg() loops to try_cmpxchg() loops. With gcc 14 and the usage of flag output operands in try_cmpxchg() this allows the compiler to generate slightly better code. Acked-by: Claudio Imbrenda <imbrenda@linux.ibm.com> Acked-by: Janosch Frank <frankja@linux.ibm.com> Link: https://lore.kernel.org/r/20241126102515.3178914-2-hca@linux.ibm.com Signed-off-by: Heiko Carstens <hca@linux.ibm.com>
This commit is contained in:
@@ -129,8 +129,8 @@ static void ipte_lock_simple(struct kvm *kvm)
 retry:
 	read_lock(&kvm->arch.sca_lock);
 	ic = kvm_s390_get_ipte_control(kvm);
+	old = READ_ONCE(*ic);
 	do {
-		old = READ_ONCE(*ic);
 		if (old.k) {
 			read_unlock(&kvm->arch.sca_lock);
 			cond_resched();
@@ -138,7 +138,7 @@ static void ipte_lock_simple(struct kvm *kvm)
 		}
 		new = old;
 		new.k = 1;
-	} while (cmpxchg(&ic->val, old.val, new.val) != old.val);
+	} while (!try_cmpxchg(&ic->val, &old.val, new.val));
 	read_unlock(&kvm->arch.sca_lock);
 out:
 	mutex_unlock(&kvm->arch.ipte_mutex);
@@ -154,11 +154,11 @@ static void ipte_unlock_simple(struct kvm *kvm)
 		goto out;
 	read_lock(&kvm->arch.sca_lock);
 	ic = kvm_s390_get_ipte_control(kvm);
+	old = READ_ONCE(*ic);
 	do {
-		old = READ_ONCE(*ic);
 		new = old;
 		new.k = 0;
-	} while (cmpxchg(&ic->val, old.val, new.val) != old.val);
+	} while (!try_cmpxchg(&ic->val, &old.val, new.val));
 	read_unlock(&kvm->arch.sca_lock);
 	wake_up(&kvm->arch.ipte_wq);
 out:
@@ -172,8 +172,8 @@ static void ipte_lock_siif(struct kvm *kvm)
 retry:
 	read_lock(&kvm->arch.sca_lock);
 	ic = kvm_s390_get_ipte_control(kvm);
+	old = READ_ONCE(*ic);
 	do {
-		old = READ_ONCE(*ic);
 		if (old.kg) {
 			read_unlock(&kvm->arch.sca_lock);
 			cond_resched();
@@ -182,7 +182,7 @@ static void ipte_lock_siif(struct kvm *kvm)
 		new = old;
 		new.k = 1;
 		new.kh++;
-	} while (cmpxchg(&ic->val, old.val, new.val) != old.val);
+	} while (!try_cmpxchg(&ic->val, &old.val, new.val));
 	read_unlock(&kvm->arch.sca_lock);
 }
 
@@ -192,13 +192,13 @@ static void ipte_unlock_siif(struct kvm *kvm)
 
 	read_lock(&kvm->arch.sca_lock);
 	ic = kvm_s390_get_ipte_control(kvm);
+	old = READ_ONCE(*ic);
 	do {
-		old = READ_ONCE(*ic);
 		new = old;
 		new.kh--;
 		if (!new.kh)
 			new.k = 0;
-	} while (cmpxchg(&ic->val, old.val, new.val) != old.val);
+	} while (!try_cmpxchg(&ic->val, &old.val, new.val));
 	read_unlock(&kvm->arch.sca_lock);
 	if (!new.kh)
 		wake_up(&kvm->arch.ipte_wq);
@@ -247,12 +247,12 @@ static inline int gisa_set_iam(struct kvm_s390_gisa *gisa, u8 iam)
 {
 	u64 word, _word;
 
+	word = READ_ONCE(gisa->u64.word[0]);
 	do {
-		word = READ_ONCE(gisa->u64.word[0]);
 		if ((u64)gisa != word >> 32)
 			return -EBUSY;
 		_word = (word & ~0xffUL) | iam;
-	} while (cmpxchg(&gisa->u64.word[0], word, _word) != word);
+	} while (!try_cmpxchg(&gisa->u64.word[0], &word, _word));
 
 	return 0;
 }
@@ -270,10 +270,10 @@ static inline void gisa_clear_ipm(struct kvm_s390_gisa *gisa)
 {
 	u64 word, _word;
 
+	word = READ_ONCE(gisa->u64.word[0]);
 	do {
-		word = READ_ONCE(gisa->u64.word[0]);
 		_word = word & ~(0xffUL << 24);
-	} while (cmpxchg(&gisa->u64.word[0], word, _word) != word);
+	} while (!try_cmpxchg(&gisa->u64.word[0], &word, _word));
 }
 
 /**
@@ -291,14 +291,14 @@ static inline u8 gisa_get_ipm_or_restore_iam(struct kvm_s390_gisa_interrupt *gi)
 	u8 pending_mask, alert_mask;
 	u64 word, _word;
 
+	word = READ_ONCE(gi->origin->u64.word[0]);
 	do {
-		word = READ_ONCE(gi->origin->u64.word[0]);
 		alert_mask = READ_ONCE(gi->alert.mask);
 		pending_mask = (u8)(word >> 24) & alert_mask;
 		if (pending_mask)
 			return pending_mask;
 		_word = (word & ~0xffUL) | alert_mask;
-	} while (cmpxchg(&gi->origin->u64.word[0], word, _word) != word);
+	} while (!try_cmpxchg(&gi->origin->u64.word[0], &word, _word));
 
 	return 0;
 }
@@ -1907,11 +1907,11 @@ static void kvm_s390_update_topology_change_report(struct kvm *kvm, bool val)
 
 	read_lock(&kvm->arch.sca_lock);
 	sca = kvm->arch.sca;
+	old = READ_ONCE(sca->utility);
 	do {
-		old = READ_ONCE(sca->utility);
 		new = old;
 		new.mtcr = val;
-	} while (cmpxchg(&sca->utility.val, old.val, new.val) != old.val);
+	} while (!try_cmpxchg(&sca->utility.val, &old.val, new.val));
 	read_unlock(&kvm->arch.sca_lock);
 }
 
@@ -208,13 +208,12 @@ static inline int account_mem(unsigned long nr_pages)
 
 	page_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
 
+	cur_pages = atomic_long_read(&user->locked_vm);
 	do {
-		cur_pages = atomic_long_read(&user->locked_vm);
 		new_pages = cur_pages + nr_pages;
 		if (new_pages > page_limit)
 			return -ENOMEM;
-	} while (atomic_long_cmpxchg(&user->locked_vm, cur_pages,
-				     new_pages) != cur_pages);
+	} while (!atomic_long_try_cmpxchg(&user->locked_vm, &cur_pages, new_pages));
 
 	atomic64_add(nr_pages, &current->mm->pinned_vm);
 
||||
Reference in New Issue
Block a user