Skip to content

Commit e0c26d4

Browse files
committed
Merge tag 'kvm-s390-next-6.19-1' of https://git.kernel.org/pub/scm/linux/kernel/git/kvms390/linux into HEAD
- SCA rework
- VIRT_XFER_TO_GUEST_WORK support
- Operation exception forwarding support
- Cleanups
2 parents f58e70c + 2bd1337 commit e0c26d4

File tree

15 files changed

+271
-271
lines changed

15 files changed

+271
-271
lines changed

Documentation/virt/kvm/api.rst

Lines changed: 17 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -7855,7 +7855,7 @@ where 0xff represents CPUs 0-7 in cluster 0.
78557855
:Architectures: s390
78567856
:Parameters: none
78577857

7858-
With this capability enabled, all illegal instructions 0x0000 (2 bytes) will
7858+
With this capability enabled, the illegal instruction 0x0000 (2 bytes) will
78597859
be intercepted and forwarded to user space. User space can use this
78607860
mechanism e.g. to realize 2-byte software breakpoints. The kernel will
78617861
not inject an operation exception for these instructions, user space has
@@ -8727,7 +8727,7 @@ given VM.
87278727
When this capability is enabled, KVM resets the VCPU when setting
87288728
MP_STATE_INIT_RECEIVED through IOCTL. The original MP_STATE is preserved.
87298729

8730-
7.43 KVM_CAP_ARM_CACHEABLE_PFNMAP_SUPPORTED
8730+
7.44 KVM_CAP_ARM_CACHEABLE_PFNMAP_SUPPORTED
87318731
-------------------------------------------
87328732

87338733
:Architectures: arm64
@@ -8750,6 +8750,21 @@ When this capability is enabled, KVM may exit to userspace for SEAs taken to
87508750
EL2 resulting from a guest access. See ``KVM_EXIT_ARM_SEA`` for more
87518751
information.
87528752

8753+
7.46 KVM_CAP_S390_USER_OPEREXEC
8754+
-------------------------------
8755+
8756+
:Architectures: s390
8757+
:Parameters: none
8758+
8759+
When this capability is enabled, KVM forwards all operation exceptions
8760+
that it doesn't handle itself to user space. This also includes the
8761+
0x0000 instructions managed by KVM_CAP_S390_USER_INSTR0. This is
8762+
helpful if user space wants to emulate instructions which are not
8763+
(yet) implemented in hardware.
8764+
8765+
This capability can be enabled dynamically even if VCPUs were already
8766+
created and are running.
8767+
87538768
8. Other capabilities.
87548769
======================
87558770

arch/s390/include/asm/kvm_host.h

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -146,6 +146,7 @@ struct kvm_vcpu_stat {
146146
u64 instruction_diagnose_500;
147147
u64 instruction_diagnose_other;
148148
u64 pfault_sync;
149+
u64 signal_exits;
149150
};
150151

151152
#define PGM_OPERATION 0x01
@@ -631,10 +632,8 @@ struct kvm_s390_pv {
631632
struct mmu_notifier mmu_notifier;
632633
};
633634

634-
struct kvm_arch{
635-
void *sca;
636-
int use_esca;
637-
rwlock_t sca_lock;
635+
struct kvm_arch {
636+
struct esca_block *sca;
638637
debug_info_t *dbf;
639638
struct kvm_s390_float_interrupt float_int;
640639
struct kvm_device *flic;
@@ -650,6 +649,7 @@ struct kvm_arch{
650649
int user_sigp;
651650
int user_stsi;
652651
int user_instr0;
652+
int user_operexec;
653653
struct s390_io_adapter *adapters[MAX_S390_IO_ADAPTERS];
654654
wait_queue_head_t ipte_wq;
655655
int ipte_lock_count;

arch/s390/include/asm/stacktrace.h

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -66,6 +66,7 @@ struct stack_frame {
6666
unsigned long sie_flags;
6767
unsigned long sie_control_block_phys;
6868
unsigned long sie_guest_asce;
69+
unsigned long sie_irq;
6970
};
7071
};
7172
unsigned long gprs[10];

arch/s390/kernel/asm-offsets.c

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -64,6 +64,7 @@ int main(void)
6464
OFFSET(__SF_SIE_FLAGS, stack_frame, sie_flags);
6565
OFFSET(__SF_SIE_CONTROL_PHYS, stack_frame, sie_control_block_phys);
6666
OFFSET(__SF_SIE_GUEST_ASCE, stack_frame, sie_guest_asce);
67+
OFFSET(__SF_SIE_IRQ, stack_frame, sie_irq);
6768
DEFINE(STACK_FRAME_OVERHEAD, sizeof(struct stack_frame));
6869
BLANK();
6970
OFFSET(__SFUSER_BACKCHAIN, stack_frame_user, back_chain);

arch/s390/kernel/entry.S

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -189,6 +189,7 @@ SYM_FUNC_START(__sie64a)
189189
mvc __SF_SIE_FLAGS(8,%r15),__TI_flags(%r14) # copy thread flags
190190
lmg %r0,%r13,0(%r4) # load guest gprs 0-13
191191
mvi __TI_sie(%r14),1
192+
stosm __SF_SIE_IRQ(%r15),0x03 # enable interrupts
192193
lctlg %c1,%c1,__SF_SIE_GUEST_ASCE(%r15) # load primary asce
193194
lg %r14,__SF_SIE_CONTROL(%r15) # get control block pointer
194195
oi __SIE_PROG0C+3(%r14),1 # we are going into SIE now
@@ -212,6 +213,7 @@ SYM_FUNC_START(__sie64a)
212213
lg %r14,__LC_CURRENT(%r14)
213214
mvi __TI_sie(%r14),0
214215
SYM_INNER_LABEL(sie_exit, SYM_L_GLOBAL)
216+
stnsm __SF_SIE_IRQ(%r15),0xfc # disable interrupts
215217
lg %r14,__SF_SIE_SAVEAREA(%r15) # load guest register save area
216218
stmg %r0,%r13,0(%r14) # save guest gprs 0-13
217219
xgr %r0,%r0 # clear guest registers to

arch/s390/kvm/Kconfig

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -29,6 +29,7 @@ config KVM
2929
select HAVE_KVM_NO_POLL
3030
select KVM_VFIO
3131
select MMU_NOTIFIER
32+
select VIRT_XFER_TO_GUEST_WORK
3233
help
3334
Support hosting paravirtualized guest machines using the SIE
3435
virtualization capability on the mainframe. This should work

arch/s390/kvm/gaccess.c

Lines changed: 6 additions & 21 deletions
Original file line numberDiff line numberDiff line change
@@ -109,14 +109,9 @@ struct aste {
109109

110110
int ipte_lock_held(struct kvm *kvm)
111111
{
112-
if (sclp.has_siif) {
113-
int rc;
112+
if (sclp.has_siif)
113+
return kvm->arch.sca->ipte_control.kh != 0;
114114

115-
read_lock(&kvm->arch.sca_lock);
116-
rc = kvm_s390_get_ipte_control(kvm)->kh != 0;
117-
read_unlock(&kvm->arch.sca_lock);
118-
return rc;
119-
}
120115
return kvm->arch.ipte_lock_count != 0;
121116
}
122117

@@ -129,19 +124,16 @@ static void ipte_lock_simple(struct kvm *kvm)
129124
if (kvm->arch.ipte_lock_count > 1)
130125
goto out;
131126
retry:
132-
read_lock(&kvm->arch.sca_lock);
133-
ic = kvm_s390_get_ipte_control(kvm);
127+
ic = &kvm->arch.sca->ipte_control;
134128
old = READ_ONCE(*ic);
135129
do {
136130
if (old.k) {
137-
read_unlock(&kvm->arch.sca_lock);
138131
cond_resched();
139132
goto retry;
140133
}
141134
new = old;
142135
new.k = 1;
143136
} while (!try_cmpxchg(&ic->val, &old.val, new.val));
144-
read_unlock(&kvm->arch.sca_lock);
145137
out:
146138
mutex_unlock(&kvm->arch.ipte_mutex);
147139
}
@@ -154,14 +146,12 @@ static void ipte_unlock_simple(struct kvm *kvm)
154146
kvm->arch.ipte_lock_count--;
155147
if (kvm->arch.ipte_lock_count)
156148
goto out;
157-
read_lock(&kvm->arch.sca_lock);
158-
ic = kvm_s390_get_ipte_control(kvm);
149+
ic = &kvm->arch.sca->ipte_control;
159150
old = READ_ONCE(*ic);
160151
do {
161152
new = old;
162153
new.k = 0;
163154
} while (!try_cmpxchg(&ic->val, &old.val, new.val));
164-
read_unlock(&kvm->arch.sca_lock);
165155
wake_up(&kvm->arch.ipte_wq);
166156
out:
167157
mutex_unlock(&kvm->arch.ipte_mutex);
@@ -172,36 +162,31 @@ static void ipte_lock_siif(struct kvm *kvm)
172162
union ipte_control old, new, *ic;
173163

174164
retry:
175-
read_lock(&kvm->arch.sca_lock);
176-
ic = kvm_s390_get_ipte_control(kvm);
165+
ic = &kvm->arch.sca->ipte_control;
177166
old = READ_ONCE(*ic);
178167
do {
179168
if (old.kg) {
180-
read_unlock(&kvm->arch.sca_lock);
181169
cond_resched();
182170
goto retry;
183171
}
184172
new = old;
185173
new.k = 1;
186174
new.kh++;
187175
} while (!try_cmpxchg(&ic->val, &old.val, new.val));
188-
read_unlock(&kvm->arch.sca_lock);
189176
}
190177

191178
static void ipte_unlock_siif(struct kvm *kvm)
192179
{
193180
union ipte_control old, new, *ic;
194181

195-
read_lock(&kvm->arch.sca_lock);
196-
ic = kvm_s390_get_ipte_control(kvm);
182+
ic = &kvm->arch.sca->ipte_control;
197183
old = READ_ONCE(*ic);
198184
do {
199185
new = old;
200186
new.kh--;
201187
if (!new.kh)
202188
new.k = 0;
203189
} while (!try_cmpxchg(&ic->val, &old.val, new.val));
204-
read_unlock(&kvm->arch.sca_lock);
205190
if (!new.kh)
206191
wake_up(&kvm->arch.ipte_wq);
207192
}

arch/s390/kvm/intercept.c

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -471,6 +471,9 @@ static int handle_operexc(struct kvm_vcpu *vcpu)
471471
if (vcpu->arch.sie_block->ipa == 0xb256)
472472
return handle_sthyi(vcpu);
473473

474+
if (vcpu->kvm->arch.user_operexec)
475+
return -EOPNOTSUPP;
476+
474477
if (vcpu->arch.sie_block->ipa == 0 && vcpu->kvm->arch.user_instr0)
475478
return -EOPNOTSUPP;
476479
rc = read_guest_lc(vcpu, __LC_PGM_NEW_PSW, &newpsw, sizeof(psw_t));

arch/s390/kvm/interrupt.c

Lines changed: 17 additions & 63 deletions
Original file line numberDiff line numberDiff line change
@@ -45,70 +45,34 @@ static struct kvm_s390_gib *gib;
4545
/* handle external calls via sigp interpretation facility */
4646
static int sca_ext_call_pending(struct kvm_vcpu *vcpu, int *src_id)
4747
{
48-
int c, scn;
48+
struct esca_block *sca = vcpu->kvm->arch.sca;
49+
union esca_sigp_ctrl sigp_ctrl = sca->cpu[vcpu->vcpu_id].sigp_ctrl;
4950

5051
if (!kvm_s390_test_cpuflags(vcpu, CPUSTAT_ECALL_PEND))
5152
return 0;
5253

5354
BUG_ON(!kvm_s390_use_sca_entries());
54-
read_lock(&vcpu->kvm->arch.sca_lock);
55-
if (vcpu->kvm->arch.use_esca) {
56-
struct esca_block *sca = vcpu->kvm->arch.sca;
57-
union esca_sigp_ctrl sigp_ctrl =
58-
sca->cpu[vcpu->vcpu_id].sigp_ctrl;
59-
60-
c = sigp_ctrl.c;
61-
scn = sigp_ctrl.scn;
62-
} else {
63-
struct bsca_block *sca = vcpu->kvm->arch.sca;
64-
union bsca_sigp_ctrl sigp_ctrl =
65-
sca->cpu[vcpu->vcpu_id].sigp_ctrl;
66-
67-
c = sigp_ctrl.c;
68-
scn = sigp_ctrl.scn;
69-
}
70-
read_unlock(&vcpu->kvm->arch.sca_lock);
7155

7256
if (src_id)
73-
*src_id = scn;
57+
*src_id = sigp_ctrl.scn;
7458

75-
return c;
59+
return sigp_ctrl.c;
7660
}
7761

7862
static int sca_inject_ext_call(struct kvm_vcpu *vcpu, int src_id)
7963
{
64+
struct esca_block *sca = vcpu->kvm->arch.sca;
65+
union esca_sigp_ctrl *sigp_ctrl = &sca->cpu[vcpu->vcpu_id].sigp_ctrl;
66+
union esca_sigp_ctrl old_val, new_val = {.scn = src_id, .c = 1};
8067
int expect, rc;
8168

8269
BUG_ON(!kvm_s390_use_sca_entries());
83-
read_lock(&vcpu->kvm->arch.sca_lock);
84-
if (vcpu->kvm->arch.use_esca) {
85-
struct esca_block *sca = vcpu->kvm->arch.sca;
86-
union esca_sigp_ctrl *sigp_ctrl =
87-
&(sca->cpu[vcpu->vcpu_id].sigp_ctrl);
88-
union esca_sigp_ctrl new_val = {0}, old_val;
89-
90-
old_val = READ_ONCE(*sigp_ctrl);
91-
new_val.scn = src_id;
92-
new_val.c = 1;
93-
old_val.c = 0;
94-
95-
expect = old_val.value;
96-
rc = cmpxchg(&sigp_ctrl->value, old_val.value, new_val.value);
97-
} else {
98-
struct bsca_block *sca = vcpu->kvm->arch.sca;
99-
union bsca_sigp_ctrl *sigp_ctrl =
100-
&(sca->cpu[vcpu->vcpu_id].sigp_ctrl);
101-
union bsca_sigp_ctrl new_val = {0}, old_val;
10270

103-
old_val = READ_ONCE(*sigp_ctrl);
104-
new_val.scn = src_id;
105-
new_val.c = 1;
106-
old_val.c = 0;
71+
old_val = READ_ONCE(*sigp_ctrl);
72+
old_val.c = 0;
10773

108-
expect = old_val.value;
109-
rc = cmpxchg(&sigp_ctrl->value, old_val.value, new_val.value);
110-
}
111-
read_unlock(&vcpu->kvm->arch.sca_lock);
74+
expect = old_val.value;
75+
rc = cmpxchg(&sigp_ctrl->value, old_val.value, new_val.value);
11276

11377
if (rc != expect) {
11478
/* another external call is pending */
@@ -120,24 +84,14 @@ static int sca_inject_ext_call(struct kvm_vcpu *vcpu, int src_id)
12084

12185
static void sca_clear_ext_call(struct kvm_vcpu *vcpu)
12286
{
87+
struct esca_block *sca = vcpu->kvm->arch.sca;
88+
union esca_sigp_ctrl *sigp_ctrl = &sca->cpu[vcpu->vcpu_id].sigp_ctrl;
89+
12390
if (!kvm_s390_use_sca_entries())
12491
return;
12592
kvm_s390_clear_cpuflags(vcpu, CPUSTAT_ECALL_PEND);
126-
read_lock(&vcpu->kvm->arch.sca_lock);
127-
if (vcpu->kvm->arch.use_esca) {
128-
struct esca_block *sca = vcpu->kvm->arch.sca;
129-
union esca_sigp_ctrl *sigp_ctrl =
130-
&(sca->cpu[vcpu->vcpu_id].sigp_ctrl);
131-
132-
WRITE_ONCE(sigp_ctrl->value, 0);
133-
} else {
134-
struct bsca_block *sca = vcpu->kvm->arch.sca;
135-
union bsca_sigp_ctrl *sigp_ctrl =
136-
&(sca->cpu[vcpu->vcpu_id].sigp_ctrl);
13793

138-
WRITE_ONCE(sigp_ctrl->value, 0);
139-
}
140-
read_unlock(&vcpu->kvm->arch.sca_lock);
94+
WRITE_ONCE(sigp_ctrl->value, 0);
14195
}
14296

14397
int psw_extint_disabled(struct kvm_vcpu *vcpu)
@@ -1224,7 +1178,7 @@ int kvm_s390_ext_call_pending(struct kvm_vcpu *vcpu)
12241178
{
12251179
struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
12261180

1227-
if (!sclp.has_sigpif)
1181+
if (!kvm_s390_use_sca_entries())
12281182
return test_bit(IRQ_PEND_EXT_EXTERNAL, &li->pending_irqs);
12291183

12301184
return sca_ext_call_pending(vcpu, NULL);
@@ -1549,7 +1503,7 @@ static int __inject_extcall(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
15491503
if (kvm_get_vcpu_by_id(vcpu->kvm, src_id) == NULL)
15501504
return -EINVAL;
15511505

1552-
if (sclp.has_sigpif && !kvm_s390_pv_cpu_get_handle(vcpu))
1506+
if (kvm_s390_use_sca_entries() && !kvm_s390_pv_cpu_get_handle(vcpu))
15531507
return sca_inject_ext_call(vcpu, src_id);
15541508

15551509
if (test_and_set_bit(IRQ_PEND_EXT_EXTERNAL, &li->pending_irqs))

0 commit comments

Comments
 (0)