arch/s390/kvm/intercept.c
/*
 * in-kernel handling for sie intercepts
 *
 * Copyright IBM Corp. 2008, 2009
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 */

#include <linux/kvm_host.h>
#include <linux/errno.h>
#include <linux/pagemap.h>

#include <asm/kvm_host.h>

#include "kvm-s390.h"
#include "gaccess.h"
#include "trace.h"
#include "trace-s390.h"
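
/*
 * Intercepted LCTLG: reload the 64-bit control registers reg1 through
 * reg3 from guest storage, injecting a specification exception on
 * misalignment and an addressing exception on a failed guest access.
 */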
static int handle_lctlg(struct kvm_vcpu *vcpu)
{
        int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
        int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
        u64 useraddr;
        int reg, rc;

        vcpu->stat.instruction_lctlg++;

        useraddr = kvm_s390_get_base_disp_rsy(vcpu);

        if (useraddr & 7)
                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

        reg = reg1;

        VCPU_EVENT(vcpu, 5, "lctlg r1:%x, r3:%x, addr:%llx", reg1, reg3,
                   useraddr);
        trace_kvm_s390_handle_lctl(vcpu, 1, reg1, reg3, useraddr);

        do {
                rc = get_guest(vcpu, vcpu->arch.sie_block->gcr[reg],
                               (u64 __user *) useraddr);
                if (rc)
                        return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
                useraddr += 8;
                if (reg == reg3)
                        break;
                reg = (reg + 1) % 16;
        } while (1);
        return 0;
}
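
/*
 * Intercepted LCTL: like handle_lctlg, but loads only the low 32 bits
 * of each control register from guest storage.
 */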
static int handle_lctl(struct kvm_vcpu *vcpu)
{
        int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
        int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
        u64 useraddr;
        u32 val = 0;
        int reg, rc;

        vcpu->stat.instruction_lctl++;

        useraddr = kvm_s390_get_base_disp_rs(vcpu);

        if (useraddr & 3)
                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

        VCPU_EVENT(vcpu, 5, "lctl r1:%x, r3:%x, addr:%llx", reg1, reg3,
                   useraddr);
        trace_kvm_s390_handle_lctl(vcpu, 0, reg1, reg3, useraddr);

        reg = reg1;
        do {
                rc = get_guest(vcpu, val, (u32 __user *) useraddr);
                if (rc)
                        return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
                vcpu->arch.sie_block->gcr[reg] &= 0xffffffff00000000ul;
                vcpu->arch.sie_block->gcr[reg] |= val;
                useraddr += 4;
                if (reg == reg3)
                        break;
                reg = (reg + 1) % 16;
        } while (1);
        return 0;
}
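
/*
 * Instructions with primary opcode 0xeb are dispatched on their second
 * opcode byte, taken from the low byte of the ipb field.
 */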
static const intercept_handler_t eb_handlers[256] = {
        [0x2f] = handle_lctlg,
        [0x8a] = kvm_s390_handle_priv_eb,
};

static int handle_eb(struct kvm_vcpu *vcpu)
{
        intercept_handler_t handler;

        handler = eb_handlers[vcpu->arch.sie_block->ipb & 0xff];
        if (handler)
                return handler(vcpu);
        return -EOPNOTSUPP;
}
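
/*
 * Instruction intercepts are dispatched on the first opcode byte,
 * taken from the high byte of the ipa field.
 */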
static const intercept_handler_t instruction_handlers[256] = {
        [0x01] = kvm_s390_handle_01,
        [0x82] = kvm_s390_handle_lpsw,
        [0x83] = kvm_s390_handle_diag,
        [0xae] = kvm_s390_handle_sigp,
        [0xb2] = kvm_s390_handle_b2,
        [0xb7] = handle_lctl,
        [0xb9] = kvm_s390_handle_b9,
        [0xe5] = kvm_s390_handle_e5,
        [0xeb] = handle_eb,
};
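
/* Intercepts that only need their exit statistics bumped. */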
static int handle_noop(struct kvm_vcpu *vcpu)
{
        switch (vcpu->arch.sie_block->icptcode) {
        case 0x0:
                vcpu->stat.exit_null++;
                break;
        case 0x10:
                vcpu->stat.exit_external_request++;
                break;
        case 0x14:
                vcpu->stat.exit_external_interrupt++;
                break;
        default:
                break; /* nothing */
        }
        return 0;
}
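
/*
 * Handle a stop request: depending on the pending action bits this
 * reruns the vcpu, marks it stopped, and/or stores its status before
 * exiting to userspace.
 */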
static int handle_stop(struct kvm_vcpu *vcpu)
{
        int rc = 0;

        vcpu->stat.exit_stop_request++;
        spin_lock_bh(&vcpu->arch.local_int.lock);

        trace_kvm_s390_stop_request(vcpu->arch.local_int.action_bits);

        if (vcpu->arch.local_int.action_bits & ACTION_RELOADVCPU_ON_STOP) {
                vcpu->arch.local_int.action_bits &= ~ACTION_RELOADVCPU_ON_STOP;
                rc = SIE_INTERCEPT_RERUNVCPU;
                vcpu->run->exit_reason = KVM_EXIT_INTR;
        }

        if (vcpu->arch.local_int.action_bits & ACTION_STOP_ON_STOP) {
                atomic_set_mask(CPUSTAT_STOPPED,
                                &vcpu->arch.sie_block->cpuflags);
                vcpu->arch.local_int.action_bits &= ~ACTION_STOP_ON_STOP;
                VCPU_EVENT(vcpu, 3, "%s", "cpu stopped");
                rc = -EOPNOTSUPP;
        }

        if (vcpu->arch.local_int.action_bits & ACTION_STORE_ON_STOP) {
                vcpu->arch.local_int.action_bits &= ~ACTION_STORE_ON_STOP;
                /* store status must be called unlocked. Since local_int.lock
                 * only protects local_int.* and not guest memory we can give
                 * up the lock here */
                spin_unlock_bh(&vcpu->arch.local_int.lock);
                rc = kvm_s390_vcpu_store_status(vcpu,
                                                KVM_S390_STORE_STATUS_NOADDR);
                if (rc >= 0)
                        rc = -EOPNOTSUPP;
        } else
                spin_unlock_bh(&vcpu->arch.local_int.lock);
        return rc;
}
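
/*
 * Validity intercept reason 0x37 indicates that the host mapping of
 * the guest prefix pages is gone; fault both pages back in. Any other
 * reason is rejected with -EOPNOTSUPP.
 */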
static int handle_validity(struct kvm_vcpu *vcpu)
{
        unsigned long vmaddr;
        int viwhy = vcpu->arch.sie_block->ipb >> 16;
        int rc;

        vcpu->stat.exit_validity++;
        trace_kvm_s390_intercept_validity(vcpu, viwhy);
        if (viwhy == 0x37) {
                vmaddr = gmap_fault(vcpu->arch.sie_block->prefix,
                                    vcpu->arch.gmap);
                if (IS_ERR_VALUE(vmaddr)) {
                        rc = -EOPNOTSUPP;
                        goto out;
                }
                rc = fault_in_pages_writeable((char __user *) vmaddr,
                                              PAGE_SIZE);
                if (rc) {
                        /* user will receive sigsegv, exit to user */
                        rc = -EOPNOTSUPP;
                        goto out;
                }
                vmaddr = gmap_fault(vcpu->arch.sie_block->prefix + PAGE_SIZE,
                                    vcpu->arch.gmap);
                if (IS_ERR_VALUE(vmaddr)) {
                        rc = -EOPNOTSUPP;
                        goto out;
                }
                rc = fault_in_pages_writeable((char __user *) vmaddr,
                                              PAGE_SIZE);
                if (rc) {
                        /* user will receive sigsegv, exit to user */
                        rc = -EOPNOTSUPP;
                        goto out;
                }
        } else
                rc = -EOPNOTSUPP;

out:
        if (rc)
                VCPU_EVENT(vcpu, 2, "unhandled validity intercept code %d",
                           viwhy);
        return rc;
}
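
/* Emulate an intercepted instruction via the opcode dispatch table. */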
static int handle_instruction(struct kvm_vcpu *vcpu)
{
        intercept_handler_t handler;

        vcpu->stat.exit_instruction++;
        trace_kvm_s390_intercept_instruction(vcpu,
                                             vcpu->arch.sie_block->ipa,
                                             vcpu->arch.sie_block->ipb);
        handler = instruction_handlers[vcpu->arch.sie_block->ipa >> 8];
        if (handler)
                return handler(vcpu);
        return -EOPNOTSUPP;
}
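
/* Forward an intercepted program interruption to the guest. */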
static int handle_prog(struct kvm_vcpu *vcpu)
{
        vcpu->stat.exit_program_interruption++;
        trace_kvm_s390_intercept_prog(vcpu, vcpu->arch.sie_block->iprcc);
        return kvm_s390_inject_program_int(vcpu, vcpu->arch.sie_block->iprcc);
}
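
/*
 * Combined instruction and program intercept: emulate the instruction
 * and deliver the program interruption; if emulation is unsupported,
 * the intercept code is rewritten to a plain instruction intercept.
 */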
static int handle_instruction_and_prog(struct kvm_vcpu *vcpu)
{
        int rc, rc2;

        vcpu->stat.exit_instr_and_program++;
        rc = handle_instruction(vcpu);
        rc2 = handle_prog(vcpu);

        if (rc == -EOPNOTSUPP)
                vcpu->arch.sie_block->icptcode = 0x04;
        if (rc)
                return rc;
        return rc2;
}
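
/* Intercept codes are multiples of four, so the table is indexed by code >> 2. */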
static const intercept_handler_t intercept_funcs[] = {
        [0x00 >> 2] = handle_noop,
        [0x04 >> 2] = handle_instruction,
        [0x08 >> 2] = handle_prog,
        [0x0C >> 2] = handle_instruction_and_prog,
        [0x10 >> 2] = handle_noop,
        [0x14 >> 2] = handle_noop,
        [0x18 >> 2] = handle_noop,
        [0x1C >> 2] = kvm_s390_handle_wait,
        [0x20 >> 2] = handle_validity,
        [0x28 >> 2] = handle_stop,
};
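
/*
 * Entry point for SIE intercept handling: validate the intercept code
 * and dispatch to the matching handler, if any.
 */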
int kvm_handle_sie_intercept(struct kvm_vcpu *vcpu)
{
        intercept_handler_t func;
        u8 code = vcpu->arch.sie_block->icptcode;

        if (code & 3 || (code >> 2) >= ARRAY_SIZE(intercept_funcs))
                return -EOPNOTSUPP;
        func = intercept_funcs[code >> 2];
        if (func)
                return func(vcpu);
        return -EOPNOTSUPP;
}