Line data Source code
1 : /*
2 : * Copyright (C) 2001 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
3 : * Licensed under the GPL
4 : */
5 :
6 : #include <linux/mm.h>
7 : #include <linux/sched.h>
8 : #include <linux/slab.h>
9 : #include <linux/syscalls.h>
10 : #include <linux/uaccess.h>
11 : #include <asm/unistd.h>
12 : #include <os.h>
13 : #include <skas.h>
14 : #include <sysdep/tls.h>
15 :
16 : static inline int modify_ldt (int func, void *ptr, unsigned long bytecount)
17 : {
18 0 : return syscall(__NR_modify_ldt, func, ptr, bytecount);
19 : }
20 :
21 0 : static long write_ldt_entry(struct mm_id *mm_idp, int func,
22 : struct user_desc *desc, void **addr, int done)
23 : {
24 : long res;
25 : void *stub_addr;
26 0 : res = syscall_stub_data(mm_idp, (unsigned long *)desc,
27 : (sizeof(*desc) + sizeof(long) - 1) &
28 : ~(sizeof(long) - 1),
29 : addr, &stub_addr);
30 0 : if (!res) {
31 0 : unsigned long args[] = { func,
32 0 : (unsigned long)stub_addr,
33 : sizeof(*desc),
34 : 0, 0, 0 };
35 0 : res = run_syscall_stub(mm_idp, __NR_modify_ldt, args,
36 : 0, addr, done);
37 : }
38 :
39 0 : return res;
40 : }
41 :
42 : /*
43 : * In skas mode, we hold our own ldt data in UML.
44 : * Thus, the code implementing sys_modify_ldt_skas
45 : * is very similar to (and mostly stolen from) sys_modify_ldt
46 : * in arch/i386/kernel/ldt.c.
47 : * The routines copied and modified in part are:
48 : * - read_ldt
49 : * - read_default_ldt
50 : * - write_ldt
51 : * - sys_modify_ldt_skas
52 : */
53 :
54 0 : static int read_ldt(void __user * ptr, unsigned long bytecount)
55 : {
56 0 : int i, err = 0;
57 : unsigned long size;
58 0 : uml_ldt_t *ldt = &current->mm->context.arch.ldt;
59 :
60 0 : if (!ldt->entry_count)
61 : goto out;
62 0 : if (bytecount > LDT_ENTRY_SIZE*LDT_ENTRIES)
63 0 : bytecount = LDT_ENTRY_SIZE*LDT_ENTRIES;
64 0 : err = bytecount;
65 :
66 0 : mutex_lock(&ldt->lock);
67 0 : if (ldt->entry_count <= LDT_DIRECT_ENTRIES) {
68 0 : size = LDT_ENTRY_SIZE*LDT_DIRECT_ENTRIES;
69 0 : if (size > bytecount)
70 0 : size = bytecount;
71 0 : if (copy_to_user(ptr, ldt->u.entries, size))
72 0 : err = -EFAULT;
73 0 : bytecount -= size;
74 0 : ptr += size;
75 : }
76 : else {
77 0 : for (i=0; i<ldt->entry_count/LDT_ENTRIES_PER_PAGE && bytecount;
78 0 : i++) {
79 0 : size = PAGE_SIZE;
80 0 : if (size > bytecount)
81 0 : size = bytecount;
82 0 : if (copy_to_user(ptr, ldt->u.pages[i], size)) {
83 : err = -EFAULT;
84 : break;
85 : }
86 0 : bytecount -= size;
87 0 : ptr += size;
88 : }
89 : }
90 0 : mutex_unlock(&ldt->lock);
91 :
92 0 : if (bytecount == 0 || err == -EFAULT)
93 : goto out;
94 :
95 0 : if (clear_user(ptr, bytecount))
96 0 : err = -EFAULT;
97 :
98 : out:
99 0 : return err;
100 : }
101 :
102 : static int read_default_ldt(void __user * ptr, unsigned long bytecount)
103 : {
104 : int err;
105 :
106 0 : if (bytecount > 5*LDT_ENTRY_SIZE)
107 0 : bytecount = 5*LDT_ENTRY_SIZE;
108 :
109 0 : err = bytecount;
110 : /*
111 : * UML doesn't support lcall7 and lcall27.
112 : * So we don't really have a default ldt; instead we emulate
113 : * an empty ldt of the common host default ldt size.
114 : */
115 0 : if (clear_user(ptr, bytecount))
116 0 : err = -EFAULT;
117 :
118 : return err;
119 : }
120 :
121 0 : static int write_ldt(void __user * ptr, unsigned long bytecount, int func)
122 : {
123 0 : uml_ldt_t *ldt = &current->mm->context.arch.ldt;
124 0 : struct mm_id * mm_idp = &current->mm->context.id;
125 : int i, err;
126 : struct user_desc ldt_info;
127 : struct ldt_entry entry0, *ldt_p;
128 0 : void *addr = NULL;
129 :
130 0 : err = -EINVAL;
131 0 : if (bytecount != sizeof(ldt_info))
132 : goto out;
133 0 : err = -EFAULT;
134 0 : if (copy_from_user(&ldt_info, ptr, sizeof(ldt_info)))
135 : goto out;
136 :
137 0 : err = -EINVAL;
138 0 : if (ldt_info.entry_number >= LDT_ENTRIES)
139 : goto out;
140 0 : if (ldt_info.contents == 3) {
141 0 : if (func == 1)
142 : goto out;
143 0 : if (ldt_info.seg_not_present == 0)
144 : goto out;
145 : }
146 :
147 0 : mutex_lock(&ldt->lock);
148 :
149 0 : err = write_ldt_entry(mm_idp, func, &ldt_info, &addr, 1);
150 0 : if (err)
151 : goto out_unlock;
152 :
153 0 : if (ldt_info.entry_number >= ldt->entry_count &&
154 : ldt_info.entry_number >= LDT_DIRECT_ENTRIES) {
155 0 : for (i=ldt->entry_count/LDT_ENTRIES_PER_PAGE;
156 0 : i*LDT_ENTRIES_PER_PAGE <= ldt_info.entry_number;
157 0 : i++) {
158 0 : if (i == 0)
159 0 : memcpy(&entry0, ldt->u.entries,
160 : sizeof(entry0));
161 0 : ldt->u.pages[i] = (struct ldt_entry *)
162 0 : __get_free_page(GFP_KERNEL|__GFP_ZERO);
163 0 : if (!ldt->u.pages[i]) {
164 0 : err = -ENOMEM;
165 : /* Undo the change in host */
166 0 : memset(&ldt_info, 0, sizeof(ldt_info));
167 0 : write_ldt_entry(mm_idp, 1, &ldt_info, &addr, 1);
168 0 : goto out_unlock;
169 : }
170 0 : if (i == 0) {
171 0 : memcpy(ldt->u.pages[0], &entry0,
172 : sizeof(entry0));
173 0 : memcpy(ldt->u.pages[0]+1, ldt->u.entries+1,
174 : sizeof(entry0)*(LDT_DIRECT_ENTRIES-1));
175 : }
176 0 : ldt->entry_count = (i + 1) * LDT_ENTRIES_PER_PAGE;
177 : }
178 : }
179 0 : if (ldt->entry_count <= ldt_info.entry_number)
180 0 : ldt->entry_count = ldt_info.entry_number + 1;
181 :
182 0 : if (ldt->entry_count <= LDT_DIRECT_ENTRIES)
183 0 : ldt_p = ldt->u.entries + ldt_info.entry_number;
184 : else
185 0 : ldt_p = ldt->u.pages[ldt_info.entry_number/LDT_ENTRIES_PER_PAGE] +
186 0 : ldt_info.entry_number%LDT_ENTRIES_PER_PAGE;
187 :
188 0 : if (ldt_info.base_addr == 0 && ldt_info.limit == 0 &&
189 0 : (func == 1 || LDT_empty(&ldt_info))) {
190 0 : ldt_p->a = 0;
191 0 : ldt_p->b = 0;
192 : }
193 : else{
194 0 : if (func == 1)
195 0 : ldt_info.useable = 0;
196 0 : ldt_p->a = LDT_entry_a(&ldt_info);
197 0 : ldt_p->b = LDT_entry_b(&ldt_info);
198 : }
199 : err = 0;
200 :
201 : out_unlock:
202 0 : mutex_unlock(&ldt->lock);
203 : out:
204 0 : return err;
205 : }
206 :
207 0 : static long do_modify_ldt_skas(int func, void __user *ptr,
208 : unsigned long bytecount)
209 : {
210 0 : int ret = -ENOSYS;
211 :
212 0 : switch (func) {
213 : case 0:
214 0 : ret = read_ldt(ptr, bytecount);
215 0 : break;
216 : case 1:
217 : case 0x11:
218 0 : ret = write_ldt(ptr, bytecount, func);
219 0 : break;
220 : case 2:
221 : ret = read_default_ldt(ptr, bytecount);
222 : break;
223 : }
224 0 : return ret;
225 : }
226 :
227 : static DEFINE_SPINLOCK(host_ldt_lock);
228 : static short dummy_list[9] = {0, -1};
229 : static short * host_ldt_entries = NULL;
230 :
231 0 : static void ldt_get_host_info(void)
232 : {
233 : long ret;
234 : struct ldt_entry * ldt;
235 : short *tmp;
236 : int i, size, k, order;
237 :
238 0 : spin_lock(&host_ldt_lock);
239 :
240 0 : if (host_ldt_entries != NULL) {
241 : spin_unlock(&host_ldt_lock);
242 : return;
243 : }
244 0 : host_ldt_entries = dummy_list+1;
245 :
246 0 : spin_unlock(&host_ldt_lock);
247 :
248 0 : for (i = LDT_PAGES_MAX-1, order=0; i; i>>=1, order++)
249 : ;
250 :
251 0 : ldt = (struct ldt_entry *)
252 0 : __get_free_pages(GFP_KERNEL|__GFP_ZERO, order);
253 0 : if (ldt == NULL) {
254 0 : printk(KERN_ERR "ldt_get_host_info: couldn't allocate buffer "
255 : "for host ldt\n");
256 0 : return;
257 : }
258 :
259 0 : ret = modify_ldt(0, ldt, (1<<order)*PAGE_SIZE);
260 0 : if (ret < 0) {
261 0 : printk(KERN_ERR "ldt_get_host_info: couldn't read host ldt\n");
262 0 : goto out_free;
263 : }
264 0 : if (ret == 0) {
265 : /* default_ldt is active, simply write an empty entry 0 */
266 0 : host_ldt_entries = dummy_list;
267 0 : goto out_free;
268 : }
269 :
270 0 : for (i=0, size=0; i<ret/LDT_ENTRY_SIZE; i++) {
271 0 : if (ldt[i].a != 0 || ldt[i].b != 0)
272 0 : size++;
273 : }
274 :
275 0 : if (size < ARRAY_SIZE(dummy_list))
276 0 : host_ldt_entries = dummy_list;
277 : else {
278 0 : size = (size + 1) * sizeof(dummy_list[0]);
279 0 : tmp = kmalloc(size, GFP_KERNEL);
280 0 : if (tmp == NULL) {
281 0 : printk(KERN_ERR "ldt_get_host_info: couldn't allocate "
282 : "host ldt list\n");
283 0 : goto out_free;
284 : }
285 0 : host_ldt_entries = tmp;
286 : }
287 :
288 0 : for (i=0, k=0; i<ret/LDT_ENTRY_SIZE; i++) {
289 0 : if (ldt[i].a != 0 || ldt[i].b != 0)
290 0 : host_ldt_entries[k++] = i;
291 : }
292 0 : host_ldt_entries[k] = -1;
293 :
294 : out_free:
295 0 : free_pages((unsigned long)ldt, order);
296 : }
297 :
298 0 : long init_new_ldt(struct mm_context *new_mm, struct mm_context *from_mm)
299 : {
300 : struct user_desc desc;
301 : short * num_p;
302 : int i;
303 0 : long page, err=0;
304 0 : void *addr = NULL;
305 :
306 :
307 0 : mutex_init(&new_mm->arch.ldt.lock);
308 :
309 0 : if (!from_mm) {
310 0 : memset(&desc, 0, sizeof(desc));
311 : /*
312 : * Now we try to retrieve info about the ldt we
313 : * inherited from the host. All ldt entries found
314 : * will be reset in the following loop.
315 : */
316 0 : ldt_get_host_info();
317 0 : for (num_p=host_ldt_entries; *num_p != -1; num_p++) {
318 0 : desc.entry_number = *num_p;
319 0 : err = write_ldt_entry(&new_mm->id, 1, &desc,
320 0 : &addr, *(num_p + 1) == -1);
321 0 : if (err)
322 : break;
323 : }
324 0 : new_mm->arch.ldt.entry_count = 0;
325 :
326 0 : goto out;
327 : }
328 :
329 : /*
330 : * Our local LDT is used to supply the data for
331 : * modify_ldt(READLDT), if PTRACE_LDT isn't available,
332 : * i.e., we have to use the stub for modify_ldt, which
333 : * can't handle the big read buffer of up to 64kB.
334 : */
335 0 : mutex_lock(&from_mm->arch.ldt.lock);
336 0 : if (from_mm->arch.ldt.entry_count <= LDT_DIRECT_ENTRIES)
337 0 : memcpy(new_mm->arch.ldt.u.entries, from_mm->arch.ldt.u.entries,
338 : sizeof(new_mm->arch.ldt.u.entries));
339 : else {
340 0 : i = from_mm->arch.ldt.entry_count / LDT_ENTRIES_PER_PAGE;
341 0 : while (i-->0) {
342 0 : page = __get_free_page(GFP_KERNEL|__GFP_ZERO);
343 0 : if (!page) {
344 : err = -ENOMEM;
345 : break;
346 : }
347 0 : new_mm->arch.ldt.u.pages[i] =
348 0 : (struct ldt_entry *) page;
349 0 : memcpy(new_mm->arch.ldt.u.pages[i],
350 0 : from_mm->arch.ldt.u.pages[i], PAGE_SIZE);
351 : }
352 : }
353 0 : new_mm->arch.ldt.entry_count = from_mm->arch.ldt.entry_count;
354 0 : mutex_unlock(&from_mm->arch.ldt.lock);
355 :
356 : out:
357 0 : return err;
358 : }
359 :
360 :
361 0 : void free_ldt(struct mm_context *mm)
362 : {
363 : int i;
364 :
365 0 : if (mm->arch.ldt.entry_count > LDT_DIRECT_ENTRIES) {
366 0 : i = mm->arch.ldt.entry_count / LDT_ENTRIES_PER_PAGE;
367 0 : while (i-- > 0)
368 0 : free_page((long) mm->arch.ldt.u.pages[i]);
369 : }
370 0 : mm->arch.ldt.entry_count = 0;
371 0 : }
372 :
373 0 : SYSCALL_DEFINE3(modify_ldt, int , func , void __user * , ptr ,
374 : unsigned long , bytecount)
375 : {
376 : /* See non-um modify_ldt() for why we do this cast */
377 0 : return (unsigned int)do_modify_ldt_skas(func, ptr, bytecount);
378 : }
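
The dispatch in do_modify_ldt_skas() mirrors the user-visible modify_ldt() interface: func 0 reads the LDT, 1 and 0x11 install an entry, and 2 reads the (emulated, empty) default LDT. Below is a minimal user-space sketch of that interface, assuming an x86 Linux host with glibc headers; the entry number and segment limit are arbitrary illustration values, not taken from this file.

/* illustrative user-space caller, not part of ldt.c */
#include <asm/ldt.h>        /* struct user_desc, LDT_ENTRIES, LDT_ENTRY_SIZE */
#include <stdio.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	struct user_desc desc;
	unsigned char buf[LDT_ENTRIES * LDT_ENTRY_SIZE];
	long n;

	/* func == 0x11: install an entry (handled by write_ldt() above) */
	memset(&desc, 0, sizeof(desc));
	desc.entry_number   = 0;        /* arbitrary slot for illustration */
	desc.base_addr      = 0;
	desc.limit          = 0xfffff;
	desc.seg_32bit      = 1;
	desc.limit_in_pages = 1;
	if (syscall(SYS_modify_ldt, 0x11, &desc, sizeof(desc)) != 0)
		perror("modify_ldt(write)");

	/* func == 0: read the table back (handled by read_ldt() above) */
	n = syscall(SYS_modify_ldt, 0, buf, sizeof(buf));
	if (n < 0)
		perror("modify_ldt(read)");
	else
		printf("read %ld bytes of LDT\n", n);

	return 0;
}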