1 /*
2  *  linux/kernel/fork.c
3  *
4  *  Copyright (C) 1991, 1992  Linus Torvalds
5  */
6
7 /*
8  *  'fork.c' contains the help-routines for the 'fork' system call
9  * (see also entry.S and others).
10  * Fork is rather simple, once you get the hang of it, but the memory
11  * management can be a bitch. See 'mm/memory.c': 'copy_page_range()'
12  */
13
14 #include <linux/config.h>
15 #include <linux/slab.h>
16 #include <linux/init.h>
17 #include <linux/unistd.h>
18 #include <linux/smp_lock.h>
19 #include <linux/module.h>
20 #include <linux/vmalloc.h>
21 #include <linux/completion.h>
22 #include <linux/personality.h>
23
24 #include <asm/pgtable.h>
25 #include <asm/pgalloc.h>
26 #include <asm/uaccess.h>
27 #include <asm/mmu_context.h>
28
29 /* The idle threads do not count.. */
30 int nr_threads;
31 int nr_running;
32
33 int max_threads;
34 unsigned long total_forks;      /* Handle normal Linux uptimes. */
35 int last_pid;
36
37 struct task_struct *pidhash[PIDHASH_SZ];
38
39 void add_wait_queue(wait_queue_head_t *q, wait_queue_t * wait)
40 {
41         unsigned long flags;
42
43         wait->flags &= ~WQ_FLAG_EXCLUSIVE;
44         wq_write_lock_irqsave(&q->lock, flags);
45         __add_wait_queue(q, wait);
46         wq_write_unlock_irqrestore(&q->lock, flags);
47 }
48
49 void add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t * wait)
50 {
51         unsigned long flags;
52
53         wait->flags |= WQ_FLAG_EXCLUSIVE;
54         wq_write_lock_irqsave(&q->lock, flags);
55         __add_wait_queue_tail(q, wait);
56         wq_write_unlock_irqrestore(&q->lock, flags);
57 }
58
59 void remove_wait_queue(wait_queue_head_t *q, wait_queue_t * wait)
60 {
61         unsigned long flags;
62
63         wq_write_lock_irqsave(&q->lock, flags);
64         __remove_wait_queue(q, wait);
65         wq_write_unlock_irqrestore(&q->lock, flags);
66 }
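/*
 * A note on the two flavours above: add_wait_queue() queues a
 * non-exclusive waiter at the head of the list, while
 * add_wait_queue_exclusive() sets WQ_FLAG_EXCLUSIVE and appends the
 * waiter at the tail.  As I understand the 2.4 __wake_up() logic, the
 * wake-up code walks the list in order, waking every non-exclusive
 * waiter it meets but stopping after the requested number of exclusive
 * ones -- which is how "wake one" semantics (e.g. for accept()) are
 * built on top of these helpers.
 */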
67
68 void __init fork_init(unsigned long mempages)
69 {
70         /*
71          * The default maximum number of threads is set to a safe
72          * value: the thread structures can take up at most half
73          * of memory.
74          */
75         max_threads = mempages / (THREAD_SIZE/PAGE_SIZE) / 16;
76
77         init_task.rlim[RLIMIT_NPROC].rlim_cur = max_threads/2;
78         init_task.rlim[RLIMIT_NPROC].rlim_max = max_threads/2;
79 }
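/*
 * Rough arithmetic behind the limit above, assuming i386 where
 * THREAD_SIZE covers two pages (the 8kB task_struct + kernel stack):
 *
 *	max_threads = mempages / 2 / 16 = mempages / 32
 *
 * so the task structures can consume at most mempages/16 pages, i.e.
 * about one sixteenth of memory rather than the "half" mentioned in
 * the comment -- e.g. 128MB of RAM (32768 pages) gives a max_threads
 * of 1024.  On architectures with a different THREAD_SIZE the figure
 * shifts accordingly.
 */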
80
81 /* Protects next_safe and last_pid. */
82 spinlock_t lastpid_lock = SPIN_LOCK_UNLOCKED;
83
84 static int get_pid(unsigned long flags)
85 {
86         static int next_safe = PID_MAX;
87         struct task_struct *p;
88
89         if (flags & CLONE_PID)
90                 return current->pid;
91
92         spin_lock(&lastpid_lock);
93         if((++last_pid) & 0xffff8000) {
94                 last_pid = 300;         /* Skip daemons etc. */
95                 goto inside;
96         }
97         if(last_pid >= next_safe) {
98 inside:
99                 next_safe = PID_MAX;
100                 read_lock(&tasklist_lock);
101         repeat:
102                 for_each_task(p) {
103                         if(p->pid == last_pid   ||
104                            p->pgrp == last_pid  ||
105                            p->tgid == last_pid  ||
106                            p->session == last_pid) {
107                                 if(++last_pid >= next_safe) {
108                                         if(last_pid & 0xffff8000)
109                                                 last_pid = 300;
110                                         next_safe = PID_MAX;
111                                 }
112                                 goto repeat;
113                         }
114                         if(p->pid > last_pid && next_safe > p->pid)
115                                 next_safe = p->pid;
116                         if(p->pgrp > last_pid && next_safe > p->pgrp)
117                                 next_safe = p->pgrp;
118                         if(p->session > last_pid && next_safe > p->session)
119                                 next_safe = p->session;
120                 }
121                 read_unlock(&tasklist_lock);
122         }
123         spin_unlock(&lastpid_lock);
124
125         return last_pid;
126 }
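/*
 * How the allocator above behaves: last_pid is bumped on every call
 * and wraps back to 300 once it reaches 0x8000 (the 0xffff8000 test),
 * leaving the low range to early boot-time daemons.  next_safe caches
 * the smallest pid/pgrp/tgid/session in use that is greater than
 * last_pid, so the task list only has to be rescanned when last_pid
 * catches up with it.  For example, if last_pid is 1000 and the
 * nearest id in use above it is 1024, pids 1001..1023 are handed out
 * under lastpid_lock alone, with no for_each_task() walk.  CLONE_PID
 * callers simply inherit the caller's pid; do_fork() only permits
 * that for the idle threads (pid 0).
 */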
127
128 static inline int dup_mmap(struct mm_struct * mm)
129 {
130         struct vm_area_struct * mpnt, *tmp, **pprev;
131         int retval;
132
133         flush_cache_mm(current->mm);
134         mm->locked_vm = 0;
135         mm->mmap = NULL;
136         mm->mmap_cache = NULL;
137         mm->map_count = 0;
138         mm->rss = 0;
139         mm->cpu_vm_mask = 0;
140         mm->swap_address = 0;
141         pprev = &mm->mmap;
142
143         /*
144          * Add it to the mmlist after the parent.
145          * Doing it this way means that we can order the list,
146          * and fork() won't mess up the ordering significantly.
147          * Add it first so that swapoff can see any swap entries.
148          */
149         spin_lock(&mmlist_lock);
150         list_add(&mm->mmlist, &current->mm->mmlist);
151         mmlist_nr++;
152         spin_unlock(&mmlist_lock);
153
154         for (mpnt = current->mm->mmap ; mpnt ; mpnt = mpnt->vm_next) {
155                 struct file *file;
156
157                 retval = -ENOMEM;
158                 if(mpnt->vm_flags & VM_DONTCOPY)
159                         continue;
160                 tmp = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
161                 if (!tmp)
162                         goto fail_nomem;
163                 *tmp = *mpnt;
164                 tmp->vm_flags &= ~VM_LOCKED;
165                 tmp->vm_mm = mm;
166                 tmp->vm_next = NULL;
167                 file = tmp->vm_file;
168                 if (file) {
169                         struct inode *inode = file->f_dentry->d_inode;
170                         get_file(file);
171                         if (tmp->vm_flags & VM_DENYWRITE)
172                                 atomic_dec(&inode->i_writecount);
173       
174                         /* insert tmp into the share list, just after mpnt */
175                         spin_lock(&inode->i_mapping->i_shared_lock);
176                         if((tmp->vm_next_share = mpnt->vm_next_share) != NULL)
177                                 mpnt->vm_next_share->vm_pprev_share =
178                                         &tmp->vm_next_share;
179                         mpnt->vm_next_share = tmp;
180                         tmp->vm_pprev_share = &mpnt->vm_next_share;
181                         spin_unlock(&inode->i_mapping->i_shared_lock);
182                 }
183
184                 /*
185                  * Link in the new vma and copy the page table entries:
186                  * link in first so that swapoff can see swap entries.
187                  */
188                 spin_lock(&mm->page_table_lock);
189                 *pprev = tmp;
190                 pprev = &tmp->vm_next;
191                 mm->map_count++;
192                 retval = copy_page_range(mm, current->mm, tmp);
193                 spin_unlock(&mm->page_table_lock);
194
195                 if (tmp->vm_ops && tmp->vm_ops->open)
196                         tmp->vm_ops->open(tmp);
197
198                 if (retval)
199                         goto fail_nomem;
200         }
201         retval = 0;
202         build_mmap_rb(mm);
203
204 fail_nomem:
205         flush_tlb_mm(current->mm);
206         return retval;
207 }
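/*
 * Points worth noting in dup_mmap() above:
 *
 *  - the child mm is added to mmlist, and each new vma is linked into
 *    the child before copy_page_range() runs, precisely so that
 *    swapoff can already see any swap entries while the copy is in
 *    progress (as the in-line comments say);
 *
 *  - VM_DONTCOPY vmas are skipped and VM_LOCKED is cleared in the
 *    copy, so mlock()ed memory does not stay pinned in the child;
 *
 *  - file-backed vmas are inserted into the inode's i_shared list
 *    right after the parent's vma, and VM_DENYWRITE mappings keep the
 *    i_writecount accounting correct;
 *
 *  - both the success path (retval = 0 after build_mmap_rb()) and the
 *    -ENOMEM path fall through to fail_nomem, whose only job is to
 *    flush the parent's TLB and return retval.
 */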
208
209 spinlock_t mmlist_lock __cacheline_aligned = SPIN_LOCK_UNLOCKED;
210 int mmlist_nr;
211
212 #define allocate_mm()   (kmem_cache_alloc(mm_cachep, SLAB_KERNEL))
213 #define free_mm(mm)     (kmem_cache_free(mm_cachep, (mm)))
214
215 static struct mm_struct * mm_init(struct mm_struct * mm)
216 {
217         atomic_set(&mm->mm_users, 1);
218         atomic_set(&mm->mm_count, 1);
219         init_rwsem(&mm->mmap_sem);
220         mm->page_table_lock = SPIN_LOCK_UNLOCKED;
221         mm->pgd = pgd_alloc(mm);
222         if (mm->pgd)
223                 return mm;
224         free_mm(mm);
225         return NULL;
226 }
227         
228
229 /*
230  * Allocate and initialize an mm_struct.
231  */
232 struct mm_struct * mm_alloc(void)
233 {
234         struct mm_struct * mm;
235
236         mm = allocate_mm();
237         if (mm) {
238                 memset(mm, 0, sizeof(*mm));
239                 return mm_init(mm);
240         }
241         return NULL;
242 }
243
244 /*
245  * Called when the last reference to the mm
246  * is dropped: either by a lazy thread or by
247  * mmput. Free the page directory and the mm.
248  */
249 inline void __mmdrop(struct mm_struct *mm)
250 {
251         if (mm == &init_mm) BUG();
252         pgd_free(mm->pgd);
253         destroy_context(mm);
254         free_mm(mm);
255 }
256
257 /*
258  * Decrement the use count and release all resources for an mm.
259  */
260 void mmput(struct mm_struct *mm)
261 {
262         if (atomic_dec_and_lock(&mm->mm_users, &mmlist_lock)) {
263                 extern struct mm_struct *swap_mm;
264                 if (swap_mm == mm)
265                         swap_mm = list_entry(mm->mmlist.next, struct mm_struct, mmlist);
266                 list_del(&mm->mmlist);
267                 mmlist_nr--;
268                 spin_unlock(&mmlist_lock);
269                 exit_mmap(mm);
270                 mmdrop(mm);
271         }
272 }
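/*
 * Two reference counts are in play here.  mm_users counts real users
 * of the address space (CLONE_VM threads and the like) and is what
 * mmput() drops; when it hits zero the mm is taken off mmlist and its
 * mappings are torn down with exit_mmap().  mm_count protects the
 * mm_struct itself -- it is also held, for instance, by lazy-TLB
 * kernel threads borrowing the mm as their active_mm -- and only when
 * mmdrop() brings it to zero does __mmdrop() free the page directory,
 * the architecture context and the structure.  The swap_mm fixup just
 * moves the swap-out scanner's cursor along if it was parked on the
 * mm being destroyed.
 */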
273
274 /* Please note the differences between mmput and mm_release.
275  * mmput is called whenever we stop holding onto a mm_struct,
276  * error, success, or whatever.
277  *
278  * mm_release is called after a mm_struct has been removed
279  * from the current process.
280  *
281  * This difference is important for error handling, when we
282  * only half set up a mm_struct for a new process and need to restore
283  * the old one.  Because we mmput the new mm_struct before
284  * restoring the old one. . .
285  * Eric Biederman 10 January 1998
286  */
287 void mm_release(void)
288 {
289         struct task_struct *tsk = current;
290         struct completion *vfork_done = tsk->vfork_done;
291
292         /* notify parent sleeping on vfork() */
293         if (vfork_done) {
294                 tsk->vfork_done = NULL;
295                 complete(vfork_done);
296         }
297 }
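/*
 * This is the child's half of the vfork() handshake: do_fork() below
 * points tsk->vfork_done at a completion on the parent's stack and the
 * parent then blocks in wait_for_completion().  When the child stops
 * using the borrowed mm -- on the exec and exit paths, which call
 * mm_release() -- the completion fires and the parent is released.
 */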
298
299 static int copy_mm(unsigned long clone_flags, struct task_struct * tsk)
300 {
301         struct mm_struct * mm, *oldmm;
302         int retval;
303
304         tsk->min_flt = tsk->maj_flt = 0;
305         tsk->cmin_flt = tsk->cmaj_flt = 0;
306         tsk->nswap = tsk->cnswap = 0;
307
308         tsk->mm = NULL;
309         tsk->active_mm = NULL;
310
311         /*
312          * Are we cloning a kernel thread?
313          *
314          * We need to steal an active VM for that..
315          */
316         oldmm = current->mm;
317         if (!oldmm)
318                 return 0;
319
320         if (clone_flags & CLONE_VM) {
321                 atomic_inc(&oldmm->mm_users);
322                 mm = oldmm;
323                 goto good_mm;
324         }
325
326         retval = -ENOMEM;
327         mm = allocate_mm();
328         if (!mm)
329                 goto fail_nomem;
330
331         /* Copy the current MM stuff.. */
332         memcpy(mm, oldmm, sizeof(*mm));
333         if (!mm_init(mm))
334                 goto fail_nomem;
335
336         down_write(&oldmm->mmap_sem);
337         retval = dup_mmap(mm);
338         up_write(&oldmm->mmap_sem);
339
340         if (retval)
341                 goto free_pt;
342
343         /*
344          * child gets a private LDT (if there was an LDT in the parent)
345          */
346         copy_segments(tsk, mm);
347
348         if (init_new_context(tsk,mm))
349                 goto free_pt;
350
351 good_mm:
352         tsk->mm = mm;
353         tsk->active_mm = mm;
354         return 0;
355
356 free_pt:
357         mmput(mm);
358 fail_nomem:
359         return retval;
360 }
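/*
 * Summary of the three outcomes of copy_mm():
 *
 *  - kernel threads (current->mm == NULL) get no mm at all and keep
 *    running on a borrowed active_mm;
 *
 *  - CLONE_VM children share the parent's mm: only mm_users is bumped,
 *    which is how pthread-style threads see the same address space;
 *
 *  - a plain fork() copies: a fresh mm_struct is memcpy()ed from the
 *    parent, mm_init() gives it its own pgd and counters, dup_mmap()
 *    duplicates the vmas and page tables under the parent's mmap_sem,
 *    and copy_segments() hands the child a private LDT if one exists.
 */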
361
362 static inline struct fs_struct *__copy_fs_struct(struct fs_struct *old)
363 {
364         struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
365         /* We don't need to lock fs - think why ;-) */
366         if (fs) {
367                 atomic_set(&fs->count, 1);
368                 fs->lock = RW_LOCK_UNLOCKED;
369                 fs->umask = old->umask;
370                 read_lock(&old->lock);
371                 fs->rootmnt = mntget(old->rootmnt);
372                 fs->root = dget(old->root);
373                 fs->pwdmnt = mntget(old->pwdmnt);
374                 fs->pwd = dget(old->pwd);
375                 if (old->altroot) {
376                         fs->altrootmnt = mntget(old->altrootmnt);
377                         fs->altroot = dget(old->altroot);
378                 } else {
379                         fs->altrootmnt = NULL;
380                         fs->altroot = NULL;
381                 }       
382                 read_unlock(&old->lock);
383         }
384         return fs;
385 }
386
387 struct fs_struct *copy_fs_struct(struct fs_struct *old)
388 {
389         return __copy_fs_struct(old);
390 }
391
392 static inline int copy_fs(unsigned long clone_flags, struct task_struct * tsk)
393 {
394         if (clone_flags & CLONE_FS) {
395                 atomic_inc(&current->fs->count);
396                 return 0;
397         }
398         tsk->fs = __copy_fs_struct(current->fs);
399         if (!tsk->fs)
400                 return -1;
401         return 0;
402 }
403
404 static int count_open_files(struct files_struct *files, int size)
405 {
406         int i;
407         
408         /* Find the last open fd */
409         for (i = size/(8*sizeof(long)); i > 0; ) {
410                 if (files->open_fds->fds_bits[--i])
411                         break;
412         }
413         i = (i+1) * 8 * sizeof(long);
414         return i;
415 }
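/*
 * count_open_files() returns the answer rounded up to a multiple of
 * one bitmap word, not the exact highest fd.  It scans open_fds a long
 * at a time from the top and stops at the highest non-zero word; with
 * 32-bit longs and a size of 1024 bits that is at most 32 probes.  If
 * the highest open fd is 40, for instance, word 1 (fds 32..63) is the
 * last non-empty one and the function returns 64.  The rounding is
 * what lets copy_files() below memcpy() open_files/8 bytes of the
 * bitmaps, i.e. whole longs, without slicing a word in half.
 */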
416
417 static int copy_files(unsigned long clone_flags, struct task_struct * tsk)
418 {
419         struct files_struct *oldf, *newf;
420         struct file **old_fds, **new_fds;
421         int open_files, nfds, size, i, error = 0;
422
423         /*
424          * A background process may not have any files ...
425          */
426         oldf = current->files;
427         if (!oldf)
428                 goto out;
429
430         if (clone_flags & CLONE_FILES) {
431                 atomic_inc(&oldf->count);
432                 goto out;
433         }
434
435         tsk->files = NULL;
436         error = -ENOMEM;
437         newf = kmem_cache_alloc(files_cachep, SLAB_KERNEL);
438         if (!newf) 
439                 goto out;
440
441         atomic_set(&newf->count, 1);
442
443         newf->file_lock     = RW_LOCK_UNLOCKED;
444         newf->next_fd       = 0;
445         newf->max_fds       = NR_OPEN_DEFAULT;
446         newf->max_fdset     = __FD_SETSIZE;
447         newf->close_on_exec = &newf->close_on_exec_init;
448         newf->open_fds      = &newf->open_fds_init;
449         newf->fd            = &newf->fd_array[0];
450
451         /* We don't yet have the oldf readlock, but even if the old
452            fdset gets grown now, we'll only copy up to "size" fds */
453         size = oldf->max_fdset;
454         if (size > __FD_SETSIZE) {
455                 newf->max_fdset = 0;
456                 write_lock(&newf->file_lock);
457                 error = expand_fdset(newf, size-1);
458                 write_unlock(&newf->file_lock);
459                 if (error)
460                         goto out_release;
461         }
462         read_lock(&oldf->file_lock);
463
464         open_files = count_open_files(oldf, size);
465
466         /*
467          * Check whether we need to allocate a larger fd array.
468          * Note: we're not a clone task, so the open count won't
469          * change.
470          */
471         nfds = NR_OPEN_DEFAULT;
472         if (open_files > nfds) {
473                 read_unlock(&oldf->file_lock);
474                 newf->max_fds = 0;
475                 write_lock(&newf->file_lock);
476                 error = expand_fd_array(newf, open_files-1);
477                 write_unlock(&newf->file_lock);
478                 if (error) 
479                         goto out_release;
480                 nfds = newf->max_fds;
481                 read_lock(&oldf->file_lock);
482         }
483
484         old_fds = oldf->fd;
485         new_fds = newf->fd;
486
487         memcpy(newf->open_fds->fds_bits, oldf->open_fds->fds_bits, open_files/8);
488         memcpy(newf->close_on_exec->fds_bits, oldf->close_on_exec->fds_bits, open_files/8);
489
490         for (i = open_files; i != 0; i--) {
491                 struct file *f = *old_fds++;
492                 if (f)
493                         get_file(f);
494                 *new_fds++ = f;
495         }
496         read_unlock(&oldf->file_lock);
497
498         /* compute the remainder to be cleared */
499         size = (newf->max_fds - open_files) * sizeof(struct file *);
500
501         /* This is long-word aligned, thus could use an optimized version */ 
502         memset(new_fds, 0, size); 
503
504         if (newf->max_fdset > open_files) {
505                 int left = (newf->max_fdset-open_files)/8;
506                 int start = open_files / (8 * sizeof(unsigned long));
507                 
508                 memset(&newf->open_fds->fds_bits[start], 0, left);
509                 memset(&newf->close_on_exec->fds_bits[start], 0, left);
510         }
511
512         tsk->files = newf;
513         error = 0;
514 out:
515         return error;
516
517 out_release:
518         free_fdset (newf->close_on_exec, newf->max_fdset);
519         free_fdset (newf->open_fds, newf->max_fdset);
520         kmem_cache_free(files_cachep, newf);
521         goto out;
522 }
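/*
 * The shape of copy_files(): CLONE_FILES is just a reference-count
 * bump.  Otherwise a new files_struct starts out with the embedded
 * NR_OPEN_DEFAULT fd array and __FD_SETSIZE-bit sets; if the parent
 * has already grown beyond those, the new fdset and fd array are
 * expanded first (dropping and retaking the locks, which is why the
 * copy only promises to cover up to the "size" sampled earlier).  Then
 * the open and close-on-exec bitmaps are copied, a reference is taken
 * on every open struct file, and the unused tail of the new array and
 * bitmaps is cleared.
 */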
523
524 static inline int copy_sighand(unsigned long clone_flags, struct task_struct * tsk)
525 {
526         struct signal_struct *sig;
527
528         if (clone_flags & CLONE_SIGHAND) {
529                 atomic_inc(&current->sig->count);
530                 return 0;
531         }
532         sig = kmem_cache_alloc(sigact_cachep, GFP_KERNEL);
533         tsk->sig = sig;
534         if (!sig)
535                 return -1;
536         spin_lock_init(&sig->siglock);
537         atomic_set(&sig->count, 1);
538         memcpy(tsk->sig->action, current->sig->action, sizeof(tsk->sig->action));
539         return 0;
540 }
541
542 static inline void copy_flags(unsigned long clone_flags, struct task_struct *p)
543 {
544         unsigned long new_flags = p->flags;
545
546         new_flags &= ~(PF_SUPERPRIV | PF_USEDFPU);
547         new_flags |= PF_FORKNOEXEC;
548         if (!(clone_flags & CLONE_PTRACE))
549                 p->ptrace = 0;
550         p->flags = new_flags;
551 }
552
553 /*
554  *  Ok, this is the main fork-routine. It copies the system process
555  * information (task[nr]) and sets up the necessary registers. It also
556  * copies the data segment in its entirety.  The "stack_start" and
557  * "stack_top" arguments are simply passed along to the platform
558  * specific copy_thread() routine.  Most platforms ignore stack_top.
559  * For an example that's using stack_top, see
560  * arch/ia64/kernel/process.c.
561  */
562 int do_fork(unsigned long clone_flags, unsigned long stack_start,
563             struct pt_regs *regs, unsigned long stack_size)
564 {
565         int retval;
566         struct task_struct *p;
567         struct completion vfork;
568
569         retval = -EPERM;
570
571         /* 
572          * CLONE_PID is only allowed for the initial SMP swapper
573          * calls
574          */
575         if (clone_flags & CLONE_PID) {
576                 if (current->pid)
577                         goto fork_out;
578         }
579
580         retval = -ENOMEM;
581         p = alloc_task_struct();
582         if (!p)
583                 goto fork_out;
584
585         *p = *current;
586
587         retval = -EAGAIN;
588         if (atomic_read(&p->user->processes) >= p->rlim[RLIMIT_NPROC].rlim_cur)
589                 goto bad_fork_free;
590
591         atomic_inc(&p->user->__count);
592         atomic_inc(&p->user->processes);
593
594         /*
595          * Counter increases are protected by
596          * the kernel lock so nr_threads can't
597          * increase under us (but it may decrease).
598          */
599         if (nr_threads >= max_threads)
600                 goto bad_fork_cleanup_count;
601         
602         get_exec_domain(p->exec_domain);
603
604         if (p->binfmt && p->binfmt->module)
605                 __MOD_INC_USE_COUNT(p->binfmt->module);
606
607         p->did_exec = 0;
608         p->swappable = 0;
609         p->state = TASK_UNINTERRUPTIBLE;
610
611         copy_flags(clone_flags, p);
612         p->pid = get_pid(clone_flags);
613
614         p->run_list.next = NULL;
615         p->run_list.prev = NULL;
616
617         p->p_cptr = NULL;
618         init_waitqueue_head(&p->wait_chldexit);
619         p->vfork_done = NULL;
620         if (clone_flags & CLONE_VFORK) {
621                 p->vfork_done = &vfork;
622                 init_completion(&vfork);
623         }
624         spin_lock_init(&p->alloc_lock);
625
626         p->sigpending = 0;
627         init_sigpending(&p->pending);
628
629         p->it_real_value = p->it_virt_value = p->it_prof_value = 0;
630         p->it_real_incr = p->it_virt_incr = p->it_prof_incr = 0;
631         init_timer(&p->real_timer);
632         p->real_timer.data = (unsigned long) p;
633
634         p->leader = 0;          /* session leadership doesn't inherit */
635         p->tty_old_pgrp = 0;
636         p->times.tms_utime = p->times.tms_stime = 0;
637         p->times.tms_cutime = p->times.tms_cstime = 0;
638 #ifdef CONFIG_SMP
639         {
640                 int i;
641                 p->has_cpu = 0;
642                 p->processor = current->processor;
643                 /* ?? should we just memset this ?? */
644                 for(i = 0; i < smp_num_cpus; i++)
645                         p->per_cpu_utime[i] = p->per_cpu_stime[i] = 0;
646                 spin_lock_init(&p->sigmask_lock);
647         }
648 #endif
649         p->lock_depth = -1;             /* -1 = no lock */
650         p->start_time = jiffies;
651
652         INIT_LIST_HEAD(&p->local_pages);
653
654         retval = -ENOMEM;
655         /* copy all the process information */
656         if (copy_files(clone_flags, p))
657                 goto bad_fork_cleanup;
658         if (copy_fs(clone_flags, p))
659                 goto bad_fork_cleanup_files;
660         if (copy_sighand(clone_flags, p))
661                 goto bad_fork_cleanup_fs;
662         if (copy_mm(clone_flags, p))
663                 goto bad_fork_cleanup_sighand;
664         retval = copy_thread(0, clone_flags, stack_start, stack_size, p, regs);
665         if (retval)
666                 goto bad_fork_cleanup_mm;
667         p->semundo = NULL;
668         
669         /* Our parent execution domain becomes the current domain.
670            These must match for thread signalling to apply. */
671            
672         p->parent_exec_id = p->self_exec_id;
673
674         /* ok, now we should be set up.. */
675         p->swappable = 1;
676         p->exit_signal = clone_flags & CSIGNAL;
677         p->pdeath_signal = 0;
678
679         /*
680          * "share" dynamic priority between parent and child, thus the
681          * total amount of dynamic priority in the system doesn't change;
682          * this gives more scheduling fairness. It only matters in the first
683          * timeslice; in the long run the scheduling behaviour is unchanged.
684          */
685         p->counter = (current->counter + 1) >> 1;
686         current->counter >>= 1;
687         if (!current->counter)
688                 current->need_resched = 1;
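        /*
         * Worked example of the split above: a parent with 11 ticks
         * left gives the child (11 + 1) >> 1 = 6 and keeps 11 >> 1 = 5,
         * so the 11 ticks are merely redistributed; if the parent ends
         * up with 0 it is flagged for rescheduling.
         */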
689
690         /*
691          * Ok, add it to the run-queues and make it
692          * visible to the rest of the system.
693          *
694          * Let it rip!
695          */
696         retval = p->pid;
697         p->tgid = retval;
698         INIT_LIST_HEAD(&p->thread_group);
699
700         /* Need tasklist lock for parent etc handling! */
701         write_lock_irq(&tasklist_lock);
702
703         /* CLONE_PARENT and CLONE_THREAD re-use the old parent */
704         p->p_opptr = current->p_opptr;
705         p->p_pptr = current->p_pptr;
706         if (!(clone_flags & (CLONE_PARENT | CLONE_THREAD))) {
707                 p->p_opptr = current;
708                 if (!(p->ptrace & PT_PTRACED))
709                         p->p_pptr = current;
710         }
711
712         if (clone_flags & CLONE_THREAD) {
713                 p->tgid = current->tgid;
714                 list_add(&p->thread_group, &current->thread_group);
715         }
716
717         SET_LINKS(p);
718         hash_pid(p);
719         nr_threads++;
720         write_unlock_irq(&tasklist_lock);
721
722         if (p->ptrace & PT_PTRACED)
723                 send_sig(SIGSTOP, p, 1);
724
725         wake_up_process(p);             /* do this last */
726         ++total_forks;
727         if (clone_flags & CLONE_VFORK)
728                 wait_for_completion(&vfork);
729
730 fork_out:
731         return retval;
732
733 bad_fork_cleanup_mm:
734         exit_mm(p);
735 bad_fork_cleanup_sighand:
736         exit_sighand(p);
737 bad_fork_cleanup_fs:
738         exit_fs(p); /* blocking */
739 bad_fork_cleanup_files:
740         exit_files(p); /* blocking */
741 bad_fork_cleanup:
742         put_exec_domain(p->exec_domain);
743         if (p->binfmt && p->binfmt->module)
744                 __MOD_DEC_USE_COUNT(p->binfmt->module);
745 bad_fork_cleanup_count:
746         atomic_dec(&p->user->processes);
747         free_uid(p->user);
748 bad_fork_free:
749         free_task_struct(p);
750         goto fork_out;
751 }
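/*
 * For reference, a sketch (from memory, not verbatim) of how the i386
 * system-call stubs of this kernel series feed do_fork() -- see
 * arch/i386/kernel/process.c; other architectures pass different
 * stack-pointer arguments:
 *
 *	asmlinkage int sys_fork(struct pt_regs regs)
 *	{
 *		return do_fork(SIGCHLD, regs.esp, &regs, 0);
 *	}
 *
 *	asmlinkage int sys_vfork(struct pt_regs regs)
 *	{
 *		return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs.esp,
 *			       &regs, 0);
 *	}
 *
 *	asmlinkage int sys_clone(struct pt_regs regs)
 *	{
 *		unsigned long clone_flags = regs.ebx;
 *		unsigned long newsp = regs.ecx;
 *
 *		if (!newsp)
 *			newsp = regs.esp;
 *		return do_fork(clone_flags, newsp, &regs, 0);
 *	}
 */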
752
753 /* SLAB cache for signal_struct structures (tsk->sig) */
754 kmem_cache_t *sigact_cachep;
755
756 /* SLAB cache for files_struct structures (tsk->files) */
757 kmem_cache_t *files_cachep;
758
759 /* SLAB cache for fs_struct structures (tsk->fs) */
760 kmem_cache_t *fs_cachep;
761
762 /* SLAB cache for vm_area_struct structures */
763 kmem_cache_t *vm_area_cachep;
764
765 /* SLAB cache for mm_struct structures (tsk->mm) */
766 kmem_cache_t *mm_cachep;
767
768 void __init proc_caches_init(void)
769 {
770         sigact_cachep = kmem_cache_create("signal_act",
771                         sizeof(struct signal_struct), 0,
772                         SLAB_HWCACHE_ALIGN, NULL, NULL);
773         if (!sigact_cachep)
774                 panic("Cannot create signal action SLAB cache");
775
776         files_cachep = kmem_cache_create("files_cache", 
777                          sizeof(struct files_struct), 0, 
778                          SLAB_HWCACHE_ALIGN, NULL, NULL);
779         if (!files_cachep) 
780                 panic("Cannot create files SLAB cache");
781
782         fs_cachep = kmem_cache_create("fs_cache", 
783                          sizeof(struct fs_struct), 0, 
784                          SLAB_HWCACHE_ALIGN, NULL, NULL);
785         if (!fs_cachep) 
786                 panic("Cannot create fs_struct SLAB cache");
787  
788         vm_area_cachep = kmem_cache_create("vm_area_struct",
789                         sizeof(struct vm_area_struct), 0,
790                         SLAB_HWCACHE_ALIGN, NULL, NULL);
791         if(!vm_area_cachep)
792                 panic("vma_init: Cannot alloc vm_area_struct SLAB cache");
793
794         mm_cachep = kmem_cache_create("mm_struct",
795                         sizeof(struct mm_struct), 0,
796                         SLAB_HWCACHE_ALIGN, NULL, NULL);
797         if(!mm_cachep)
798                 panic("vma_init: Cannot alloc mm_struct SLAB cache");
799 }