#include <asm/param.h>	/* for HZ */

extern unsigned long event;

#include <linux/binfmts.h>
#include <linux/personality.h>
#include <linux/tasks.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/times.h>

#include <asm/system.h>
#include <asm/semaphore.h>

#include <linux/smp.h>
#include <linux/tty.h>
#include <linux/sem.h>
#include <linux/signal.h>
#include <linux/capability.h>
#include <linux/securebits.h>

#define CSIGNAL		0x000000ff	/* signal mask to be sent at exit */
#define CLONE_VM	0x00000100	/* set if VM shared between processes */
#define CLONE_FS	0x00000200	/* set if fs info shared between processes */
#define CLONE_FILES	0x00000400	/* set if open files shared between processes */
#define CLONE_SIGHAND	0x00000800	/* set if signal handlers shared */
#define CLONE_PID	0x00001000	/* set if pid shared */
#define CLONE_PTRACE	0x00002000	/* set if we want to let tracing continue on the child too */

/*
 * These are the constants used to fake the fixed-point load-average
 * counting. Some notes:
 *  - 11 bit fractions expand to 22 bits by the multiplies: this gives
 *    a load-average precision of 10 bits integer + 11 bits fractional
 *  - if you want to count load-averages more often, you need more
 *    precision, or rounding will get you. With 2-second counting freq,
 *    the EXP_n values would be 1981, 2034 and 2043 if still using only
 *    11 bit fractions.
 */
extern unsigned long avenrun[];		/* Load averages */

#define FSHIFT		11		/* nr of bits of precision */
#define FIXED_1		(1<<FSHIFT)	/* 1.0 as fixed-point */
#define LOAD_FREQ	(5*HZ)		/* 5 sec intervals */
#define EXP_1		1884		/* 1/exp(5sec/1min) as fixed-point */
#define EXP_5		2014		/* 1/exp(5sec/5min) */
#define EXP_15		2037		/* 1/exp(5sec/15min) */

#define CALC_LOAD(load,exp,n) \
	load *= exp; \
	load += n*(FIXED_1-exp); \
	load >>= FSHIFT;

#define CT_TO_SECS(x)	((x) / HZ)
#define CT_TO_USECS(x)	(((x) % HZ) * 1000000/HZ)

extern int nr_running, nr_tasks;
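/*
 * Illustrative sketch, not part of the original header: how the load
 * averages are maintained and read back.  CALC_LOAD computes
 * load = (load*exp + n*(FIXED_1-exp)) >> FSHIFT, an exponentially
 * decaying average in 11-bit fixed point.  The timer code does
 * roughly this every LOAD_FREQ ticks; the names active_tasks,
 * LOAD_INT and LOAD_FRAC are assumptions made for this example.
 */
#if 0
	unsigned long active_tasks = nr_running * FIXED_1;	/* scale to fixed-point */
	CALC_LOAD(avenrun[0], EXP_1,  active_tasks);		/* 1-minute average */
	CALC_LOAD(avenrun[1], EXP_5,  active_tasks);		/* 5-minute average */
	CALC_LOAD(avenrun[2], EXP_15, active_tasks);		/* 15-minute average */

	/* Converting back to the familiar "0.42"-style decimal value: */
	#define LOAD_INT(x)  ((x) >> FSHIFT)			 /* integer part */
	#define LOAD_FRAC(x) LOAD_INT(((x) & (FIXED_1-1)) * 100) /* two decimals */
#endif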
#include <linux/signal.h>
#include <linux/time.h>
#include <linux/param.h>
#include <linux/resource.h>
#include <linux/ptrace.h>
#include <linux/timer.h>

#include <asm/processor.h>

#define TASK_RUNNING		0
#define TASK_INTERRUPTIBLE	1
#define TASK_UNINTERRUPTIBLE	2
#define TASK_ZOMBIE		4
#define TASK_STOPPED		8
#define TASK_SWAPPING		16

/*
 * Scheduling policies
 */
#define SCHED_OTHER		0
#define SCHED_FIFO		1
#define SCHED_RR		2

/*
 * This is an additional bit set when we want to
 * yield the CPU for one re-schedule..
 */
#define SCHED_YIELD		0x10

#ifdef __KERNEL__

#ifndef NULL
#define NULL ((void *) 0)
#endif

#include <asm/spinlock.h>

/*
 * This serializes "schedule()" and also protects
 * the run-queue from deletions/modifications (but
 * _adding_ to the beginning of the run-queue has
 * a separate lock).
 */
extern rwlock_t tasklist_lock;
extern spinlock_t scheduler_lock;

extern void sched_init(void);
extern void show_state(void);
extern void trap_init(void);

asmlinkage void schedule(void);

/*
 * Open file table structure
 */
struct files_struct {
	/* ... */
	struct file ** fd;	/* current fd array */
	fd_set close_on_exec;
	/* ... */
};

#define INIT_FILES { \
	/* ... */ \
}

struct fs_struct {
	/* ... */
	struct dentry * root, * pwd;
};

/* Maximum number of active map areas.. This is a random (large) number */
#define MAX_MAP_COUNT	(65536)

struct mm_struct {
	struct vm_area_struct *mmap, *mmap_cache;
	pgd_t * pgd;
	atomic_t count;
	int map_count;
	struct semaphore mmap_sem;
	unsigned long context;
	unsigned long start_code, end_code, start_data, end_data;
	unsigned long start_brk, brk, start_stack;
	unsigned long arg_start, arg_end, env_start, env_end;
	unsigned long rss, total_vm, locked_vm;
	unsigned long def_flags;
	unsigned long cpu_vm_mask;
	/*
	 * This is an architecture-specific pointer: the portable
	 * part of Linux does not know about any segments.
	 */
	void * segments;
};

#define INIT_MM { \
	&init_mmap, NULL, swapper_pg_dir, \
	/* ... */ \
}
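/*
 * Illustrative sketch, not part of the original header: the mm_struct
 * address-space fields describe the process image laid out by exec,
 * so segment sizes fall straight out of them.  "mm" is assumed to be
 * a valid, counted mm_struct; PAGE_SHIFT comes from <asm/page.h>.
 * Note rss and total_vm are kept in pages, not bytes.
 */
#if 0
	unsigned long text_size = mm->end_code - mm->start_code;	/* bytes of text */
	unsigned long data_size = mm->end_data - mm->start_data;	/* bytes of data */
	unsigned long heap_size = mm->brk - mm->start_brk;		/* current heap */
	unsigned long vm_bytes  = mm->total_vm << PAGE_SHIFT;		/* pages -> bytes */
#endif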
struct signal_struct {
	atomic_t		count;
	struct k_sigaction	action[_NSIG];
	spinlock_t		siglock;
};

#define INIT_SIGNALS { \
	/* ... */ \
}

/*
 * Some day this will be a full-fledged user tracking system..
 * Right now it is only used to track how many processes a
 * user has, but it has the potential to track memory usage etc.
 */
struct task_struct {
/* these are hardcoded - don't touch */
	volatile long state;	/* -1 unrunnable, 0 runnable, >0 stopped */
	unsigned long flags;	/* per process flags, defined below */
	int sigpending;
	mm_segment_t addr_limit;	/* thread address space:
						0-0xBFFFFFFF for user-thread
						0-0xFFFFFFFF for kernel-thread
					 */
	struct exec_domain *exec_domain;
	/* ... */
	long counter;
	long priority;
/* SMP and runqueue state */
	/* ... */
	int lock_depth;		/* Lock depth. We can context switch in and out of holding a syscall kernel lock... */
	struct task_struct *next_task, *prev_task;
	struct task_struct *next_run,  *prev_run;

	struct linux_binfmt *binfmt;
	int exit_code, exit_signal;
	int pdeath_signal;	/* The signal sent when the parent dies */
	unsigned long personality;
	/* ... */
	pid_t pid;
	/* ... */
	/* boolean value for session group leader */
	int leader;
	/*
	 * pointers to (original) parent process, youngest child, younger sibling,
	 * older sibling, respectively.  (p->father can be replaced with
	 * p->p_pptr->pid)
	 */
	struct task_struct *p_opptr, *p_pptr, *p_cptr, *p_ysptr, *p_osptr;

	/* PID hash table linkage. */
	struct task_struct *pidhash_next;
	struct task_struct **pidhash_pprev;

	/* Pointer to task[] array linkage. */
	struct task_struct **tarray_ptr;

	struct wait_queue *wait_chldexit;	/* for wait4() */
	unsigned long timeout, policy, rt_priority;
	unsigned long it_real_value, it_prof_value, it_virt_value;
	unsigned long it_real_incr, it_prof_incr, it_virt_incr;
	struct timer_list real_timer;
	struct tms times;
	unsigned long start_time;
	long per_cpu_utime[NR_CPUS], per_cpu_stime[NR_CPUS];
/* mm fault and swap info: this can arguably be seen as either mm-specific or thread-specific */
	unsigned long min_flt, maj_flt, nswap, cmin_flt, cmaj_flt, cnswap;
	/* ... */
	unsigned long swap_address;
	unsigned long old_maj_flt;	/* old value of maj_flt */
	unsigned long dec_flt;		/* page fault count of the last time */
	unsigned long swap_cnt;		/* number of pages to swap on next pass */
/* process credentials */
	uid_t uid,euid,suid,fsuid;
	gid_t gid,egid,sgid,fsgid;
	int ngroups;
	gid_t groups[NGROUPS];
	kernel_cap_t cap_effective, cap_inheritable, cap_permitted;
	struct user_struct *user;
/* limits */
	struct rlimit rlim[RLIM_NLIMITS];
	unsigned short used_math;
	char comm[16];
/* file system info */
	int link_count;
	struct tty_struct *tty;	/* NULL if no tty */
/* ipc stuff */
	struct sem_undo *semundo;
	struct sem_queue *semsleeping;
/* tss for this task */
	struct thread_struct tss;
/* filesystem information */
	struct fs_struct *fs;
/* open file information */
	struct files_struct *files;
/* memory management info */
	struct mm_struct *mm;
/* signal handlers */
	spinlock_t sigmask_lock;	/* Protects signal and blocked */
	struct signal_struct *sig;
	sigset_t signal, blocked;
	struct signal_queue *sigqueue, **sigqueue_tail;
	unsigned long sas_ss_sp;
	size_t sas_ss_size;
};
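/*
 * Illustrative sketch, not part of the original header: addr_limit is
 * what get_fs()/set_fs() (from <asm/uaccess.h>) manipulate.  A kernel
 * thread that wants to pass a kernel buffer through an interface that
 * verifies user pointers temporarily widens its address limit:
 */
#if 0
	mm_segment_t old_fs = get_fs();	/* save the current limit */
	set_fs(KERNEL_DS);		/* 0-0xFFFFFFFF: kernel addresses allowed */
	/* ... call code that does verify_area()/copy_from_user() ... */
	set_fs(old_fs);			/* restore the user-thread limit */
#endif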
/* Per process flags */
#define PF_ALIGNWARN	0x00000001	/* Print alignment warning msgs */
					/* Not implemented yet, only for 486 */
#define PF_STARTING	0x00000002	/* being created */
#define PF_EXITING	0x00000004	/* getting shut down */
#define PF_PTRACED	0x00000010	/* set if ptrace (0) has been called */
#define PF_TRACESYS	0x00000020	/* tracing system calls */
#define PF_FORKNOEXEC	0x00000040	/* forked but didn't exec */
#define PF_SUPERPRIV	0x00000100	/* used super-user privileges */
#define PF_DUMPCORE	0x00000200	/* dumped core */
#define PF_SIGNALED	0x00000400	/* killed by a signal */
#define PF_MEMALLOC	0x00000800	/* Allocating memory */

#define PF_USEDFPU	0x00100000	/* task used FPU this quantum (SMP) */
#define PF_DTRACE	0x00200000	/* delayed trace (used on m68k) */

/*
 * Limit the stack to some sane default: root can always
 * increase this limit if needed..  8MB seems reasonable.
 */
#define _STK_LIM	(8*1024*1024)

#define DEF_PRIORITY	(20*HZ/100)	/* 200 ms time slices */

/*
 * INIT_TASK is used to set up the first task table, touch at
 * your own risk!. Base=0, limit=0x1fffff (=2MB)
 */
#define INIT_TASK \
/* state etc */	{ 0,0,0,KERNEL_DS,&default_exec_domain,0, \
/* counter */	DEF_PRIORITY,DEF_PRIORITY, \
/* SMP */	0,0,0,-1, \
/* schedlink */	&init_task,&init_task, &init_task, &init_task, \
/* binfmt */	NULL, \
/* ec,brk... */	0,0,0,0,0,0, \
/* pid etc.. */	0,0,0,0,0, \
/* proc links*/	&init_task,&init_task,NULL,NULL,NULL, \
/* pidhash */	NULL, NULL, \
/* tarray */	&task[0], \
/* chld wait */	NULL, \
/* timeout */	0,SCHED_OTHER,0,0,0,0,0,0,0, \
/* timer */	{ NULL, NULL, 0, 0, it_real_fn }, \
/* utime */	{0,0,0,0},0, \
/* per CPU times */ {0, }, {0, }, \
/* flt */	0,0,0,0,0,0, \
/* swp */	0,0,0,0,0, \
/* process credentials */ \
/* uid etc */	0,0,0,0,0,0,0,0, \
/* suppl grps*/	0, {0,}, \
/* caps */	CAP_INIT_EFF_SET,CAP_INIT_INH_SET,CAP_FULL_SET, \
/* user */	NULL, \
/* rlimits */	INIT_RLIMITS, \
/* math */	0, \
/* comm */	"swapper", \
/* fs info */	0,NULL, \
/* ipc */	NULL, NULL, \
/* tss */	INIT_TSS, \
/* fs */	&init_fs, \
/* files */	&init_files, \
/* mm */	&init_mm, \
/* signals */	SPIN_LOCK_UNLOCKED, &init_signals, {{0}}, {{0}}, NULL, &init_task.sigqueue, 0, 0, \
}

union task_union {
	struct task_struct task;
	unsigned long stack[2048];
};

extern union task_union init_task_union;

extern struct mm_struct init_mm;
extern struct task_struct *task[NR_TASKS];

extern struct task_struct **tarray_freelist;
extern spinlock_t taskslot_lock;

extern __inline__ void add_free_taskslot(struct task_struct **t)
{
	spin_lock(&taskslot_lock);
	*t = (struct task_struct *) tarray_freelist;
	tarray_freelist = t;
	spin_unlock(&taskslot_lock);
}

extern __inline__ struct task_struct **get_free_taskslot(void)
{
	struct task_struct **tslot;

	spin_lock(&taskslot_lock);
	if((tslot = tarray_freelist) != NULL)
		tarray_freelist = (struct task_struct **) *tslot;
	spin_unlock(&taskslot_lock);

	return tslot;
}
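/*
 * Illustrative sketch, not part of the original header: free task[]
 * slots form a singly linked list threaded through the free slots
 * themselves (each free slot holds, cast to a task pointer, the
 * address of the next free slot, so no extra storage is needed).
 * Typical use at fork/exit time might look like this; the bad_fork
 * label and the task pointer p are hypothetical.
 */
#if 0
	struct task_struct **slot = get_free_taskslot();
	if (!slot)
		goto bad_fork;		/* all NR_TASKS slots in use */
	*slot = p;			/* publish the new task */
	p->tarray_ptr = slot;		/* remember it for release */
	/* ...and on exit: */
	add_free_taskslot(p->tarray_ptr);
#endif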
#define PIDHASH_SZ (NR_TASKS >> 2)
extern struct task_struct *pidhash[PIDHASH_SZ];

#define pid_hashfn(x)	((((x) >> 8) ^ (x)) & (PIDHASH_SZ - 1))

extern __inline__ void hash_pid(struct task_struct *p)
{
	struct task_struct **htable = &pidhash[pid_hashfn(p->pid)];

	if((p->pidhash_next = *htable) != NULL)
		(*htable)->pidhash_pprev = &p->pidhash_next;
	*htable = p;
	p->pidhash_pprev = htable;
}

extern __inline__ void unhash_pid(struct task_struct *p)
{
	if(p->pidhash_next)
		p->pidhash_next->pidhash_pprev = p->pidhash_pprev;
	*p->pidhash_pprev = p->pidhash_next;
}

extern __inline__ struct task_struct *find_task_by_pid(int pid)
{
	struct task_struct *p, **htable = &pidhash[pid_hashfn(pid)];

	for(p = *htable; p && p->pid != pid; p = p->pidhash_next)
		;

	return p;
}
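/*
 * Illustrative sketch, not part of the original header: pid_hashfn
 * folds the top byte of the pid into the low bits so pids differing
 * only above bit 8 don't all collide in the PIDHASH_SZ-entry table.
 * A caller resolving a pid (as kill() must) would hold tasklist_lock
 * around the lookup and any use of the result:
 */
#if 0
	struct task_struct *p;

	read_lock(&tasklist_lock);
	p = find_task_by_pid(pid);		/* NULL if no such process */
	if (p)
		send_sig(SIGTERM, p, 0);	/* example action */
	read_unlock(&tasklist_lock);
#endif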
/* per-UID process charging. */
extern int alloc_uid(struct task_struct *p);
void free_uid(struct task_struct *p);

#include <asm/current.h>

extern unsigned long volatile jiffies;
extern unsigned long itimer_ticks;
extern unsigned long itimer_next;
extern struct timeval xtime;
extern void do_timer(struct pt_regs *);

extern unsigned int * prof_buffer;
extern unsigned long prof_len;
extern unsigned long prof_shift;

#define CURRENT_TIME (xtime.tv_sec)

extern void FASTCALL(__wake_up(struct wait_queue ** p, unsigned int mode));
extern void FASTCALL(sleep_on(struct wait_queue ** p));
extern void FASTCALL(interruptible_sleep_on(struct wait_queue ** p));
extern void FASTCALL(wake_up_process(struct task_struct * tsk));

#define wake_up(x)			__wake_up((x),TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE)
#define wake_up_interruptible(x)	__wake_up((x),TASK_INTERRUPTIBLE)
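/*
 * Illustrative sketch, not part of the original header: the classic
 * producer/consumer handshake built on these primitives.  The
 * wait-queue head request_wait and the helpers requests_pending()
 * and queue_request() are hypothetical.
 */
#if 0
	static struct wait_queue *request_wait = NULL;

	/* consumer: block (interruptibly) until a request is queued */
	while (!requests_pending())
		interruptible_sleep_on(&request_wait);

	/* producer: queue work, then wake all sleepers */
	queue_request(req);
	wake_up_interruptible(&request_wait);
#endif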
extern int in_group_p(gid_t grp);

extern void flush_signals(struct task_struct *);
extern void flush_signal_handlers(struct task_struct *);
extern int dequeue_signal(sigset_t *block, siginfo_t *);
extern int send_sig_info(int, struct siginfo *info, struct task_struct *);
extern int force_sig_info(int, struct siginfo *info, struct task_struct *);
extern int kill_pg_info(int, struct siginfo *info, pid_t);
extern int kill_sl_info(int, struct siginfo *info, pid_t);
extern int kill_proc_info(int, struct siginfo *info, pid_t);
extern int kill_something_info(int, struct siginfo *info, int);
extern void notify_parent(struct task_struct * tsk, int);
extern void force_sig(int sig, struct task_struct * p);
extern int send_sig(int sig, struct task_struct * p, int priv);
extern int kill_pg(pid_t, int, int);
extern int kill_sl(pid_t, int, int);
extern int kill_proc(pid_t, int, int);
extern int do_sigaction(int sig, const struct k_sigaction *act,
			struct k_sigaction *oact);
extern int do_sigaltstack(const stack_t *ss, stack_t *oss, unsigned long sp);

extern inline int signal_pending(struct task_struct *p)
{
	return (p->sigpending != 0);
}

/* Reevaluate whether the task has signals pending delivery.
   This is required every time the blocked sigset_t changes.
   All callers should have t->sigmask_lock.  */

static inline void recalc_sigpending(struct task_struct *t)
{
	unsigned long ready;
	long i;

	switch (_NSIG_WORDS) {
	default:
		for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
			ready |= t->signal.sig[i] &~ t->blocked.sig[i];
		break;

	case 4: ready  = t->signal.sig[3] &~ t->blocked.sig[3];
		ready |= t->signal.sig[2] &~ t->blocked.sig[2];
		ready |= t->signal.sig[1] &~ t->blocked.sig[1];
		ready |= t->signal.sig[0] &~ t->blocked.sig[0];
		break;

	case 2: ready  = t->signal.sig[1] &~ t->blocked.sig[1];
		ready |= t->signal.sig[0] &~ t->blocked.sig[0];
		break;

	case 1: ready  = t->signal.sig[0] &~ t->blocked.sig[0];
	}

	t->sigpending = (ready != 0);
}
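/*
 * Illustrative sketch, not part of the original header: per the
 * comment above, recalc_sigpending() must be rerun under
 * sigmask_lock whenever ->blocked changes, e.g. when blocking one
 * signal in the current task:
 */
#if 0
	spin_lock_irq(&current->sigmask_lock);
	sigaddset(&current->blocked, SIGQUIT);	/* block SIGQUIT */
	recalc_sigpending(current);		/* refresh ->sigpending */
	spin_unlock_irq(&current->sigmask_lock);
#endif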
/* True if we are on the alternate signal stack.  */

static inline int on_sig_stack(unsigned long sp)
{
	return (sp >= current->sas_ss_sp
		&& sp < current->sas_ss_sp + current->sas_ss_size);
}

static inline int sas_ss_flags(unsigned long sp)
{
	return (current->sas_ss_size == 0 ? SS_DISABLE
		: on_sig_stack(sp) ? SS_ONSTACK : 0);
}

extern int request_irq(unsigned int irq,
		       void (*handler)(int, void *, struct pt_regs *),
		       unsigned long flags,
		       const char *device,
		       void *dev_id);
extern void free_irq(unsigned int irq, void *dev_id);
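/*
 * Illustrative sketch, not part of the original header: registering
 * and releasing an interrupt handler.  The IRQ number, device name
 * and my_dev cookie are hypothetical; the dev_id passed to free_irq()
 * must match the one given to request_irq().
 */
#if 0
	static void my_interrupt(int irq, void *dev_id, struct pt_regs *regs)
	{
		/* acknowledge the device, then do the minimum work here */
	}

	if (request_irq(9, my_interrupt, 0, "mydev", &my_dev))
		return -EBUSY;		/* IRQ already taken */
	/* ... */
	free_irq(9, &my_dev);
#endif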
/*
 * This has now become a routine instead of a macro, it sets a flag if
 * it returns true (to do BSD-style accounting where the process is flagged
 * if it uses root privs). The implication of this is that you should do
 * normal permissions checks first, and check suser() last.
 *
 * [Dec 1997 -- Chris Evans]
 * For correctness, the above considerations need to be extended to
 * fsuser(). This is done, along with moving fsuser() checks to be
 * last.
 *
 * These will be removed, but in the mean time, when the SECURE_NOROOT
 * flag is set, uids don't grant privilege.
 */
extern inline int suser(void)
{
	if (!issecure(SECURE_NOROOT) && current->euid == 0) {
		current->flags |= PF_SUPERPRIV;
		return 1;
	}
	return 0;
}

extern inline int fsuser(void)
{
	if (!issecure(SECURE_NOROOT) && current->fsuid == 0) {
		current->flags |= PF_SUPERPRIV;
		return 1;
	}
	return 0;
}
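/*
 * Illustrative sketch, not part of the original header: the ordering
 * rule from the comment above.  Checking ownership before fsuser()
 * keeps PF_SUPERPRIV (and hence BSD-style accounting) accurate: the
 * process is only flagged when privilege is what actually let it
 * through.  The inode variable is hypothetical.
 */
#if 0
	if (current->fsuid == inode->i_uid)	/* normal check first */
		return 0;			/* allowed as owner */
	if (fsuser())				/* privilege check last */
		return 0;			/* allowed as root */
	return -EPERM;
#endif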
/*
 * capable() checks for a particular capability.
 * New privilege checks should use this interface, rather than suser() or
 * fsuser(). See include/linux/capability.h for defined capabilities.
 */

extern inline int capable(int cap)
{
#if 1 /* ok now */
	if (cap_raised(current->cap_effective, cap))
#else
	if (cap_is_fs_cap(cap) ? current->fsuid == 0 : current->euid == 0)
#endif
	{
		current->flags |= PF_SUPERPRIV;
		return 1;
	}
	return 0;
}
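/*
 * Illustrative sketch, not part of the original header: the preferred
 * style of check.  A driver ioctl that reconfigures hardware would
 * demand CAP_SYS_ADMIN (from <linux/capability.h>) instead of
 * comparing euid against 0:
 */
#if 0
	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	/* ... privileged reconfiguration ... */
#endif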
/*
 * Routines for handling mm_structs
 */
extern struct mm_struct * mm_alloc(void);
static inline void mmget(struct mm_struct * mm)
{
	atomic_inc(&mm->count);
}
extern void mmput(struct mm_struct *);
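/*
 * Illustrative sketch, not part of the original header: mm_structs
 * are reference counted, so code that stashes a pointer to another
 * task's mm takes a reference first and drops it when done:
 */
#if 0
	struct mm_struct *mm = p->mm;

	mmget(mm);		/* pin it: atomic_inc(&mm->count) */
	/* ... safely inspect mm->rss, mm->total_vm, etc. ... */
	mmput(mm);		/* may free the mm if we held the last ref */
#endif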
extern int copy_thread(int, unsigned long, unsigned long, struct task_struct *, struct pt_regs *);
extern void flush_thread(void);
extern void exit_thread(void);

extern void exit_mm(struct task_struct *);
extern void exit_fs(struct task_struct *);
extern void exit_files(struct task_struct *);
extern void exit_sighand(struct task_struct *);

extern int do_execve(char *, char **, char **, struct pt_regs *);
extern int do_fork(unsigned long, unsigned long, struct pt_regs *);
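/*
 * Illustrative sketch, not part of the original header: the CLONE_*
 * flags defined at the top of this file form the first argument to
 * do_fork(); the low CSIGNAL byte holds the signal raised when the
 * child exits.  This is roughly how a fork() and a thread-style
 * clone() differ; usp and regs stand in for the caller's values.
 */
#if 0
	/* fork(): child shares nothing, parent gets SIGCHLD at exit */
	do_fork(SIGCHLD, usp, regs);

	/* thread-like clone(): share VM, fs info, files and handlers */
	do_fork(CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND | SIGCHLD,
		usp, regs);
#endif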
/*
 * The wait-queues are circular lists, and you have to be *very* sure
 * to keep them correct. Use only these two functions to add/remove
 * entries in the queues.
 */
extern inline void __add_wait_queue(struct wait_queue ** p, struct wait_queue * wait)
{
	wait->next = *p ? : WAIT_QUEUE_HEAD(p);
	*p = wait;
}

extern rwlock_t waitqueue_lock;

extern inline void add_wait_queue(struct wait_queue ** p, struct wait_queue * wait)
{
	unsigned long flags;

	write_lock_irqsave(&waitqueue_lock, flags);
	__add_wait_queue(p, wait);
	write_unlock_irqrestore(&waitqueue_lock, flags);
}

extern inline void __remove_wait_queue(struct wait_queue ** p, struct wait_queue * wait)
{
	struct wait_queue * next = wait->next;
	struct wait_queue * head = next;
	struct wait_queue * tmp;

	while((tmp = head->next) != wait) {
		head = tmp;
	}
	head->next = next;
}

extern inline void remove_wait_queue(struct wait_queue ** p, struct wait_queue * wait)
{
	unsigned long flags;

	write_lock_irqsave(&waitqueue_lock, flags);
	__remove_wait_queue(p, wait);
	write_unlock_irqrestore(&waitqueue_lock, flags);
}
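/*
 * Illustrative sketch, not part of the original header: open-coding
 * what sleep_on() does with these primitives, but with a condition
 * check so wakeups aren't lost.  The queue head and condition are
 * hypothetical.
 */
#if 0
	struct wait_queue wait = { current, NULL };

	add_wait_queue(&queue_head, &wait);
	current->state = TASK_INTERRUPTIBLE;
	while (!condition && !signal_pending(current)) {
		schedule();			/* sleep until woken */
		current->state = TASK_INTERRUPTIBLE;
	}
	current->state = TASK_RUNNING;
	remove_wait_queue(&queue_head, &wait);
#endif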
#define REMOVE_LINKS(p) do { \
	(p)->next_task->prev_task = (p)->prev_task; \
	(p)->prev_task->next_task = (p)->next_task; \
	if ((p)->p_osptr) \
		(p)->p_osptr->p_ysptr = (p)->p_ysptr; \
	if ((p)->p_ysptr) \
		(p)->p_ysptr->p_osptr = (p)->p_osptr; \
	else \
		(p)->p_pptr->p_cptr = (p)->p_osptr; \
	} while (0)

#define SET_LINKS(p) do { \
	(p)->next_task = &init_task; \
	(p)->prev_task = init_task.prev_task; \
	init_task.prev_task->next_task = (p); \
	init_task.prev_task = (p); \
	(p)->p_ysptr = NULL; \
	if (((p)->p_osptr = (p)->p_pptr->p_cptr) != NULL) \
		(p)->p_osptr->p_ysptr = p; \
	(p)->p_pptr->p_cptr = p; \
	} while (0)

#define for_each_task(p) \
	for (p = &init_task ; (p = p->next_task) != &init_task ; )

#endif /* __KERNEL__ */