#include <asm/param.h>	/* for HZ */

extern unsigned long event;

#include <linux/config.h>
#include <linux/binfmts.h>
#include <linux/personality.h>
#include <linux/threads.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/times.h>
#include <linux/timex.h>

#include <asm/system.h>
#include <asm/semaphore.h>
#include <asm/ptrace.h>

#include <linux/smp.h>
#include <linux/tty.h>
#include <linux/sem.h>
#include <linux/signal.h>
#include <linux/securebits.h>
#include <linux/fs_struct.h>

#define CSIGNAL		0x000000ff	/* signal mask to be sent at exit */
#define CLONE_VM	0x00000100	/* set if VM shared between processes */
#define CLONE_FS	0x00000200	/* set if fs info shared between processes */
#define CLONE_FILES	0x00000400	/* set if open files shared between processes */
#define CLONE_SIGNAL	0x00000800	/* set if signal handlers and blocked signals shared */
#define CLONE_PID	0x00001000	/* set if pid shared */
#define CLONE_PTRACE	0x00002000	/* set if we want to let tracing continue on the child too */
#define CLONE_VFORK	0x00004000	/* set if the parent wants the child to wake it up on mm_release */
#define CLONE_PARENT	0x00008000	/* set if we want to have the same parent as the cloner */

#define CLONE_SIGHAND	CLONE_SIGNAL	/* Old name */

/*
 * These are the constants used to fake the fixed-point load-average
 * counting. Some notes:
 *  - 11 bit fractions expand to 22 bits by the multiplies: this gives
 *    a load-average precision of 10 bits integer + 11 bits fractional
 *  - if you want to count load-averages more often, you need more
 *    precision, or rounding will get you. With 2-second counting freq,
 *    the EXP_n values would be 1981, 2034 and 2043 if still using only
 *    11 bit fractions.
 */
extern unsigned long avenrun[];		/* Load averages */

#define FSHIFT		11		/* nr of bits of precision */
#define FIXED_1		(1<<FSHIFT)	/* 1.0 as fixed-point */
#define LOAD_FREQ	(5*HZ)		/* 5 sec intervals */
#define EXP_1		1884		/* 1/exp(5sec/1min) as fixed-point */
#define EXP_5		2014		/* 1/exp(5sec/5min) */
#define EXP_15		2037		/* 1/exp(5sec/15min) */

#define CALC_LOAD(load,exp,n) \
	load *= exp; \
	load += n*(FIXED_1-exp); \
	load >>= FSHIFT;

#define CT_TO_SECS(x)	((x) / HZ)
#define CT_TO_USECS(x)	(((x) % HZ) * 1000000/HZ)

extern int nr_running, nr_threads;
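/*
 * Illustrative sketch (userspace, not part of the original header): the
 * CALC_LOAD() recurrence above is an exponentially-weighted moving
 * average in 11-bit fixed point. This hypothetical demo, which relies on
 * the FIXED_1/EXP_1/FSHIFT definitions just above, feeds a constant
 * count of 2.5 runnable tasks through the 1-minute decay factor and
 * prints the average converging toward 2.50, decoding the integer and
 * fractional parts the same way /proc/loadavg does.
 */
#if 0	/* example only -- not compiled with the kernel */
#include <stdio.h>

int main(void)
{
	unsigned long avenrun_1 = 0;			/* 1-minute average */
	unsigned long active = (5 * FIXED_1) / 2;	/* 2.5 tasks, fixed-point */
	int sample;

	for (sample = 1; sample <= 60; sample++) {	/* one LOAD_FREQ tick each */
		CALC_LOAD(avenrun_1, EXP_1, active);
		if (sample % 12 == 0)
			printf("%lu.%02lu\n", avenrun_1 >> FSHIFT,
			       (avenrun_1 & (FIXED_1 - 1)) * 100 / FIXED_1);
	}
	return 0;
}
#endif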
#include <linux/time.h>
#include <linux/param.h>
#include <linux/resource.h>
#include <linux/timer.h>

#include <asm/processor.h>

#define TASK_RUNNING		0
#define TASK_INTERRUPTIBLE	1
#define TASK_UNINTERRUPTIBLE	2
#define TASK_STOPPED		8
#define TASK_EXCLUSIVE		32

#define __set_task_state(tsk, state_value)	\
	do { (tsk)->state = (state_value); } while (0)
#ifdef CONFIG_SMP
#define set_task_state(tsk, state_value)	\
	set_mb((tsk)->state, (state_value))
#else
#define set_task_state(tsk, state_value)	\
	__set_task_state((tsk), (state_value))
#endif

#define __set_current_state(state_value)	\
	do { current->state = (state_value); } while (0)
#ifdef CONFIG_SMP
#define set_current_state(state_value)		\
	set_mb(current->state, (state_value))
#else
#define set_current_state(state_value)		\
	__set_current_state(state_value)
#endif

/*
 * Scheduling policies
 */
#define SCHED_OTHER		0

/*
 * This is an additional bit set when we want to
 * yield the CPU for one re-schedule..
 */
#define SCHED_YIELD		0x10

#include <linux/spinlock.h>

/*
 * This serializes "schedule()" and also protects
 * the run-queue from deletions/modifications (but
 * _adding_ to the beginning of the run-queue has
 * a separate lock).
 */
extern rwlock_t tasklist_lock;
extern spinlock_t runqueue_lock;

extern void sched_init(void);
extern void init_idle(void);
extern void show_state(void);
extern void cpu_init(void);
extern void trap_init(void);
extern void update_process_times(int user);
extern void update_one_process(struct task_struct *p, unsigned long user,
			       unsigned long system, int cpu);

#define	MAX_SCHEDULE_TIMEOUT	LONG_MAX
extern signed long FASTCALL(schedule_timeout(signed long timeout));
asmlinkage void schedule(void);
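/*
 * Illustrative sketch (hypothetical, not part of the original header):
 * the usual pattern for sleeping with a bound using schedule_timeout().
 * The caller marks itself INTERRUPTIBLE *before* testing its condition,
 * so a wake-up arriving between the test and schedule_timeout() is not
 * lost; schedule_timeout() returns the number of ticks remaining (0 on
 * expiry). "my_condition" stands in for whatever the caller waits on.
 */
#if 0	/* example only */
static int my_condition;

static signed long wait_briefly(signed long timeout)
{
	set_current_state(TASK_INTERRUPTIBLE);
	while (!my_condition && timeout) {
		timeout = schedule_timeout(timeout);
		set_current_state(TASK_INTERRUPTIBLE);
	}
	set_current_state(TASK_RUNNING);
	return timeout;
}
#endif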
/*
 * The default fd array needs to be at least BITS_PER_LONG,
 * as this is the granularity returned by copy_fdset().
 */
#define NR_OPEN_DEFAULT BITS_PER_LONG

/*
 * Open file table structure
 */
struct files_struct {
	/* ... */
	struct file ** fd;	/* current fd array */
	fd_set *close_on_exec;
	fd_set *open_fds;
	fd_set close_on_exec_init;
	fd_set open_fds_init;
	struct file * fd_array[NR_OPEN_DEFAULT];
};

#define INIT_FILES { \
	/* ... */ \
	&init_files.fd_array[0], \
	&init_files.close_on_exec_init, \
	&init_files.open_fds_init, \
	/* ... */ \
}

/* Maximum number of active map areas. This is a random (large) number */
#define MAX_MAP_COUNT	(65536)

/* Number of map areas at which the AVL tree is activated. This is arbitrary. */
#define AVL_MIN_MAP_COUNT	32
struct mm_struct {
	struct vm_area_struct * mmap;		/* list of VMAs */
	struct vm_area_struct * mmap_avl;	/* tree of VMAs */
	struct vm_area_struct * mmap_cache;	/* last find_vma result */
	/* ... */
	atomic_t mm_users;	/* How many users with user space? */
	atomic_t mm_count;	/* How many references to "struct mm_struct" (users count as 1) */
	int map_count;		/* number of VMAs */
	struct semaphore mmap_sem;
	spinlock_t page_table_lock;
	unsigned long context;
	unsigned long start_code, end_code, start_data, end_data;
	unsigned long start_brk, brk, start_stack;
	unsigned long arg_start, arg_end, env_start, env_end;
	unsigned long rss, total_vm, locked_vm;
	unsigned long def_flags;
	unsigned long cpu_vm_mask;
	unsigned long swap_cnt;	/* number of pages to swap on next pass */
	unsigned long swap_address;
	/*
	 * This is an architecture-specific pointer: the portable
	 * part of Linux does not know about any segments.
	 */
	/* ... */
};

#define INIT_MM(name) { \
	&init_mmap, NULL, NULL, \
	/* ... */ \
	ATOMIC_INIT(2), ATOMIC_INIT(1), 1, \
	__MUTEX_INITIALIZER(name.mmap_sem), \
	SPIN_LOCK_UNLOCKED, \
	/* ... */ \
}
struct signal_struct {
	/* ... */
	struct k_sigaction action[_NSIG];
	struct sigpending pending;
};

#define INIT_SIGNALS { \
	/* ... */ \
	{ NULL, &init_signals.pending.head, }, \
	/* ... */ \
}

/*
 * Some day this will be a full-fledged user tracking system..
 */
struct user_struct {
	atomic_t __count;	/* reference count */
	atomic_t processes;	/* How many processes does this user have? */
	atomic_t files;		/* How many open files does this user have? */

	/* Hash table maintenance information */
	struct user_struct *next, **pprev;
	/* ... */
};

#define get_current_user() ({ \
	struct user_struct *__user = current->user; \
	atomic_inc(&__user->__count); \
	__user; })
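/*
 * Illustrative sketch (hypothetical): get_current_user() is a GNU
 * statement expression, so it both bumps the reference count and yields
 * the user_struct pointer. Each get must eventually be paired with a
 * free_uid() (declared further down in this header).
 */
#if 0	/* example only */
static void charge_example(void)
{
	struct user_struct *u = get_current_user();

	/* ... account something against 'u' ... */
	free_uid(u);	/* drops the reference taken above */
}
#endif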
extern struct user_struct root_user;
#define INIT_USER (&root_user)

struct task_struct {
	/*
	 * offsets of these are hardcoded elsewhere - touch with care
	 */
	volatile long state;	/* -1 unrunnable, 0 runnable, >0 stopped */
	unsigned long flags;	/* per process flags, defined below */
	int sigpending;
	mm_segment_t addr_limit;	/* thread address space:
						0-0xBFFFFFFF for user-thread
						0-0xFFFFFFFF for kernel-thread
					 */
	struct exec_domain *exec_domain;
	volatile long need_resched;
	unsigned long ptrace;

	int lock_depth;		/* Lock depth */

	/*
	 * offset 32 begins here on 32-bit platforms. We keep
	 * all fields in a single cacheline that are needed for
	 * the goodness() loop in schedule().
	 */
	long counter;
	/* ... */
	unsigned long policy;
	struct mm_struct *mm;
	int has_cpu, processor;
	unsigned long cpus_allowed;
	/*
	 * (only the 'next' pointer fits into the cacheline, but
	 * that's just fine.)
	 */
	struct list_head run_list;
	/* ... */

	struct task_struct *next_task, *prev_task;
	struct mm_struct *active_mm;

	struct linux_binfmt *binfmt;
	int exit_code, exit_signal;
	int pdeath_signal;	/* The signal sent when the parent dies */
	/* ... */
	unsigned long personality;
	/* ... */
	pid_t pid;
	/* ... */
	/* boolean value for session group leader */
	/* ... */
	/*
	 * pointers to (original) parent process, youngest child, younger sibling,
	 * older sibling, respectively.  (p->father can be replaced with
	 * p->p_pptr->pid)
	 */
	struct task_struct *p_opptr, *p_pptr, *p_cptr, *p_ysptr, *p_osptr;
	struct list_head thread_group;

	/* PID hash table linkage. */
	struct task_struct *pidhash_next;
	struct task_struct **pidhash_pprev;

	wait_queue_head_t wait_chldexit;	/* for wait4() */
	struct semaphore *vfork_sem;		/* for vfork() */
	unsigned long rt_priority;
	unsigned long it_real_value, it_prof_value, it_virt_value;
	unsigned long it_real_incr, it_prof_incr, it_virt_incr;
	struct timer_list real_timer;
	/* ... */
	unsigned long start_time;
	long per_cpu_utime[NR_CPUS], per_cpu_stime[NR_CPUS];
	/* mm fault and swap info: this can arguably be seen as either mm-specific or thread-specific */
	unsigned long min_flt, maj_flt, nswap, cmin_flt, cmaj_flt, cnswap;
	/* ... */

	/* process credentials */
	uid_t uid, euid, suid, fsuid;
	gid_t gid, egid, sgid, fsgid;
	/* ... */
	gid_t groups[NGROUPS];
	kernel_cap_t cap_effective, cap_inheritable, cap_permitted;
	int keep_capabilities:1;
	struct user_struct *user;
	struct rlimit rlim[RLIM_NLIMITS];
	unsigned short used_math;
	/* ... */
	/* file system info */
	/* ... */
	struct tty_struct *tty;	/* NULL if no tty */
	struct sem_undo *semundo;
	struct sem_queue *semsleeping;
	/* CPU-specific state of this task */
	struct thread_struct thread;
	/* filesystem information */
	struct fs_struct *fs;
	/* open file information */
	struct files_struct *files;
	/* signal handlers */
	spinlock_t sigmask_lock;	/* Protects signal and blocked */
	struct signal_struct *sig;

	sigset_t blocked;
	struct sigpending pending;

	unsigned long sas_ss_sp;
	size_t sas_ss_size;
	int (*notifier)(void *priv);
	void *notifier_data;
	sigset_t *notifier_mask;

	/* Thread group tracking */
	/* ... */
	/* Protection of (de-)allocation: mm, files, fs, tty */
	spinlock_t alloc_lock;
};

/*
 * Per process flags
 */
#define PF_ALIGNWARN	0x00000001	/* Print alignment warning msgs */
					/* Not implemented yet, only for 486 */
#define PF_STARTING	0x00000002	/* being created */
#define PF_EXITING	0x00000004	/* getting shut down */
#define PF_FORKNOEXEC	0x00000040	/* forked but didn't exec */
#define PF_SUPERPRIV	0x00000100	/* used super-user privileges */
#define PF_DUMPCORE	0x00000200	/* dumped core */
#define PF_SIGNALED	0x00000400	/* killed by a signal */
#define PF_MEMALLOC	0x00000800	/* Allocating memory */
#define PF_VFORK	0x00001000	/* Wake up parent in mm_release */

#define PF_USEDFPU	0x00100000	/* task used FPU this quantum (SMP) */

/*
 * Ptrace flags
 */
#define PT_PTRACED	0x00000001
#define PT_TRACESYS	0x00000002
#define PT_DTRACE	0x00000004	/* delayed trace (used on m68k, i386) */

/*
 * Limit the stack to some sane default: root can always
 * increase this limit if needed..  8MB seems reasonable.
 */
#define _STK_LIM	(8*1024*1024)

#define DEF_COUNTER	(10*HZ/100)	/* 100 ms time slice */
#define MAX_COUNTER	(20*HZ/100)

/*
 * INIT_TASK is used to set up the first task table, touch at
 * your own risk! Base=0, limit=0x1fffff (=2MB)
 */
#define INIT_TASK(tsk)	\
{								\
	/* ... */						\
	addr_limit:	KERNEL_DS,				\
	exec_domain:	&default_exec_domain,			\
	/* ... */						\
	counter:	DEF_COUNTER,				\
	/* ... */						\
	policy:		SCHED_OTHER,				\
	/* ... */						\
	active_mm:	&init_mm,				\
	/* ... */						\
	run_list:	LIST_HEAD_INIT(tsk.run_list),		\
	/* ... */						\
	thread_group:	LIST_HEAD_INIT(tsk.thread_group),	\
	wait_chldexit:	__WAIT_QUEUE_HEAD_INITIALIZER(tsk.wait_chldexit),\
	real_timer:	{					\
		function:	it_real_fn			\
	},							\
	cap_effective:	CAP_INIT_EFF_SET,			\
	cap_inheritable: CAP_INIT_INH_SET,			\
	cap_permitted:	CAP_FULL_SET,				\
	keep_capabilities: 0,					\
	rlim:		INIT_RLIMITS,				\
	/* ... */						\
	thread:		INIT_THREAD,				\
	/* ... */						\
	files:		&init_files,				\
	sigmask_lock:	SPIN_LOCK_UNLOCKED,			\
	sig:		&init_signals,				\
	pending:	{ NULL, &tsk.pending.head, {{0}}},	\
	/* ... */						\
	alloc_lock:	SPIN_LOCK_UNLOCKED			\
}

#ifndef INIT_TASK_SIZE
# define INIT_TASK_SIZE	2048*sizeof(long)
#endif
union task_union {
	struct task_struct task;
	unsigned long stack[INIT_TASK_SIZE/sizeof(long)];
};

extern union task_union init_task_union;

extern struct mm_struct init_mm;
extern struct task_struct *init_tasks[NR_CPUS];
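/*
 * Illustrative sketch (architecture-dependent, shown for i386-style
 * layouts only): because each task's kernel stack lives in the same
 * 8KB task_union as its task_struct, the kernel can recover "current"
 * by rounding the stack pointer down to an INIT_TASK_SIZE boundary.
 * The asm/current.h included further down typically derives it this way.
 */
#if 0	/* example only */
static inline struct task_struct *current_from_sp(unsigned long sp)
{
	return (struct task_struct *)(sp & ~(INIT_TASK_SIZE - 1));
}
#endif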
/* PID hashing. (shouldn't this be dynamic?) */
#define PIDHASH_SZ (4096 >> 2)
extern struct task_struct *pidhash[PIDHASH_SZ];

#define pid_hashfn(x)	((((x) >> 8) ^ (x)) & (PIDHASH_SZ - 1))

static inline void hash_pid(struct task_struct *p)
{
	struct task_struct **htable = &pidhash[pid_hashfn(p->pid)];

	if ((p->pidhash_next = *htable) != NULL)
		(*htable)->pidhash_pprev = &p->pidhash_next;
	*htable = p;
	p->pidhash_pprev = htable;
}

static inline void unhash_pid(struct task_struct *p)
{
	if (p->pidhash_next)
		p->pidhash_next->pidhash_pprev = p->pidhash_pprev;
	*p->pidhash_pprev = p->pidhash_next;
}

static inline struct task_struct *find_task_by_pid(int pid)
{
	struct task_struct *p, **htable = &pidhash[pid_hashfn(pid)];

	for (p = *htable; p && p->pid != pid; p = p->pidhash_next)
		;

	return p;
}
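/*
 * Illustrative sketch (hypothetical): pidhash lookups are not locked by
 * the hash itself; callers are expected to hold tasklist_lock so the
 * returned task cannot be unhashed underneath them (see unhash_process()
 * further down, which takes the write side of that lock).
 */
#if 0	/* example only */
static int pid_exists(int pid)
{
	struct task_struct *p;
	int alive;

	read_lock(&tasklist_lock);
	p = find_task_by_pid(pid);
	alive = (p != NULL);
	read_unlock(&tasklist_lock);
	return alive;
}
#endif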
/* per-UID process charging. */
extern struct user_struct * alloc_uid(uid_t);
extern void free_uid(struct user_struct *);

#include <asm/current.h>

extern unsigned long volatile jiffies;
extern unsigned long itimer_ticks;
extern unsigned long itimer_next;
extern struct timeval xtime;
extern void do_timer(struct pt_regs *);

extern unsigned int * prof_buffer;
extern unsigned long prof_len;
extern unsigned long prof_shift;

#define CURRENT_TIME (xtime.tv_sec)

extern void FASTCALL(__wake_up(wait_queue_head_t *q, unsigned int mode));
extern void FASTCALL(__wake_up_sync(wait_queue_head_t *q, unsigned int mode));
extern void FASTCALL(sleep_on(wait_queue_head_t *q));
extern long FASTCALL(sleep_on_timeout(wait_queue_head_t *q,
				      signed long timeout));
extern void FASTCALL(interruptible_sleep_on(wait_queue_head_t *q));
extern long FASTCALL(interruptible_sleep_on_timeout(wait_queue_head_t *q,
						    signed long timeout));
extern void FASTCALL(wake_up_process(struct task_struct * tsk));

#define wake_up(x)			__wake_up((x), TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE | TASK_EXCLUSIVE)
#define wake_up_all(x)			__wake_up((x), TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE)
#define wake_up_sync(x)			__wake_up_sync((x), TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE | TASK_EXCLUSIVE)
#define wake_up_interruptible(x)	__wake_up((x), TASK_INTERRUPTIBLE | TASK_EXCLUSIVE)
#define wake_up_interruptible_all(x)	__wake_up((x), TASK_INTERRUPTIBLE)
#define wake_up_interruptible_sync(x)	__wake_up_sync((x), TASK_INTERRUPTIBLE | TASK_EXCLUSIVE)
extern int in_group_p(gid_t);
extern int in_egroup_p(gid_t);

extern void proc_caches_init(void);
extern void flush_signals(struct task_struct *);
extern void flush_signal_handlers(struct task_struct *);
extern int dequeue_signal(sigset_t *, siginfo_t *);
extern void block_all_signals(int (*notifier)(void *priv), void *priv,
			      sigset_t *mask);
extern void unblock_all_signals(void);
extern int send_sig_info(int, struct siginfo *, struct task_struct *);
extern int force_sig_info(int, struct siginfo *, struct task_struct *);
extern int kill_pg_info(int, struct siginfo *, pid_t);
extern int kill_sl_info(int, struct siginfo *, pid_t);
extern int kill_proc_info(int, struct siginfo *, pid_t);
extern void notify_parent(struct task_struct *, int);
extern void do_notify_parent(struct task_struct *, int);
extern void force_sig(int, struct task_struct *);
extern int send_sig(int, struct task_struct *, int);
extern int kill_pg(pid_t, int, int);
extern int kill_sl(pid_t, int, int);
extern int kill_proc(pid_t, int, int);
extern int do_sigaction(int, const struct k_sigaction *, struct k_sigaction *);
extern int do_sigaltstack(const stack_t *, stack_t *, unsigned long);

static inline int signal_pending(struct task_struct *p)
{
	return (p->sigpending != 0);
}
/*
 * Re-calculate pending state from the set of locally pending
 * signals, globally pending signals, and blocked signals.
 */
static inline int has_pending_signals(sigset_t *p1, sigset_t *p2, sigset_t *blocked)
{
	unsigned long ready;
	long i;

	switch (_NSIG_WORDS) {
	default:
		for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
			ready |= (p1->sig[i] | p2->sig[i]) &~ blocked->sig[i];
		break;

	case 4: ready  = (p1->sig[3] | p2->sig[3]) &~ blocked->sig[3];
		ready |= (p1->sig[2] | p2->sig[2]) &~ blocked->sig[2];
		ready |= (p1->sig[1] | p2->sig[1]) &~ blocked->sig[1];
		ready |= (p1->sig[0] | p2->sig[0]) &~ blocked->sig[0];
		break;

	case 2: ready  = (p1->sig[1] | p2->sig[1]) &~ blocked->sig[1];
		ready |= (p1->sig[0] | p2->sig[0]) &~ blocked->sig[0];
		break;

	case 1: ready  = (p1->sig[0] | p2->sig[0]) &~ blocked->sig[0];
	}
	return ready != 0;
}
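/*
 * Illustrative sketch (userspace): the switch above is just a manually
 * unrolled loop; the work per word is the "&~ blocked" masking. This
 * hypothetical demo shows that a signal which is both pending and
 * blocked contributes nothing to "ready".
 */
#if 0	/* example only */
#include <stdio.h>

int main(void)
{
	unsigned long pending = 1UL << 14;	/* some signal, word 0 */
	unsigned long blocked = 1UL << 14;	/* ...currently blocked */

	printf("ready=%lu\n", pending & ~blocked);	/* prints 0 */
	return 0;
}
#endif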
/* Reevaluate whether the task has signals pending delivery.
   This is required every time the blocked sigset_t changes.
   All callers should have t->sigmask_lock.  */

static inline void recalc_sigpending(struct task_struct *t)
{
	t->sigpending = has_pending_signals(&t->pending.signal,
					    &t->sig->pending.signal,
					    &t->blocked);
}

/* True if we are on the alternate signal stack.  */

static inline int on_sig_stack(unsigned long sp)
{
	return (sp - current->sas_ss_sp < current->sas_ss_size);
}

static inline int sas_ss_flags(unsigned long sp)
{
	return (current->sas_ss_size == 0 ? SS_DISABLE
		: on_sig_stack(sp) ? SS_ONSTACK : 0);
}
extern int request_irq(unsigned int,
		       void (*handler)(int, void *, struct pt_regs *),
		       unsigned long, const char *, void *);
extern void free_irq(unsigned int, void *);

/*
 * This has now become a routine instead of a macro; it sets a flag if
 * it returns true (to do BSD-style accounting where the process is flagged
 * if it uses root privs). The implication of this is that you should do
 * normal permissions checks first, and check suser() last.
 *
 * [Dec 1997 -- Chris Evans]
 * For correctness, the above considerations need to be extended to
 * fsuser(). This is done, along with moving fsuser() checks to be
 * last.
 *
 * These will be removed, but in the mean time, when the SECURE_NOROOT
 * flag is set, uids don't grant privilege.
 */
static inline int suser(void)
{
	if (!issecure(SECURE_NOROOT) && current->euid == 0) {
		current->flags |= PF_SUPERPRIV;
		return 1;
	}
	return 0;
}

static inline int fsuser(void)
{
	if (!issecure(SECURE_NOROOT) && current->fsuid == 0) {
		current->flags |= PF_SUPERPRIV;
		return 1;
	}
	return 0;
}
/*
 * capable() checks for a particular capability.
 * New privilege checks should use this interface, rather than suser() or
 * fsuser(). See include/linux/capability.h for defined capabilities.
 */
static inline int capable(int cap)
{
#if 1 /* ok now */
	if (cap_raised(current->cap_effective, cap))
#else
	if (cap_is_fs_cap(cap) ? current->fsuid == 0 : current->euid == 0)
#endif
	{
		current->flags |= PF_SUPERPRIV;
		return 1;
	}
	return 0;
}
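/*
 * Illustrative sketch (hypothetical): the idiom the comment above
 * recommends -- privilege is tested through capable(), which also sets
 * PF_SUPERPRIV for BSD-style accounting, instead of open-coding
 * euid == 0 checks.
 */
#if 0	/* example only */
static int reboot_allowed(void)
{
	if (!capable(CAP_SYS_BOOT))
		return -EPERM;
	return 0;
}
#endif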
/*
 * Routines for handling mm_structs
 */
extern struct mm_struct * mm_alloc(void);

extern struct mm_struct * start_lazy_tlb(void);
extern void end_lazy_tlb(struct mm_struct *mm);

/* mmdrop drops the mm and the page tables */
extern inline void FASTCALL(__mmdrop(struct mm_struct *));
static inline void mmdrop(struct mm_struct * mm)
{
	if (atomic_dec_and_test(&mm->mm_count))
		__mmdrop(mm);
}

/* mmput gets rid of the mappings and all user-space */
extern void mmput(struct mm_struct *);
/* Remove the current task's stale references to the old mm_struct */
extern void mm_release(void);
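/*
 * Illustrative sketch (hypothetical helper): the two-counter rule
 * implied above. Users of the address space hold mm_users (paired with
 * mmput()); bare references to the struct itself, such as the
 * scheduler's lazy-TLB active_mm, hold mm_count (paired with mmdrop()).
 * task_lock(), defined near the end of this header, protects p->mm.
 */
#if 0	/* example only */
static struct mm_struct *pin_task_mm(struct task_struct *p)
{
	struct mm_struct *mm;

	task_lock(p);
	mm = p->mm;
	if (mm)
		atomic_inc(&mm->mm_users);	/* caller later does mmput(mm) */
	task_unlock(p);
	return mm;
}
#endif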
/*
 * Routines for handling the fd arrays
 */
extern struct file ** alloc_fd_array(int);
extern int expand_fd_array(struct files_struct *, int nr);
extern void free_fd_array(struct file **, int);

extern fd_set *alloc_fdset(int);
extern int expand_fdset(struct files_struct *, int nr);
extern void free_fdset(fd_set *, int);

extern int copy_thread(int, unsigned long, unsigned long, struct task_struct *, struct pt_regs *);
extern void flush_thread(void);
extern void exit_thread(void);
extern void exit_mm(struct task_struct *);
extern void exit_files(struct task_struct *);
extern void exit_sighand(struct task_struct *);

extern void daemonize(void);

extern int do_execve(char *, char **, char **, struct pt_regs *);
extern int do_fork(unsigned long, unsigned long, struct pt_regs *);

extern void FASTCALL(add_wait_queue(wait_queue_head_t *q, wait_queue_t * wait));
extern void FASTCALL(add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t * wait));
extern void FASTCALL(remove_wait_queue(wait_queue_head_t *q, wait_queue_t * wait));

#define __wait_event(wq, condition)					\
do {									\
	wait_queue_t __wait;						\
	init_waitqueue_entry(&__wait, current);				\
									\
	add_wait_queue(&wq, &__wait);					\
	for (;;) {							\
		set_current_state(TASK_UNINTERRUPTIBLE);		\
		if (condition)						\
			break;						\
		schedule();						\
	}								\
	current->state = TASK_RUNNING;					\
	remove_wait_queue(&wq, &__wait);				\
} while (0)

#define wait_event(wq, condition)					\
do {									\
	if (condition)							\
		break;							\
	__wait_event(wq, condition);					\
} while (0)

#define __wait_event_interruptible(wq, condition, ret)			\
do {									\
	wait_queue_t __wait;						\
	init_waitqueue_entry(&__wait, current);				\
									\
	add_wait_queue(&wq, &__wait);					\
	for (;;) {							\
		set_current_state(TASK_INTERRUPTIBLE);			\
		if (condition)						\
			break;						\
		if (!signal_pending(current)) {				\
			schedule();					\
			continue;					\
		}							\
		ret = -ERESTARTSYS;					\
		break;							\
	}								\
	current->state = TASK_RUNNING;					\
	remove_wait_queue(&wq, &__wait);				\
} while (0)

#define wait_event_interruptible(wq, condition)				\
({									\
	int __ret = 0;							\
	if (!(condition))						\
		__wait_event_interruptible(wq, condition, __ret);	\
	__ret;								\
})

#define REMOVE_LINKS(p) do { \
	(p)->next_task->prev_task = (p)->prev_task; \
	(p)->prev_task->next_task = (p)->next_task; \
	if ((p)->p_osptr) \
		(p)->p_osptr->p_ysptr = (p)->p_ysptr; \
	if ((p)->p_ysptr) \
		(p)->p_ysptr->p_osptr = (p)->p_osptr; \
	else \
		(p)->p_pptr->p_cptr = (p)->p_osptr; \
	} while (0)

#define SET_LINKS(p) do { \
	(p)->next_task = &init_task; \
	(p)->prev_task = init_task.prev_task; \
	init_task.prev_task->next_task = (p); \
	init_task.prev_task = (p); \
	(p)->p_ysptr = NULL; \
	if (((p)->p_osptr = (p)->p_pptr->p_cptr) != NULL) \
		(p)->p_osptr->p_ysptr = p; \
	(p)->p_pptr->p_cptr = p; \
	} while (0)

#define for_each_task(p) \
	for (p = &init_task ; (p = p->next_task) != &init_task ; )

#define next_thread(p) \
	list_entry((p)->thread_group.next, struct task_struct, thread_group)
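/*
 * Illustrative sketch (hypothetical): the two halves of the wait_event
 * protocol defined above. The waiter re-tests its condition after every
 * wakeup (wakeups may be spurious or shared); the waker publishes the
 * condition first and only then calls wake_up(). Note that
 * wait_event_interruptible() returns -ERESTARTSYS if a signal arrived
 * before the condition became true.
 */
#if 0	/* example only */
static wait_queue_head_t my_wq;		/* init_waitqueue_head() at init time */
static int my_done;

static int waiter(void)
{
	return wait_event_interruptible(my_wq, my_done != 0);
}

static void waker(void)
{
	my_done = 1;		/* publish the condition... */
	wake_up(&my_wq);	/* ...then wake sleepers to re-test it */
}
#endif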
static inline void del_from_runqueue(struct task_struct * p)
{
	nr_running--;
	list_del(&p->run_list);
	p->run_list.next = NULL;
}

static inline int task_on_runqueue(struct task_struct *p)
{
	return (p->run_list.next != NULL);
}
static inline void unhash_process(struct task_struct *p)
{
	if (task_on_runqueue(p)) BUG();
	write_lock_irq(&tasklist_lock);
	nr_threads--;
	unhash_pid(p);
	REMOVE_LINKS(p);
	list_del(&p->thread_group);
	write_unlock_irq(&tasklist_lock);
}

static inline void task_lock(struct task_struct *p)
{
	spin_lock(&p->alloc_lock);
}

static inline void task_unlock(struct task_struct *p)
{
	spin_unlock(&p->alloc_lock);
}
/* write full pathname into buffer and return start of pathname */
static inline char * d_path(struct dentry *dentry, struct vfsmount *vfsmnt,
			    char *buf, int buflen)
{
	char *res;
	struct vfsmount *rootmnt;
	struct dentry *root;

	read_lock(&current->fs->lock);
	rootmnt = mntget(current->fs->rootmnt);
	root = dget(current->fs->root);
	read_unlock(&current->fs->lock);
	spin_lock(&dcache_lock);
	res = __d_path(dentry, vfsmnt, root, rootmnt, buf, buflen);
	spin_unlock(&dcache_lock);
	dput(root);
	mntput(rootmnt);
	return res;
}

#endif /* __KERNEL__ */
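/*
 * Illustrative sketch (hypothetical caller, appended after the header
 * body): d_path() above fills the buffer from the end and returns a
 * pointer *into* it, so callers use the returned pointer, not the
 * buffer start. A whole page is the customary scratch buffer.
 */
#if 0	/* example only */
static char *file_pathname(struct file *file, char *page)
{
	/* 'page' is a PAGE_SIZE buffer, e.g. from __get_free_page() */
	return d_path(file->f_dentry, file->f_vfsmnt, page, PAGE_SIZE);
}
#endif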