Part 1: Analysis of the task_struct structure
1. Processes come in two special forms: a process without a user virtual address space is called a kernel thread, and processes that share a user virtual address space are called user threads. All user threads sharing the same user virtual address space form a thread group (a small user-space sketch after the table illustrates this).
| C standard library term | Linux kernel term |
| --- | --- |
| process containing multiple threads | thread group |
| process containing a single thread | task or process |
| thread | process sharing a user virtual address space |
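To make the thread-group idea concrete, here is a small user-space sketch (not part of the original text): every thread of a process shares the thread group ID, which is what getpid() returns, while the per-thread PID is returned by gettid (queried via syscall() for portability). Compile with gcc -pthread.

#define _GNU_SOURCE
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

static void *worker(void *arg)
{
	/* Same process: getpid() (the tgid) matches the main thread's value,
	 * while the per-thread pid returned by gettid differs. */
	printf("worker: pid(tgid)=%ld tid(pid)=%ld\n",
	       (long)getpid(), (long)syscall(SYS_gettid));
	return arg;
}

int main(void)
{
	pthread_t t;

	printf("main:   pid(tgid)=%ld tid(pid)=%ld\n",
	       (long)getpid(), (long)syscall(SYS_gettid));
	pthread_create(&t, NULL, worker, NULL);
	pthread_join(t, NULL);
	return 0;
}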
2. The Linux kernel provides API functions to set the process state; the main states are (a kernel-side sketch follows the list):
TASK_RUNNING (runnable or ready state)
TASK_INTERRUPTIBLE (interruptible sleep, also called light sleep)
TASK_UNINTERRUPTIBLE (uninterruptible sleep, also called deep sleep; such processes show up as state "D" in the output of ps)
TASK_STOPPED (stopped state)
EXIT_ZOMBIE (zombie state)
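As a hedged, minimal kernel-side sketch (the wait queue and condition names are made up for illustration), this is the usual pattern by which code moves a task from TASK_RUNNING into TASK_INTERRUPTIBLE and back:

#include <linux/sched.h>
#include <linux/wait.h>

static DECLARE_WAIT_QUEUE_HEAD(demo_wq);	/* hypothetical wait queue */
static int demo_condition;			/* hypothetical condition */

static void demo_wait_for_condition(void)
{
	DEFINE_WAIT(wait);

	while (!demo_condition) {
		/* mark current as TASK_INTERRUPTIBLE and queue it on demo_wq */
		prepare_to_wait(&demo_wq, &wait, TASK_INTERRUPTIBLE);
		if (demo_condition)
			break;
		schedule();	/* sleep until wake_up(&demo_wq) makes us TASK_RUNNING again */
	}
	finish_wait(&demo_wq, &wait);	/* back to TASK_RUNNING, removed from the queue */
}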
3. Linux kernel directory structure
arch: architecture-specific code for the different platforms
block: the block device layer (block I/O subsystem)
Documentation: documentation describing module functionality and protocol conventions
drivers: device drivers (USB bus, PCI bus, network cards, graphics cards, etc.)
fs: the virtual file system (VFS) code and the individual file systems
include: most of the header files the kernel source depends on
init: kernel initialization code, including the entry points that bring up the various kernel components
ipc: implementation of inter-process communication
kernel: core kernel code (process management, interrupt handling, etc.)
lib: a subset of the C standard library
LICENSES: license texts; the kernel is provided under the GNU General Public License version 2 (LICENSES/preferred/GPL-2.0)
mm: the memory-management implementation
net: networking protocol code (TCP, IPv6, Wi-Fi, etc.)
samples: kernel example code
sound: sound card driver source
tools: user-space tools for working with the kernel (e.g. perf)
usr: code for building the initramfs image packed with the kernel
virt: virtualization (KVM) support
4. The Linux process descriptor is the task_struct structure; the source analysis below is based on kernel 5.6.18 (a short sketch of accessing these fields through the current macro follows the listing):
include/linux/sched.h
// Process descriptor
struct task_struct {
#ifdef CONFIG_THREAD_INFO_IN_TASK/** For reasons of header soup (see current_thread_info()), this* must be the first element of task_struct.*/struct thread_info thread_info;
#endif
	/* -1 unrunnable, 0 runnable, >0 stopped: */
	volatile long state;		// process state flag

	/*
	 * This begins the randomizable portion of task_struct. Only
	 * scheduling-critical items should be added above here.
	 */
	randomized_struct_fields_start

	void *stack;			// points to the kernel stack
	refcount_t usage;
	/* Per task flags (PF_*), defined further below: */
	unsigned int flags;
	unsigned int ptrace;

#ifdef CONFIG_SMP
	struct llist_node wake_entry;
	int on_cpu;
#ifdef CONFIG_THREAD_INFO_IN_TASK/* Current CPU: */unsigned int cpu;
#endifunsigned int wakee_flips;unsigned long wakee_flip_decay_ts;struct task_struct *last_wakee;/** recent_used_cpu is initially set as the last CPU used by a task* that wakes affine another task. Waker/wakee relationships can* push tasks around a CPU where each wakeup moves to the next one.* Tracking a recently used CPU allows a quick search for a recently* used CPU that may be idle.*/int recent_used_cpu;int wake_cpu;
#endif
	int on_rq;

	// The following four members: scheduling policy and priorities
	int prio;
	int static_prio;
	int normal_prio;
	unsigned int rt_priority;

	const struct sched_class *sched_class;
	struct sched_entity se;
	struct sched_rt_entity rt;
#ifdef CONFIG_CGROUP_SCHEDstruct task_group *sched_task_group;
#endifstruct sched_dl_entity dl;#ifdef CONFIG_UCLAMP_TASK/* Clamp values requested for a scheduling entity */struct uclamp_se uclamp_req[UCLAMP_CNT];/* Effective clamp values used for a scheduling entity */struct uclamp_se uclamp[UCLAMP_CNT];
#endif#ifdef CONFIG_PREEMPT_NOTIFIERS/* List of struct preempt_notifier: */struct hlist_head preempt_notifiers;
#endif#ifdef CONFIG_BLK_DEV_IO_TRACEunsigned int btrace_seq;
#endifunsigned int policy;int nr_cpus_allowed;const cpumask_t *cpus_ptr;cpumask_t cpus_mask;#ifdef CONFIG_PREEMPT_RCUint rcu_read_lock_nesting;union rcu_special rcu_read_unlock_special;struct list_head rcu_node_entry;struct rcu_node *rcu_blocked_node;
#endif /* #ifdef CONFIG_PREEMPT_RCU */#ifdef CONFIG_TASKS_RCUunsigned long rcu_tasks_nvcsw;u8 rcu_tasks_holdout;u8 rcu_tasks_idx;int rcu_tasks_idle_cpu;struct list_head rcu_tasks_holdout_list;
#endif /* #ifdef CONFIG_TASKS_RCU */struct sched_info sched_info;struct list_head tasks;
#ifdef CONFIG_SMPstruct plist_node pushable_tasks;struct rb_node pushable_dl_tasks;
#endif
	// These two pointers refer to memory descriptors:
	// - normal process: mm and active_mm point to the same memory descriptor
	// - kernel thread: mm is a NULL pointer
	// - while a kernel thread runs, active_mm points to a memory descriptor borrowed from a process
	struct mm_struct *mm;
	struct mm_struct *active_mm;

	/* Per-thread vma caching: */
	struct vmacache vmacache;

#ifdef SPLIT_RSS_COUNTING
	struct task_rss_stat rss_stat;
#endifint exit_state;int exit_code;int exit_signal;/* The signal sent when the parent dies: */int pdeath_signal;/* JOBCTL_*, siglock protected: */unsigned long jobctl;/* Used for emulating ABI behavior of previous Linux versions: */unsigned int personality;/* Scheduler bits, serialized by scheduler locks: */unsigned sched_reset_on_fork:1;unsigned sched_contributes_to_load:1;unsigned sched_migrated:1;unsigned sched_remote_wakeup:1;
#ifdef CONFIG_PSIunsigned sched_psi_wake_requeue:1;
#endif/* Force alignment to the next boundary: */unsigned :0;/* Unserialized, strictly 'current' *//* Bit to tell LSMs we're in execve(): */unsigned in_execve:1;unsigned in_iowait:1;
#ifndef TIF_RESTORE_SIGMASKunsigned restore_sigmask:1;
#endif
#ifdef CONFIG_MEMCGunsigned in_user_fault:1;
#endif
#ifdef CONFIG_COMPAT_BRKunsigned brk_randomized:1;
#endif
#ifdef CONFIG_CGROUPS/* disallow userland-initiated cgroup migration */unsigned no_cgroup_migration:1;/* task is frozen/stopped (used by the cgroup freezer) */unsigned frozen:1;
#endif
#ifdef CONFIG_BLK_CGROUP/* to be used once the psi infrastructure lands upstream. */unsigned use_memdelay:1;
#endif
	unsigned long atomic_flags; /* Flags requiring atomic access. */

	struct restart_block restart_block;

	pid_t pid;		// global process ID
	pid_t tgid;		// global thread group ID

#ifdef CONFIG_STACKPROTECTOR
	/* Canary value for the -fstack-protector GCC feature: */
	unsigned long stack_canary;
#endif
	/*
	 * Pointers to the (original) parent process, youngest child, younger sibling,
	 * older sibling, respectively. (p->father can be replaced with
	 * p->real_parent->pid)
	 */

	/* Real parent process: */
	struct task_struct __rcu *real_parent;	// points to the real parent process

	/* Recipient of SIGCHLD, wait4() reports: */
	struct task_struct __rcu *parent;	// points to the parent process

	/*
	 * Children/sibling form the list of natural children:
	 */
	struct list_head children;
	struct list_head sibling;
	struct task_struct *group_leader;	// points to the thread group leader

	/*
	 * 'ptraced' is the list of tasks this task is using ptrace() on.
	 *
	 * This includes both natural children and PTRACE_ATTACH targets.
	 * 'ptrace_entry' is this task's link on the p->parent->ptraced list.
	 */
	struct list_head ptraced;
	struct list_head ptrace_entry;

	/* PID/PID hash table linkage. */
	struct pid *thread_pid;
	struct hlist_node pid_links[PIDTYPE_MAX];
	struct list_head thread_group;
	struct list_head thread_node;

	struct completion *vfork_done;

	/* CLONE_CHILD_SETTID: */
	int __user *set_child_tid;

	/* CLONE_CHILD_CLEARTID: */
	int __user *clear_child_tid;

	u64 utime;
	u64 stime;
#ifdef CONFIG_ARCH_HAS_SCALED_CPUTIMEu64 utimescaled;u64 stimescaled;
#endifu64 gtime;struct prev_cputime prev_cputime;
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GENstruct vtime vtime;
#endif#ifdef CONFIG_NO_HZ_FULLatomic_t tick_dep_mask;
#endif/* Context switch counts: */unsigned long nvcsw;unsigned long nivcsw;/* Monotonic time in nsecs: */u64 start_time;/* Boot based time in nsecs: */u64 start_boottime;/* MM fault and swap info: this can arguably be seen as either mm-specific or thread-specific: */unsigned long min_flt;unsigned long maj_flt;/* Empty if CONFIG_POSIX_CPUTIMERS=n */struct posix_cputimers posix_cputimers;/* Process credentials: *//* Tracer's credentials at attach: */const struct cred __rcu *ptracer_cred;/* Objective and real subjective task credentials (COW): */const struct cred __rcu *real_cred;/* Effective (overridable) subjective task credentials (COW): */const struct cred __rcu *cred;#ifdef CONFIG_KEYS/* Cached requested key. */struct key *cached_requested_key;
#endif
	/*
	 * executable name, excluding path.
	 *
	 * - normally initialized setup_new_exec()
	 * - access it with [gs]et_task_comm()
	 * - lock it with task_lock()
	 */
	char comm[TASK_COMM_LEN];

	struct nameidata *nameidata;

	// System V IPC: semaphores and shared memory
#ifdef CONFIG_SYSVIPCstruct sysv_sem sysvsem;struct sysv_shm sysvshm;
#endif#ifdef CONFIG_DETECT_HUNG_TASKunsigned long last_switch_count;unsigned long last_switch_time;
#endif
	/* Filesystem information: */
	struct fs_struct *fs;		// filesystem info, mainly the process's root directory and current working directory

	/* Open file information: */
	struct files_struct *files;	// table of open files

	/* Namespaces: */
	struct nsproxy *nsproxy;

	/* Signal handlers: */
	struct signal_struct *signal;
	struct sighand_struct __rcu *sighand;
	sigset_t blocked;
	sigset_t real_blocked;
	/* Restored if set_restore_sigmask() was used: */
	sigset_t saved_sigmask;
	struct sigpending pending;
	unsigned long sas_ss_sp;
	size_t sas_ss_size;
	unsigned int sas_ss_flags;

	struct callback_head *task_works;

#ifdef CONFIG_AUDIT
#ifdef CONFIG_AUDITSYSCALLstruct audit_context *audit_context;
#endifkuid_t loginuid;unsigned int sessionid;
#endifstruct seccomp seccomp;/* Thread group tracking: */u64 parent_exec_id;u64 self_exec_id;/* Protection against (de-)allocation: mm, files, fs, tty, keyrings, mems_allowed, mempolicy: */spinlock_t alloc_lock;/* Protection of the PI data structures: */raw_spinlock_t pi_lock;struct wake_q_node wake_q;#ifdef CONFIG_RT_MUTEXES/* PI waiters blocked on a rt_mutex held by this task: */struct rb_root_cached pi_waiters;/* Updated under owner's pi_lock and rq lock */struct task_struct *pi_top_task;/* Deadlock detection and priority inheritance handling: */struct rt_mutex_waiter *pi_blocked_on;
#endif#ifdef CONFIG_DEBUG_MUTEXES/* Mutex deadlock detection: */struct mutex_waiter *blocked_on;
#endif#ifdef CONFIG_DEBUG_ATOMIC_SLEEPint non_block_count;
#endif#ifdef CONFIG_TRACE_IRQFLAGSunsigned int irq_events;unsigned long hardirq_enable_ip;unsigned long hardirq_disable_ip;unsigned int hardirq_enable_event;unsigned int hardirq_disable_event;int hardirqs_enabled;int hardirq_context;unsigned long softirq_disable_ip;unsigned long softirq_enable_ip;unsigned int softirq_disable_event;unsigned int softirq_enable_event;int softirqs_enabled;int softirq_context;
#endif#ifdef CONFIG_LOCKDEP
# define MAX_LOCK_DEPTH 48ULu64 curr_chain_key;int lockdep_depth;unsigned int lockdep_recursion;struct held_lock held_locks[MAX_LOCK_DEPTH];
#endif#ifdef CONFIG_UBSANunsigned int in_ubsan;
#endif/* Journalling filesystem info: */void *journal_info;/* Stacked block device info: */struct bio_list *bio_list;#ifdef CONFIG_BLOCK/* Stack plugging: */struct blk_plug *plug;
#endif/* VM state: */struct reclaim_state *reclaim_state;struct backing_dev_info *backing_dev_info;struct io_context *io_context;#ifdef CONFIG_COMPACTIONstruct capture_control *capture_control;
#endif/* Ptrace state: */unsigned long ptrace_message;kernel_siginfo_t *last_siginfo;struct task_io_accounting ioac;
#ifdef CONFIG_PSI/* Pressure stall state */unsigned int psi_flags;
#endif
#ifdef CONFIG_TASK_XACCT/* Accumulated RSS usage: */u64 acct_rss_mem1;/* Accumulated virtual memory usage: */u64 acct_vm_mem1;/* stime + utime since last update: */u64 acct_timexpd;
#endif
#ifdef CONFIG_CPUSETS/* Protected by ->alloc_lock: */nodemask_t mems_allowed;/* Seqence number to catch updates: */seqcount_t mems_allowed_seq;int cpuset_mem_spread_rotor;int cpuset_slab_spread_rotor;
#endif
#ifdef CONFIG_CGROUPS/* Control Group info protected by css_set_lock: */struct css_set __rcu *cgroups;/* cg_list protected by css_set_lock and tsk->alloc_lock: */struct list_head cg_list;
#endif
#ifdef CONFIG_X86_CPU_RESCTRLu32 closid;u32 rmid;
#endif
#ifdef CONFIG_FUTEXstruct robust_list_head __user *robust_list;
#ifdef CONFIG_COMPATstruct compat_robust_list_head __user *compat_robust_list;
#endifstruct list_head pi_state_list;struct futex_pi_state *pi_state_cache;struct mutex futex_exit_mutex;unsigned int futex_state;
#endif
#ifdef CONFIG_PERF_EVENTSstruct perf_event_context *perf_event_ctxp[perf_nr_task_contexts];struct mutex perf_event_mutex;struct list_head perf_event_list;
#endif
#ifdef CONFIG_DEBUG_PREEMPTunsigned long preempt_disable_ip;
#endif
#ifdef CONFIG_NUMA/* Protected by alloc_lock: */struct mempolicy *mempolicy;short il_prev;short pref_node_fork;
#endif
#ifdef CONFIG_NUMA_BALANCINGint numa_scan_seq;unsigned int numa_scan_period;unsigned int numa_scan_period_max;int numa_preferred_nid;unsigned long numa_migrate_retry;/* Migration stamp: */u64 node_stamp;u64 last_task_numa_placement;u64 last_sum_exec_runtime;struct callback_head numa_work;/** This pointer is only modified for current in syscall and* pagefault context (and for tasks being destroyed), so it can be read* from any of the following contexts:* - RCU read-side critical section* - current->numa_group from everywhere* - task's runqueue locked, task not running*/struct numa_group __rcu *numa_group;/** numa_faults is an array split into four regions:* faults_memory, faults_cpu, faults_memory_buffer, faults_cpu_buffer* in this precise order.** faults_memory: Exponential decaying average of faults on a per-node* basis. Scheduling placement decisions are made based on these* counts. The values remain static for the duration of a PTE scan.* faults_cpu: Track the nodes the process was running on when a NUMA* hinting fault was incurred.* faults_memory_buffer and faults_cpu_buffer: Record faults per node* during the current scan window. When the scan completes, the counts* in faults_memory and faults_cpu decay and these values are copied.*/unsigned long *numa_faults;unsigned long total_numa_faults;/** numa_faults_locality tracks if faults recorded during the last* scan window were remote/local or failed to migrate. The task scan* period is adapted based on the locality of the faults with different* weights depending on whether they were shared or private faults*/unsigned long numa_faults_locality[3];unsigned long numa_pages_migrated;
#endif /* CONFIG_NUMA_BALANCING */#ifdef CONFIG_RSEQstruct rseq __user *rseq;u32 rseq_sig;/** RmW on rseq_event_mask must be performed atomically* with respect to preemption.*/unsigned long rseq_event_mask;
#endifstruct tlbflush_unmap_batch tlb_ubc;union {refcount_t rcu_users;struct rcu_head rcu;};/* Cache last used pipe for splice(): */struct pipe_inode_info *splice_pipe;struct page_frag task_frag;#ifdef CONFIG_TASK_DELAY_ACCTstruct task_delay_info *delays;
#endif#ifdef CONFIG_FAULT_INJECTIONint make_it_fail;unsigned int fail_nth;
#endif/** When (nr_dirtied >= nr_dirtied_pause), it's time to call* balance_dirty_pages() for a dirty throttling pause:*/int nr_dirtied;int nr_dirtied_pause;/* Start of a write-and-pause period: */unsigned long dirty_paused_when;#ifdef CONFIG_LATENCYTOPint latency_record_count;struct latency_record latency_record[LT_SAVECOUNT];
#endif/** Time slack values; these are used to round up poll() and* select() etc timeout values. These are in nanoseconds.*/u64 timer_slack_ns;u64 default_timer_slack_ns;#ifdef CONFIG_KASANunsigned int kasan_depth;
#endif#ifdef CONFIG_FUNCTION_GRAPH_TRACER/* Index of current stored address in ret_stack: */int curr_ret_stack;int curr_ret_depth;/* Stack of return addresses for return function tracing: */struct ftrace_ret_stack *ret_stack;/* Timestamp for last schedule: */unsigned long long ftrace_timestamp;/** Number of functions that haven't been traced* because of depth overrun:*/atomic_t trace_overrun;/* Pause tracing: */atomic_t tracing_graph_pause;
#endif#ifdef CONFIG_TRACING/* State flags for use by tracers: */unsigned long trace;/* Bitmask and counter of trace recursion: */unsigned long trace_recursion;
#endif /* CONFIG_TRACING */#ifdef CONFIG_KCOV/* See kernel/kcov.c for more details. *//* Coverage collection mode enabled for this task (0 if disabled): */unsigned int kcov_mode;/* Size of the kcov_area: */unsigned int kcov_size;/* Buffer for coverage collection: */void *kcov_area;/* KCOV descriptor wired with this task or NULL: */struct kcov *kcov;/* KCOV common handle for remote coverage collection: */u64 kcov_handle;/* KCOV sequence number: */int kcov_sequence;
#endif#ifdef CONFIG_MEMCGstruct mem_cgroup *memcg_in_oom;gfp_t memcg_oom_gfp_mask;int memcg_oom_order;/* Number of pages to reclaim on returning to userland: */unsigned int memcg_nr_pages_over_high;/* Used by memcontrol for targeted memcg charge: */struct mem_cgroup *active_memcg;
#endif#ifdef CONFIG_BLK_CGROUPstruct request_queue *throttle_queue;
#endif#ifdef CONFIG_UPROBESstruct uprobe_task *utask;
#endif
#if defined(CONFIG_BCACHE) || defined(CONFIG_BCACHE_MODULE)unsigned int sequential_io;unsigned int sequential_io_avg;
#endif
#ifdef CONFIG_DEBUG_ATOMIC_SLEEPunsigned long task_state_change;
#endifint pagefault_disabled;
#ifdef CONFIG_MMUstruct task_struct *oom_reaper_list;
#endif
#ifdef CONFIG_VMAP_STACKstruct vm_struct *stack_vm_area;
#endif
#ifdef CONFIG_THREAD_INFO_IN_TASK/* A live task holds one reference: */refcount_t stack_refcount;
#endif
#ifdef CONFIG_LIVEPATCHint patch_state;
#endif
#ifdef CONFIG_SECURITY/* Used by LSM modules for access restriction: */void *security;
#endif#ifdef CONFIG_GCC_PLUGIN_STACKLEAKunsigned long lowest_stack;unsigned long prev_lowest_stack;
#endif/** New fields for task_struct should be added above here, so that* they are included in the randomized portion of task_struct.*/randomized_struct_fields_end/* CPU-specific state of this task: */struct thread_struct thread;/** WARNING: on x86, 'thread_struct' contains a variable-sized* structure. It *MUST* be at the end of 'task_struct'.** Do not put anything below here!*/
};
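As a quick illustration (a hypothetical helper, not part of the original source), kernel code can read the fields discussed above through the current macro:

#include <linux/sched.h>
#include <linux/printk.h>

static void demo_dump_current(void)
{
	struct task_struct *t = current;

	pr_info("comm=%s pid=%d tgid=%d state=%ld prio=%d %s\n",
		t->comm, t->pid, t->tgid, t->state, t->prio,
		t->mm ? "user process" : "kernel thread (mm == NULL)");
}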
5. Process priority (a short sketch of the corresponding kernel logic follows the table below)
// The following four members: scheduling policy and priorities
int prio;
int static_prio;
int normal_prio;
unsigned int rt_priority;
| Priority field | Deadline process | Real-time process | Normal process |
| --- | --- | --- | --- |
| prio, scheduling priority (the smaller the value, the higher the priority) | In most cases prio equals normal_prio. As a special case, if process X holds a real-time mutex and a higher-priority process Y is waiting for that lock, X's priority is temporarily boosted to Y's, i.e. X's prio takes the value of Y's prio (this rule applies to all three process types) | | |
| static_prio, static priority | always 0 (meaningless) | always 0 (meaningless) | 120 + nice value; the smaller the value, the higher the priority |
| normal_prio, normal priority | -1 | 99 - rt_priority | static_prio |
| rt_priority, real-time priority | always 0 (meaningless) | the real-time priority, range 1-99; the larger the value, the higher the priority | always 0 (meaningless) |
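The table corresponds to how the kernel derives normal_prio; the following is a simplified sketch of that logic (modelled on kernel/sched/core.c, not the verbatim source):

static int normal_prio_sketch(struct task_struct *p)
{
	if (task_has_dl_policy(p))			/* deadline process */
		return MAX_DL_PRIO - 1;			/* i.e. -1 */
	if (task_has_rt_policy(p))			/* real-time process */
		return MAX_RT_PRIO - 1 - p->rt_priority;	/* i.e. 99 - rt_priority */
	return p->static_prio;				/* normal process: 120 + nice */
}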
6. Kernel threads: a kernel thread is a process that runs only in kernel space; the difference from an ordinary user process is that it has no user address space of its own. In its task_struct, the mm member pointer is set to NULL, so it can only run in kernel space (a minimal kthread sketch follows).
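A minimal sketch of creating and stopping a kernel thread with the kthread API (the thread function and names here are made up for illustration); inside such a thread, current->mm is NULL:

#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/err.h>

static struct task_struct *demo_task;

static int demo_thread_fn(void *data)
{
	while (!kthread_should_stop()) {
		/* periodic work would go here */
		msleep(1000);
	}
	return 0;
}

static int demo_start(void)
{
	demo_task = kthread_run(demo_thread_fn, NULL, "demo_kthread");
	return IS_ERR(demo_task) ? PTR_ERR(demo_task) : 0;
}

static void demo_stop(void)
{
	kthread_stop(demo_task);
}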
Part 2: CFS process scheduling and the scheduling classes
1. Scheduling means selecting a process from the ready queue according to some scheduling algorithm and assigning it the CPU; it coordinates the use of the CPU and related resources. The goal of process scheduling is to make maximum use of CPU time. A scheduler that supports both switching a process from the ready state to the running state and switching a running process back to the ready state is called a preemptive scheduler.
2. Source analysis of the sched_class structure:
kernel/sched/sched.h
// The sched_class scheduling-class structure
struct sched_class {
	// The kernel has multiple scheduling classes, linked into a list ordered by scheduling priority
	const struct sched_class *next;

#ifdef CONFIG_UCLAMP_TASK
	int uclamp_enabled;
#endif
	// Add a process to the run queue, i.e. insert the scheduling entity (process) into the red-black tree, and increment nr_running
	void (*enqueue_task) (struct rq *rq, struct task_struct *p, int flags);
	// Remove a process from the run queue and decrement nr_running
	void (*dequeue_task) (struct rq *rq, struct task_struct *p, int flags);
	// Give up the CPU; in practice this dequeues and then re-enqueues the entity, and for CFS the scheduling entity ends up at the rightmost end of the red-black tree
	void (*yield_task) (struct rq *rq);
	bool (*yield_to_task)(struct rq *rq, struct task_struct *p, bool preempt);
	// Check whether the current process can be preempted by a newly woken process
	void (*check_preempt_curr)(struct rq *rq, struct task_struct *p, int flags);
	// Pick the next process to run
	struct task_struct *(*pick_next_task)(struct rq *rq);
	// Put the previously running process back into the run queue
	void (*put_prev_task)(struct rq *rq, struct task_struct *p);
	void (*set_next_task)(struct rq *rq, struct task_struct *p, bool first);

#ifdef CONFIG_SMP
	int (*balance)(struct rq *rq, struct task_struct *prev, struct rq_flags *rf);
	// Select a suitable CPU for the process
	int (*select_task_rq)(struct task_struct *p, int task_cpu, int sd_flag, int flags);
	// Migrate the task to another CPU
	void (*migrate_task_rq)(struct task_struct *p, int new_cpu);
	// Called when a process is woken up
	void (*task_woken)(struct rq *this_rq, struct task_struct *task);
	// Change the process's CPU affinity
	void (*set_cpus_allowed)(struct task_struct *p, const struct cpumask *newmask);
	// Bring a run queue online / take it offline
	void (*rq_online)(struct rq *rq);
	void (*rq_offline)(struct rq *rq);
#endifvoid (*task_tick)(struct rq *rq, struct task_struct *p, int queued);void (*task_fork)(struct task_struct *p);void (*task_dead)(struct task_struct *p);/** The switched_from() call is allowed to drop rq->lock, therefore we* cannot assume the switched_from/switched_to pair is serliazed by* rq->lock. They are however serialized by p->pi_lock.*/void (*switched_from)(struct rq *this_rq, struct task_struct *task);void (*switched_to) (struct rq *this_rq, struct task_struct *task);void (*prio_changed) (struct rq *this_rq, struct task_struct *task,int oldprio);unsigned int (*get_rr_interval)(struct rq *rq,struct task_struct *task);void (*update_curr)(struct rq *rq);#define TASK_SET_GROUP 0
#define TASK_MOVE_GROUP 1#ifdef CONFIG_FAIR_GROUP_SCHEDvoid (*task_change_group)(struct task_struct *p, int type);
#endif
};
3. There are five scheduler classes:
extern const struct sched_class stop_sched_class;	// stop scheduling class
extern const struct sched_class dl_sched_class;	// deadline scheduling class
extern const struct sched_class rt_sched_class;	// real-time scheduling class
extern const struct sched_class fair_sched_class;	// fair (CFS) scheduling class
extern const struct sched_class idle_sched_class;	// idle scheduling class
The priority of these five scheduling classes, from high to low, is: stop scheduling class --> deadline scheduling class --> real-time scheduling class --> fair scheduling class --> idle scheduling class.
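Conceptually, the core scheduler consults the classes in exactly this order when choosing the next task. The following is a simplified sketch of the for_each_class() idea from kernel/sched/core.c (the real code has a CFS fast path and more detail):

static struct task_struct *pick_next_task_sketch(struct rq *rq)
{
	const struct sched_class *class;
	struct task_struct *p;

	/* Walk from the highest-priority class (stop) down to idle;
	 * the first class with a runnable task provides the next task. */
	for (class = &stop_sched_class; class; class = class->next) {
		p = class->pick_next_task(rq);
		if (p)
			return p;
	}
	return NULL;	/* never reached: the idle class always returns a task */
}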
4. Process priority: the Linux kernel priority definitions
include/linux/sched/prio.h
// Linux kernel priority values
#define MAX_USER_RT_PRIO 100
#define MAX_RT_PRIO MAX_USER_RT_PRIO
#define MAX_PRIO (MAX_RT_PRIO + NICE_WIDTH)
#define DEFAULT_PRIO (MAX_RT_PRIO + NICE_WIDTH / 2)
5. Process classification
Real-time processes: high-priority processes that need to run as soon as possible.
Normal processes: lower-priority processes that can tolerate longer execution times.
A process's priority is represented directly as an integer in the range 0-139; the smaller the number, the higher the priority. Priorities 0-99 are reserved for real-time processes, and 100-139 for normal processes.
6. Kernel scheduling policies
The Linux kernel provides several scheduling policies from which user applications choose a scheduler; the policy definitions are below, followed by a small user-space usage sketch:
include/uapi/linux/sched.h
/*
 * Scheduling policies
 */
// Linux kernel scheduling policies
#define SCHED_NORMAL		0	// policy for normal processes
#define SCHED_FIFO		1	// policy for real-time processes
#define SCHED_RR		2	// policy for real-time processes
#define SCHED_BATCH		3	// policy for normal (batch) processes
/* SCHED_ISO: reserved but not implemented yet */
#define SCHED_IDLE		5	// policy for normal (lowest-priority) processes
#define SCHED_DEADLINE		6	// policy for deadline processes
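From user space, an application picks one of these policies with sched_setscheduler(); a minimal sketch (choosing SCHED_FIFO with rt_priority 50; this normally requires root or CAP_SYS_NICE):

#include <sched.h>
#include <stdio.h>

int main(void)
{
	struct sched_param param = { .sched_priority = 50 };	/* valid range for RT policies: 1-99 */

	if (sched_setscheduler(0, SCHED_FIFO, &param) == -1) {	/* 0 means the calling process */
		perror("sched_setscheduler");
		return 1;
	}
	printf("policy is now %d\n", sched_getscheduler(0));
	return 0;
}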
Part 3: The RCU mechanism and memory/optimization barriers
1. RCU: its typical use case is linked lists, where it markedly improves the efficiency of read-side traversal. Reading the list's members normally requires only rcu_read_lock(); multiple threads may read the list concurrently while one thread modifies it at the same time.
2. RCU stands for Read-Copy-Update. In RCU mode, list_add_rcu(...) adds a list entry, list_del_rcu(...) deletes a list entry, and list_replace_rcu(...) replaces a list entry.
During these operations we sometimes have to keep the compiler and the CPU from reordering the code; for example, smp_wmb() guarantees that the stores before it have completed before the stores after it are performed. A minimal usage sketch follows.
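A minimal sketch of this read/update pattern on a kernel linked list (the struct, list head, and lock are hypothetical; writers are serialized by a spinlock, readers only take rcu_read_lock()):

#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct demo_node {
	int value;
	struct list_head link;
};

static LIST_HEAD(demo_list);
static DEFINE_SPINLOCK(demo_lock);	/* serializes writers only */

/* Reader: may run concurrently with a writer. */
static bool demo_find(int value)
{
	struct demo_node *n;
	bool found = false;

	rcu_read_lock();
	list_for_each_entry_rcu(n, &demo_list, link) {
		if (n->value == value) {
			found = true;
			break;
		}
	}
	rcu_read_unlock();
	return found;
}

/* Writer: publish a new node; list_add_rcu() orders the initialization before publication. */
static int demo_add(int value)
{
	struct demo_node *n = kmalloc(sizeof(*n), GFP_KERNEL);

	if (!n)
		return -ENOMEM;
	n->value = value;

	spin_lock(&demo_lock);
	list_add_rcu(&n->link, &demo_list);
	spin_unlock(&demo_lock);
	return 0;
}

/* Writer: unlink a node, then free it only after all pre-existing readers have finished. */
static void demo_del(struct demo_node *n)
{
	spin_lock(&demo_lock);
	list_del_rcu(&n->link);
	spin_unlock(&demo_lock);
	synchronize_rcu();
	kfree(n);
}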
3. Compiler optimization: to improve performance, the compiler may reorder instructions as long as the (single-threaded) program logic is not affected.
4. CPU execution optimization: to keep the pipeline full, out-of-order execution may let later instructions complete before earlier ones.
5. Memory barriers:
A memory barrier is a way of enforcing an ordering on memory accesses, and it solves the problem of memory accesses being reordered.
Suppose we protect a critical section by disabling kernel preemption:
preempt_disable();
critical section
preempt_enable();
If the compiler or the CPU reorders the accesses, the critical section can effectively end up outside the protected region, for example:
critical section
preempt_disable();
preempt_enable();
or
preempt_disable();
preempt_enable();
critical section
which is why barriers are needed to keep the critical section between the disable/enable pair.
6. The barrier() macro defined for the GCC compiler
include/linux/compiler-gcc.h
/* The "volatile" is due to gcc bugs */
#define barrier() __asm__ __volatile__("": : :"memory")
The __volatile__ keyword tells the compiler not to optimize this statement away and not to reorder the code before barrier(), barrier() itself, and the code after it; the three blocks must keep their relative order. A tiny illustration follows.
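For example, inside kernel code (hypothetical variables), barrier() stops the compiler from reordering two stores across it; note that it constrains only the compiler, not the CPU:

int data;
int ready;

void publish(int v)
{
	data = v;
	barrier();	/* compiler must not move the 'ready' store above the 'data' store */
	ready = 1;	/* on SMP hardware a CPU barrier such as smp_wmb() would still be needed */
}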
7. Processor memory barriers
Processor memory barriers solve the problem of memory accesses being reordered between CPUs, as well as reordering of accesses to peripheral devices.
| Memory barrier type | Mandatory barrier | SMP barrier |
| --- | --- | --- |
| General memory barrier | mb() | smp_mb() |
| Write memory barrier | wmb() | smp_wmb() |
| Read memory barrier | rmb() | smp_rmb() |
| Data dependency barrier | read_barrier_depends() | smp_read_barrier_depends() |
Except for the data dependency barrier, every processor memory barrier implies a compiler optimization barrier.
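A hedged producer/consumer sketch (hypothetical variables, simplified; real kernel code would also use READ_ONCE()/WRITE_ONCE()) showing how smp_wmb() and smp_rmb() are used as a pair:

int shared_data;
int flag;

void producer(void)
{
	shared_data = 42;
	smp_wmb();		/* make the data store visible before the flag store */
	flag = 1;
}

void consumer(void)
{
	if (flag) {
		smp_rmb();	/* order the flag load before the data load */
		/* here shared_data is guaranteed to be 42 */
	}
}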
Reference: https://github.com/0voice