天天看點

淺析linux核心中timer定時器的生成和softirq軟中斷調用流程【轉】

<code>淺析linux核心中timer定時器的生成和sofirq軟中斷調用流程   mod_timer添加的定時器timer在核心的軟中斷中發生調用,__run_timers會spin_lock_irq(&amp;base-&gt;lock);禁止cpu中斷,是以我們的timer回調處理函數handler工作在irq關閉的環境中,是以需要作很多考慮,比如在handler中盡量不要執行會引起pending的函數調用,比如kmalloc之類可能引起pending的操作,否則會使kernel永遠停在我們的handler中不能傳回,這樣kernel将因為我們ko設計上的失敗而當機[luther.gliethttp]!   我們可以使用如下幾行語句,向我們的ko驅動添加一個timer定時器,來處理時間事件: struct __wlanwlan_check_tx_flow_timer {     struct timer_list timer;     int timer_freq; } wlan_check_tx_flow_timer = {         .timer_freq = 8*1000, }; static void wlan_check_tx_flow_timer_handler(unsigned long data) {     ...     //重新啟動timer定時器      mod_timer(&amp;wlan_check_tx_flow_timer.timer, jiffies + msecs_to_jiffies(wlan_check_tx_flow_timer.timer_freq));     ... } //設定定時器 setup_timer(&amp;wlan_check_tx_flow_timer.timer, wlan_check_tx_flow_timer_handler, (unsigned long)&amp;wlan_check_tx_flow_timer); //添加定時器 mod_timer(&amp;wlan_check_tx_flow_timer.timer, jiffies + msecs_to_jiffies(wlan_check_tx_flow_timer.timer_freq)); 那麼這個wlan_check_tx_flow_timer_handler處理函數在什麼時候被調用的呢?那麼我們追入核心中,看看kernel對定時器的具體管理. 
首先kernel在啟動的最前面注冊TIMER_SOFTIRQ的處理函數[luther.gliethttp], start_kernel =&gt;init_timers =&gt;open_softirq(TIMER_SOFTIRQ, run_timer_softirq, NULL); 那麼由誰來調用raise_softirq(TIMER_SOFTIRQ);觸發TIMER_SOFTIRQ軟中斷呢,這就和平台相關了,對于pxa935處理器來說[luther.gliethttp], MACHINE_START(LUTHER, "luther")     .phys_io = 0x40000000,     .boot_params = 0xa0000100,     .io_pg_offst = (io_p2v(0x40000000) &gt;&gt; 18) &amp; 0xfffc,     .map_io = pxa_map_io,     .init_irq = pxa3xx_init_irq,     .timer = &amp;pxa_timer,     .init_machine = luther_init, MACHINE_END =&gt;pxa_timer_init//平台對應的定時器初始化 ==&gt;pxa_timer_irq.dev_id = &amp;ckevt_32ktimer; ==&gt;setup_irq(IRQ_OST_4_11, &amp;pxa_timer_irq); //32768的rtc ==&gt;clockevents_register_device(&amp;ckevt_32ktimer); pxa_timer_interrupt中斷處理函數 =&gt;c-&gt;event_handler(c);也就是tick_handle_periodic系統時鐘函數 =&gt;tick_handle_periodic =&gt;update_process_times =&gt;run_local_timers =&gt;raise_softirq(TIMER_SOFTIRQ); 這裡僅僅是觸發了TIMER_SOFTIRQ軟中斷,那麼在什麼地方處理我們mod_timer添加的timer定時器處理函數wlan_check_tx_flow_timer_handler呢[luther.gliethttp]? __irq_svc://核心中發生的中斷 __irq_usr://使用者空間時發生的中斷 =&gt;asm_do_IRQ =&gt;irq_exit =&gt;do_softirq =&gt;__do_softirq =&gt;調用上面注冊的run_timer_softirq軟中斷處理函數 =&gt;run_timer_softirq =&gt;__run_timers static inline void __run_timers(struct tvec_base *base) {     struct timer_list *timer;     spin_lock_irq(&amp;base-&gt;lock);//禁止中斷     while (time_after_eq(jiffies, base-&gt;timer_jiffies)) {         ...         if (時間到了) {         ...         fn = timer-&gt;function;         data = timer-&gt;data;         fn(data);//這就是我們上面添加的static void wlan_check_tx_flow_timer_handler(unsigned long data);定時器處理函數了.         ...         }         ...     
}     set_running_timer(base, NULL);     spin_unlock_irq(&amp;base-&gt;lock);//打開中斷 } //================ include/asm/hardirq.h typedef struct {     unsigned int __softirq_pending;     unsigned int local_timer_irqs; } ____cacheline_aligned irq_cpustat_t; //================ kernel/softirq.c|45| irq_cpustat_t irq_stat[NR_CPUS] ____cacheline_aligned; #ifndef __ARCH_IRQ_STAT irq_cpustat_t irq_stat[NR_CPUS] ____cacheline_aligned;//在這裡定義irq_stat存儲空間 EXPORT_SYMBOL(irq_stat); #endif //================ include/linux/irq_cpustat.h #ifndef __ARCH_IRQ_STAT //引用的就是上面的irq_cpustat_t irq_stat[NR_CPUS] ____cacheline_aligned; extern irq_cpustat_t irq_stat[];        /* defined in asm/hardirq.h */ #define __IRQ_STAT(cpu, member)    (irq_stat[cpu].member) #endif //================ arch/arm/kernel/entry-armv.S|331| .word    irq_stat #ifdef CONFIG_PREEMPT svc_preempt:     teq    r8, #0                @ was preempt count = 0     ldreq    r6, .LCirq_stat //操作     movne    pc, lr                @ no     ldr    r0, [r6, #4]            @ local_irq_count     ldr    r1, [r6, #8]            @ local_bh_count     adds    r0, r0, r1     movne    pc, lr     mov    r7, #0                @ preempt_schedule_irq     str    r7, [tsk, #TI_PREEMPT]        @ expects preempt_count == 0 1:    bl    preempt_schedule_irq        @ irq en/disable is done inside     ldr    r0, [tsk, #TI_FLAGS]        @ get new tasks TI_FLAGS     tst    r0, #_TIF_NEED_RESCHED     beq    preempt_return            @ go again     b    1b #endif .LCirq_stat:     .word    irq_stat //引用irq_cpustat_t irq_stat[NR_CPUS] ____cacheline_aligned;位址 #endif   /* arch independent irq_stat fields */ #define local_softirq_pending() \     __IRQ_STAT(smp_processor_id(), __softirq_pending) #define __ARCH_IRQ_EXIT_IRQS_DISABLED    1 #ifdef __ARCH_IRQ_EXIT_IRQS_DISABLED # define invoke_softirq()    __do_softirq() //是這個 #else # define invoke_softirq()    do_softirq() #endif #ifndef __ARCH_SET_SOFTIRQ_PENDING #define set_softirq_pending(x) 
(local_softirq_pending() = (x)) #define or_softirq_pending(x) (local_softirq_pending() |= (x)) #endif #define __raise_softirq_irqoff(nr) do { or_softirq_pending(1UL &lt;&lt; (nr)); } while (0) inline void raise_softirq_irqoff(unsigned int nr) {     __raise_softirq_irqoff(nr);         if (!in_interrupt())         wakeup_softirqd(); } void raise_softirq(unsigned int nr) {     unsigned long flags;     local_irq_save(flags);     raise_softirq_irqoff(nr);     local_irq_restore(flags); } =&gt;s3c2410_timer_interrupt =&gt;timer_tick =&gt;pxa_timer_init ==&gt;pxa_timer_irq.dev_id = &amp;ckevt_32ktimer; ==&gt;setup_irq(IRQ_OST_4_11, &amp;pxa_timer_irq); //32768的rtc ==&gt;clockevents_register_device(&amp;ckevt_32ktimer); =&gt;clockevents_register_device =&gt;clockevents_do_notify =&gt;raw_notifier_call_chain(&amp;clockevents_chain, reason, dev); =&gt;__raw_notifier_call_chain =&gt;notifier_call_chain(&amp;nh-&gt;head, val, v, nr_to_call, nr_calls); =&gt;nb-&gt;notifier_call(nb, val, v);就是tick_notify start_kernel =&gt;tick_init static struct notifier_block tick_notifier = {     .notifier_call = tick_notify, }; void __init tick_init(void) {     clockevents_register_notifier(&amp;tick_notifier); } clockevents_register_notifier =&gt;raw_notifier_chain_register(&amp;clockevents_chain, nb); =&gt;notifier_chain_register将tick_notifier添加到clockevents_chain這個單向連結清單中[luther.gliethttp] static int tick_notify(struct notifier_block *nb, unsigned long reason,              void *dev) {     switch (reason) {     case CLOCK_EVT_NOTIFY_ADD:         return tick_check_new_device(dev);     ...     return NOTIFY_OK; } =&gt;tick_notify =&gt;tick_check_new_device =&gt;tick_setup_device(td, newdev, cpu, cpumask); static void tick_setup_device(struct tick_device *td,              struct clock_event_device *newdev, int cpu,              cpumask_t cpumask) {     ktime_t next_event;     void (*handler)(struct clock_event_device *) = NULL;     /*      * First device setup ?      
*/     if (!td-&gt;evtdev) {         /*          * If no cpu took the do_timer update, assign it to          * this cpu:          */         if (tick_do_timer_cpu == -1) {             tick_do_timer_cpu = cpu;             tick_next_period = ktime_get();             tick_period = ktime_set(0, NSEC_PER_SEC / HZ);         }         /*          * Startup in periodic mode first.          */         td-&gt;mode = TICKDEV_MODE_PERIODIC;//設定第1個tick裝置為TICKDEV_MODE_PERIODIC模式     } else {         handler = td-&gt;evtdev-&gt;event_handler;         next_event = td-&gt;evtdev-&gt;next_event;     }     td-&gt;evtdev = newdev;     ...     if (td-&gt;mode == TICKDEV_MODE_PERIODIC)         tick_setup_periodic(newdev, 0);     else         tick_setup_oneshot(newdev, handler, next_event); } void tick_setup_periodic(struct clock_event_device *dev, int broadcast) {     tick_set_periodic_handler(dev, broadcast);//設定event_handler處理函數為dev-&gt;event_handler = tick_handle_periodic;     /* Broadcast setup ? */     if (!tick_device_is_functional(dev))         return;     if (dev-&gt;features &amp; CLOCK_EVT_FEAT_PERIODIC) {         clockevents_set_mode(dev, CLOCK_EVT_MODE_PERIODIC);     } else {         unsigned long seq;         ktime_t next;         do {             seq = read_seqbegin(&amp;xtime_lock);             next = tick_next_period;         } while (read_seqretry(&amp;xtime_lock, seq));         clockevents_set_mode(dev, CLOCK_EVT_MODE_ONESHOT);         for (;;) {             if (!clockevents_program_event(dev, next, ktime_get()))                 return;             next = ktime_add(next, tick_period);         }     } } void tick_set_periodic_handler(struct clock_event_device *dev, int broadcast) {     if (!broadcast)         dev-&gt;event_handler = tick_handle_periodic;     else         dev-&gt;event_handler = tick_handle_periodic_broadcast; } =&gt;pxa_timer_interrupt {     ...     
if (OSSR &amp; OST_C4) {         OIER &amp;= ~OST_C4;         OSSR = OST_C4;         if (timer32k_enabled)             c-&gt;event_handler(c);//調用tick_handle_periodic處理函數,作為     }     ... } void tick_handle_periodic(struct clock_event_device *dev) {     int cpu = smp_processor_id();     ktime_t next;     tick_periodic(cpu);//調用do_timer(1);将jiffies_64加1     if (dev-&gt;mode != CLOCK_EVT_MODE_ONESHOT)         return;     /*      * Setup the next period for devices, which do not have      * periodic mode:      */     next = ktime_add(dev-&gt;next_event, tick_period);     for (;;) {         if (!clockevents_program_event(dev, next, ktime_get()))             return;         tick_periodic(cpu);         next = ktime_add(next, tick_period);     } } static void tick_periodic(int cpu) {     if (tick_do_timer_cpu == cpu) {         write_seqlock(&amp;xtime_lock);         /* Keep track of the next tick event */         tick_next_period = ktime_add(tick_next_period, tick_period);         do_timer(1);         write_sequnlock(&amp;xtime_lock);     }     update_process_times(user_mode(get_irq_regs()));     profile_tick(CPU_PROFILING); } arch/arm/kernel/time.c|332| update_process_times(user_mode(get_irq_regs())); =&gt;update_process_times =&gt;run_local_timers =&gt;raise_softirq(TIMER_SOFTIRQ);//觸發軟中斷,當irq_exit時調用__do_softirq來處理 =&gt;run_timer_softirq =&gt;__run_timers =&gt; fn = timer-&gt;function;//執行 data = timer-&gt;data; fn(data); //================ include/asm/arch-pxa/entry-macro.S|22| .macro    get_irqnr_and_base, irqnr, irqstat, base, tmp //pxa擷取irq中斷号函數 //================ arch/arm/kernel/entry-armv.S|37| bne    asm_do_IRQ     .macro    irq_handler     get_irqnr_preamble r5, lr 1:    get_irqnr_and_base r0, r6, r5, lr //擷取irq中斷号,存儲到r0寄存器中,作為參數傳遞給asm_do_IRQ     movne    r1, sp     @     @ routine called with r0 = irq number, r1 = struct pt_regs *     @     adrne    lr, 1b     bne    asm_do_IRQ     ... 
//================     .align    5 __irq_svc://核心中發生的中斷     svc_entry     ...     irq_handler     ... //================     .align    5 __irq_usr://使用者空間時發生的中斷     usr_entry     ...     irq_handler     ... //================     .macro    vector_stub, name, mode, correction=0     .align    5 vector_\name:     .if \correction     sub    lr, lr, #\correction     .endif     @     @ Save r0, lr_&lt;exception&gt; (parent PC) and spsr_&lt;exception&gt;     @ (parent CPSR)     @     stmia    sp, {r0, lr}        @ save r0, lr     mrs    lr, spsr     str    lr, [sp, #8]        @ save spsr     @     @ Prepare for SVC32 mode. IRQs remain disabled.     @     mrs    r0, cpsr     eor    r0, r0, #(\mode ^ SVC_MODE)     msr    spsr_cxsf, r0     @     @ the branch table must immediately follow this code     @     and    lr, lr, #0x0f //lr存儲了spsr,是以一共有16種cpu模式     mov    r0, sp //傳參     ldr    lr, [pc, lr, lsl #2]//取出相應模式下的處理函數指針,比如__irq_usr或者__irq_svc     movs    pc, lr            @ branch to handler in SVC mode     .endm //================     .globl    __stubs_start __stubs_start: /*  * Interrupt dispatcher  */     vector_stub    irq, IRQ_MODE, 4     .long    __irq_usr            @ 0 (USR_26 / USR_32)     .long    __irq_invalid        @ 1 (FIQ_26 / FIQ_32)     .long    __irq_invalid        @ 2 (IRQ_26 / IRQ_32)     .long    __irq_svc            @ 3 (SVC_26 / SVC_32)     .long    __irq_invalid            @ 4     .long    __irq_invalid            @ 5     .long    __irq_invalid            @ 6     .long    __irq_invalid            @ 7     .long    __irq_invalid            @ 8     .long    __irq_invalid            @ 9     .long    __irq_invalid            @ a     .long    __irq_invalid            @ b     .long    __irq_invalid            @ c     .long    __irq_invalid            @ d     .long    __irq_invalid            @ e     .long    __irq_invalid            @ f //================     .globl    __vectors_start __vectors_start:     swi    SYS_ERROR0     b    vector_und + 
stubs_offset     ldr    pc, .LCvswi + stubs_offset     b    vector_pabt + stubs_offset     b    vector_dabt + stubs_offset     b    vector_addrexcptn + stubs_offset     b    vector_irq + stubs_offset     b    vector_fiq + stubs_offset //================ asm_do_IRQ(unsigned int irq, struct pt_regs *regs) =&gt;desc_handle_irq(irq, desc);// static inline void desc_handle_irq(unsigned int irq, struct irq_desc *desc) {     desc-&gt;handle_irq(irq, desc);//調用中斷号irq對應的handler回調處理函數[luther.gliethttp] } __irq_svc://核心中發生的中斷 __irq_usr://使用者空間時發生的中斷 =&gt;asm_do_IRQ =&gt;irq_exit =&gt;do_softirq =&gt;__do_softirq =&gt; {     ...     h = softirq_vec;//執行軟中斷函數     do {         if (pending &amp; 1) {             h-&gt;action(h); //如果32768的時間到達,那asm_do_IRQ中将觸發raise_softirq(TIMER_SOFTIRQ); //在這裡将執行管理系統tick的run_timer_softirq軟中斷[luther.gliethttp]             rcu_bh_qsctr_inc(cpu);         }         h++;         pending &gt;&gt;= 1;     } while (pending);     ... } start_kernel =&gt;init_timers =&gt;open_softirq(TIMER_SOFTIRQ, run_timer_softirq, NULL); void open_softirq(int nr, void (*action)(struct softirq_action*), void *data) {     softirq_vec[nr].data = data;     softirq_vec[nr].action = action; } static void run_timer_softirq(struct softirq_action *h) {     struct tvec_base *base = __get_cpu_var(tvec_bases);//獲得time時間根     hrtimer_run_pending();     if (time_after_eq(jiffies, base-&gt;timer_jiffies))         __run_timers(base); } //執行軟中斷 =&gt;run_timer_softirq =&gt;__run_timers =&gt; fn = timer-&gt;function; data = timer-&gt;data; fn(data); static inline void __run_timers(struct tvec_base *base) {     ...     spin_lock_irq(&amp;base-&gt;lock);//禁止中斷     ...     fn = timer-&gt;function;     data = timer-&gt;data;     fn(data);     ...     
set_running_timer(base, NULL);     spin_unlock_irq(&amp;base-&gt;lock);//打開中斷 } mod_timer =&gt;__mod_timer int __mod_timer(struct timer_list *timer, unsigned long expires) {     struct tvec_base *base, *new_base;     unsigned long flags;     int ret = 0;     timer_stats_timer_set_start_info(timer);     BUG_ON(!timer-&gt;function);     base = lock_timer_base(timer, &amp;flags);     if (timer_pending(timer)) {         detach_timer(timer, 0);         ret = 1;     }     new_base = __get_cpu_var(tvec_bases);//獲得time時間根     if (base != new_base) {         /*          * We are trying to schedule the timer on the local CPU.          * However we can't change timer's base while it is running,          * otherwise del_timer_sync() can't detect that the timer's          * handler yet has not finished. This also guarantees that          * the timer is serialized wrt itself.          */         if (likely(base-&gt;running_timer != timer)) {             /* See the comment in lock_timer_base() */             timer_set_base(timer, NULL);             spin_unlock(&amp;base-&gt;lock);             base = new_base;             spin_lock(&amp;base-&gt;lock);             timer_set_base(timer, base);         }     }     timer-&gt;expires = expires;     internal_add_timer(base, timer); //添加到連結清單上,這樣當timer逾時到達時,run_timer_softirq=&gt;__run_timers軟中斷中将會回調該處理函數[luther.gliethttp].     spin_unlock_irqrestore(&amp;base-&gt;lock, flags);     return ret; }</code>

<code></code>

<code>本文轉自張昺華-sky部落格園部落格,原文連結:</code>http://www.cnblogs.com/sky-heaven/p/7246640.html,如需轉載請自行聯系原作者

繼續閱讀