diff --git a/arch/xtensa/src/esp32/esp32_ble_adapter.c b/arch/xtensa/src/esp32/esp32_ble_adapter.c
index 253a407b4d47f..c6122c3ce87a5 100644
--- a/arch/xtensa/src/esp32/esp32_ble_adapter.c
+++ b/arch/xtensa/src/esp32/esp32_ble_adapter.c
@@ -2353,12 +2353,7 @@ static int32_t esp_task_create_pinned_to_core(void *entry,
   DEBUGASSERT(task_handle != NULL);
 
 #ifdef CONFIG_SMP
-  ret = sched_lock();
-  if (ret)
-    {
-      wlerr("Failed to lock scheduler before creating pinned thread\n");
-      return false;
-    }
+  sched_lock();
 #endif
 
   pid = kthread_create(name, prio, stack_depth, entry,
@@ -2390,12 +2385,7 @@ static int32_t esp_task_create_pinned_to_core(void *entry,
     }
 
 #ifdef CONFIG_SMP
-  ret = sched_unlock();
-  if (ret)
-    {
-      wlerr("Failed to unlock scheduler after creating pinned thread\n");
-      return false;
-    }
+  sched_unlock();
 #endif
 
   return pid > 0;
diff --git a/include/nuttx/irq.h b/include/nuttx/irq.h
index 0c94f71a3ad6c..e978c270ee971 100644
--- a/include/nuttx/irq.h
+++ b/include/nuttx/irq.h
@@ -258,9 +258,19 @@ int irqchain_detach(int irq, xcpt_t isr, FAR void *arg);
  ****************************************************************************/
 
 #ifdef CONFIG_IRQCOUNT
+
+#  if (defined(CONFIG_SCHED_CRITMONITOR_MAXTIME_CSECTION) && \
+       CONFIG_SCHED_CRITMONITOR_MAXTIME_CSECTION >= 0) || \
+      defined(CONFIG_SCHED_INSTRUMENTATION_CSECTION)
 irqstate_t enter_critical_section(void) noinstrument_function;
+#  else
+#    define enter_critical_section() enter_critical_section_wo_note()
+#  endif
+
+irqstate_t enter_critical_section_wo_note(void) noinstrument_function;
 #else
 #  define enter_critical_section() up_irq_save()
+#  define enter_critical_section_wo_note() up_irq_save()
 #endif
 
 /****************************************************************************
@@ -288,9 +298,19 @@ irqstate_t enter_critical_section(void) noinstrument_function;
  ****************************************************************************/
 
 #ifdef CONFIG_IRQCOUNT
+
+#  if (defined(CONFIG_SCHED_CRITMONITOR_MAXTIME_CSECTION) && \
+       CONFIG_SCHED_CRITMONITOR_MAXTIME_CSECTION >= 0) || \
+      defined(CONFIG_SCHED_INSTRUMENTATION_CSECTION)
 void leave_critical_section(irqstate_t flags) noinstrument_function;
+#  else
+#    define leave_critical_section(f) leave_critical_section_wo_note(f)
+#  endif
+
+void leave_critical_section_wo_note(irqstate_t flags) noinstrument_function;
 #else
 #  define leave_critical_section(f) up_irq_restore(f)
+#  define leave_critical_section_wo_note(f) up_irq_restore(f)
 #endif
 
 /****************************************************************************
diff --git a/include/nuttx/sched.h b/include/nuttx/sched.h
index 7f57396f37b66..84e6badfcdd84 100644
--- a/include/nuttx/sched.h
+++ b/include/nuttx/sched.h
@@ -134,6 +134,7 @@
 #define TCB_FLAG_FORCED_CANCEL    (1 << 13) /* Bit 13: Pthread cancel is forced */
 #define TCB_FLAG_JOIN_COMPLETED   (1 << 14) /* Bit 14: Pthread join completed */
 #define TCB_FLAG_FREE_TCB         (1 << 15) /* Bit 15: Free tcb after exit */
+#define TCB_FLAG_PREEMPT_SCHED    (1 << 16) /* Bit 16: tcb is PREEMPT_SCHED */
 
 /* Values for struct task_group tg_flags */
 
diff --git a/include/nuttx/spinlock.h b/include/nuttx/spinlock.h
index cacf6aa8095ff..1bf5dc76832c4 100644
--- a/include/nuttx/spinlock.h
+++ b/include/nuttx/spinlock.h
@@ -467,10 +467,17 @@ irqstate_t spin_lock_irqsave_wo_note(FAR volatile spinlock_t *lock)
       spin_lock_wo_note(lock);
     }
 
+  sched_lock_wo_note();
   return ret;
 }
 #else
-#  define spin_lock_irqsave_wo_note(l) ((void)(l), up_irq_save())
+static inline_function
+irqstate_t spin_lock_irqsave_wo_note(FAR volatile spinlock_t *lock)
+{
+  irqstate_t flags = up_irq_save();
+  sched_lock_wo_note();
+  return flags;
+}
 #endif
 
 /****************************************************************************
@@ -527,7 +534,13 @@ irqstate_t spin_lock_irqsave(FAR volatile spinlock_t *lock)
   return flags;
 }
 #else
-#  define spin_lock_irqsave(l) ((void)(l), up_irq_save())
+static inline_function
+irqstate_t spin_lock_irqsave(FAR volatile spinlock_t *lock)
+{
+  irqstate_t flags = up_irq_save();
+  sched_lock_wo_note();
+  return flags;
+}
 #endif
 
 /****************************************************************************
@@ -635,9 +648,10 @@ void spin_unlock_irqrestore_wo_note(FAR volatile spinlock_t *lock,
     }
 
   up_irq_restore(flags);
+  sched_unlock_wo_note();
 }
 #else
-#  define spin_unlock_irqrestore_wo_note(l, f) ((void)(l), up_irq_restore(f))
+#  define spin_unlock_irqrestore_wo_note(l, f) ((void)(l), up_irq_restore(f), sched_unlock_wo_note())
 #endif
 
 /****************************************************************************
@@ -683,7 +697,7 @@ void spin_unlock_irqrestore(FAR volatile spinlock_t *lock,
   sched_note_spinlock_unlock(lock);
 }
 #else
-#  define spin_unlock_irqrestore(l, f) ((void)(l), up_irq_restore(f))
+#  define spin_unlock_irqrestore(l, f) ((void)(l), up_irq_restore(f), sched_unlock_wo_note())
 #endif
 
 #if defined(CONFIG_RW_SPINLOCK)
diff --git a/include/sched.h b/include/sched.h
index 332dda4055054..a80413245d1cd 100644
--- a/include/sched.h
+++ b/include/sched.h
@@ -265,8 +265,10 @@ int sched_cpucount(FAR const cpu_set_t *set);
 
 /* Task Switching Interfaces (non-standard) */
 
-int sched_lock(void);
-int sched_unlock(void);
+void sched_lock_wo_note(void);
+void sched_unlock_wo_note(void);
+void sched_lock(void);
+void sched_unlock(void);
 int sched_lockcount(void);
 
 /* Queries */
diff --git a/include/sys/syscall_lookup.h b/include/sys/syscall_lookup.h
index 8a4f281b063cf..00c31df0eb415 100644
--- a/include/sys/syscall_lookup.h
+++ b/include/sys/syscall_lookup.h
@@ -41,10 +41,12 @@ SYSCALL_LOOKUP(sched_getparam, 2)
 SYSCALL_LOOKUP(sched_getscheduler, 1)
 SYSCALL_LOOKUP(sched_lock, 0)
 SYSCALL_LOOKUP(sched_lockcount, 0)
+SYSCALL_LOOKUP(sched_lock_wo_note, 0)
 SYSCALL_LOOKUP(sched_rr_get_interval, 2)
 SYSCALL_LOOKUP(sched_setparam, 2)
 SYSCALL_LOOKUP(sched_setscheduler, 3)
 SYSCALL_LOOKUP(sched_unlock, 0)
+SYSCALL_LOOKUP(sched_unlock_wo_note, 0)
 SYSCALL_LOOKUP(sched_yield, 0)
 SYSCALL_LOOKUP(nxsched_get_stackinfo, 2)
 
diff --git a/sched/irq/irq_csection.c b/sched/irq/irq_csection.c
index b8277c758f690..c1b239e55acbf 100644
--- a/sched/irq/irq_csection.c
+++ b/sched/irq/irq_csection.c
@@ -80,7 +80,7 @@ volatile uint8_t g_cpu_nestcount[CONFIG_SMP_NCPUS];
  ****************************************************************************/
 
 /****************************************************************************
- * Name: enter_critical_section
+ * Name: enter_critical_section_wo_note
  *
  * Description:
  *   Take the CPU IRQ lock and disable interrupts on all CPUs.  A thread-
@@ -90,7 +90,7 @@ volatile uint8_t g_cpu_nestcount[CONFIG_SMP_NCPUS];
  ****************************************************************************/
 
 #ifdef CONFIG_SMP
-irqstate_t enter_critical_section(void)
+irqstate_t enter_critical_section_wo_note(void)
 {
   FAR struct tcb_s *rtcb;
   irqstate_t ret;
@@ -246,15 +246,6 @@ irqstate_t enter_critical_section(void)
 
               cpu_irqlock_set(cpu);
               rtcb->irqcount = 1;
-
-              /* Note that we have entered the critical section */
-
-#if CONFIG_SCHED_CRITMONITOR_MAXTIME_CSECTION >= 0
-              nxsched_critmon_csection(rtcb, true, return_address(0));
-#endif
-#ifdef CONFIG_SCHED_INSTRUMENTATION_CSECTION
-              sched_note_csection(rtcb, true);
-#endif
             }
         }
 
@@ -265,7 +256,7 @@ irqstate_t enter_critical_section(void)
 
 #else
 
-irqstate_t enter_critical_section(void)
+irqstate_t enter_critical_section_wo_note(void)
 {
   irqstate_t ret;
 
@@ -285,10 +276,28 @@ irqstate_t enter_critical_section(void)
        */
 
       DEBUGASSERT(rtcb->irqcount >= 0 && rtcb->irqcount < INT16_MAX);
-      if (++rtcb->irqcount == 1)
-        {
-          /* Note that we have entered the critical section */
+      rtcb->irqcount++;
+    }
 
+  /* Return interrupt status */
+
+  return ret;
+}
+#endif
+
+#if CONFIG_SCHED_CRITMONITOR_MAXTIME_CSECTION >= 0 ||\
+    defined(CONFIG_SCHED_INSTRUMENTATION_CSECTION)
+irqstate_t enter_critical_section(void)
+{
+  FAR struct tcb_s *rtcb;
+  irqstate_t flags;
+  flags = enter_critical_section_wo_note();
+
+  if (!up_interrupt_context())
+    {
+      rtcb = this_task();
+      if (rtcb->irqcount == 1)
+        {
 #if CONFIG_SCHED_CRITMONITOR_MAXTIME_CSECTION >= 0
           nxsched_critmon_csection(rtcb, true, return_address(0));
 #endif
@@ -298,14 +307,12 @@ irqstate_t enter_critical_section(void)
         }
     }
 
-  /* Return interrupt status */
-
-  return ret;
+  return flags;
 }
 #endif
 
 /****************************************************************************
- * Name: leave_critical_section
+ * Name: leave_critical_section_wo_note
  *
  * Description:
  *   Decrement the IRQ lock count and if it decrements to zero then release
@@ -314,7 +321,7 @@ irqstate_t enter_critical_section(void)
  ****************************************************************************/
 
 #ifdef CONFIG_SMP
-void leave_critical_section(irqstate_t flags)
+void leave_critical_section_wo_note(irqstate_t flags)
 {
   int cpu;
 
@@ -388,14 +395,6 @@ void leave_critical_section(irqstate_t flags)
         }
       else
         {
-          /* No.. Note that we have left the critical section */
-
-#if CONFIG_SCHED_CRITMONITOR_MAXTIME_CSECTION >= 0
-          nxsched_critmon_csection(rtcb, false, return_address(0));
-#endif
-#ifdef CONFIG_SCHED_INSTRUMENTATION_CSECTION
-          sched_note_csection(rtcb, false);
-#endif
           /* Decrement our count on the lock.  If all CPUs have
            * released, then unlock the spinlock.
            */
@@ -421,10 +420,8 @@ void leave_critical_section(irqstate_t flags)
 
   up_irq_restore(flags);
 }
-
 #else
-
-void leave_critical_section(irqstate_t flags)
+void leave_critical_section_wo_note(irqstate_t flags)
 {
   /* Check if we were called from an interrupt handler and that the tasks
    * lists have been initialized.
@@ -440,22 +437,37 @@ void leave_critical_section(irqstate_t flags)
        */
 
      DEBUGASSERT(rtcb->irqcount > 0);
-      if (--rtcb->irqcount <= 0)
-        {
-          /* Note that we have left the critical section */
+      --rtcb->irqcount;
+    }
 
-#if CONFIG_SCHED_CRITMONITOR_MAXTIME_CSECTION >= 0
-          nxsched_critmon_csection(rtcb, false, return_address(0));
+  /* Restore the previous interrupt state. */
+
+  up_irq_restore(flags);
+}
 #endif
-#ifdef CONFIG_SCHED_INSTRUMENTATION_CSECTION
+
+#if CONFIG_SCHED_CRITMONITOR_MAXTIME_CSECTION >= 0 ||\
+    defined(CONFIG_SCHED_INSTRUMENTATION_CSECTION)
+void leave_critical_section(irqstate_t flags)
+{
+  FAR struct tcb_s *rtcb;
+
+  if (!up_interrupt_context())
+    {
+      rtcb = this_task();
+      if (rtcb->irqcount == 1)
+        {
+#  if CONFIG_SCHED_CRITMONITOR_MAXTIME_CSECTION >= 0
+          nxsched_critmon_csection(rtcb, false, return_address(0));
+#  endif
+#  ifdef CONFIG_SCHED_INSTRUMENTATION_CSECTION
           sched_note_csection(rtcb, false);
-#endif
+#  endif
         }
     }
 
-  /* Restore the previous interrupt state. */
-
-  up_irq_restore(flags);
+  leave_critical_section_wo_note(flags);
 }
 #endif
+
 #endif /* CONFIG_IRQCOUNT */
diff --git a/sched/sched/sched.h b/sched/sched/sched.h
index a978493061070..52156fa99bff8 100644
--- a/sched/sched/sched.h
+++ b/sched/sched/sched.h
@@ -415,6 +415,8 @@ void nxsched_update_critmon(FAR struct tcb_s *tcb);
 #if CONFIG_SCHED_CRITMONITOR_MAXTIME_PREEMPTION >= 0
 void nxsched_critmon_preemption(FAR struct tcb_s *tcb, bool state,
                                 FAR void *caller);
+#else
+#  define nxsched_critmon_preemption(t, s, c)
 #endif
 
 #if CONFIG_SCHED_CRITMONITOR_MAXTIME_CSECTION >= 0
diff --git a/sched/sched/sched_lock.c b/sched/sched/sched_lock.c
index d825e09f14201..5bc754103b919 100644
--- a/sched/sched/sched_lock.c
+++ b/sched/sched/sched_lock.c
@@ -46,6 +46,31 @@
  * Public Functions
  ****************************************************************************/
 
+/****************************************************************************
+ * Name: sched_lock_wo_note
+ *
+ * Description:
+ *   This function disables context switching.
+ *   It does not perform instrumentation logic.
+ *
+ ****************************************************************************/
+
+void sched_lock_wo_note(void)
+{
+  FAR struct tcb_s *tcb;
+
+  if (up_interrupt_context())
+    {
+      return;
+    }
+
+  tcb = this_task();
+  if (tcb != NULL)
+    {
+      tcb->lockcount++;
+    }
+}
+
 /****************************************************************************
 * Name: sched_lock
 *
@@ -64,112 +89,31 @@
 *
 ****************************************************************************/
 
-#ifdef CONFIG_SMP
-
-int sched_lock(void)
+void sched_lock(void)
 {
-  FAR struct tcb_s *rtcb;
-
-  /* If the CPU supports suppression of interprocessor interrupts, then
-   * simple disabling interrupts will provide sufficient protection for
-   * the following operation.
-   */
+#if (CONFIG_SCHED_CRITMONITOR_MAXTIME_PREEMPTION >= 0) ||\
+    defined(CONFIG_SCHED_INSTRUMENTATION_PREEMPTION)
+  FAR struct tcb_s *tcb;
+  irqstate_t flags;
 
-  rtcb = this_task();
-
-  /* Check for some special cases: (1) rtcb may be NULL only during early
-   * boot-up phases, and (2) sched_lock() should have no effect if called
-   * from the interrupt level.
-   */
-
-  if (rtcb != NULL && !up_interrupt_context())
+  if (up_interrupt_context())
     {
-      irqstate_t flags;
-
-      /* Catch attempts to increment the lockcount beyond the range of the
-       * integer type.
-       */
-
-      DEBUGASSERT(rtcb->lockcount < MAX_LOCK_COUNT);
-
-      flags = enter_critical_section();
-
-      /* A counter is used to support locking.  This allows nested lock
-       * operations on this thread
-       */
-
-      rtcb->lockcount++;
-
-      /* Check if we just acquired the lock */
-
-      if (rtcb->lockcount == 1)
-        {
-          /* Note that we have pre-emption locked */
-
-#if CONFIG_SCHED_CRITMONITOR_MAXTIME_PREEMPTION >= 0
-          nxsched_critmon_preemption(rtcb, true, return_address(0));
-#endif
-#ifdef CONFIG_SCHED_INSTRUMENTATION_PREEMPTION
-          sched_note_preemption(rtcb, true);
-#endif
-        }
-
-      /* Move any tasks in the ready-to-run list to the pending task list
-       * where they will not be available to run until the scheduler is
-       * unlocked and nxsched_merge_pending() is called.
-       */
-
-      nxsched_merge_prioritized(list_readytorun(),
-                                list_pendingtasks(),
-                                TSTATE_TASK_PENDING);
-
-      leave_critical_section(flags);
+      return;
     }
 
-  return OK;
-}
-
-#else /* CONFIG_SMP */
-
-int sched_lock(void)
-{
-  FAR struct tcb_s *rtcb = this_task();
-
-  /* Check for some special cases: (1) rtcb may be NULL only during early
-   * boot-up phases, and (2) sched_lock() should have no effect if called
-   * from the interrupt level.
-   */
-
-  if (rtcb != NULL && !up_interrupt_context())
+  tcb = this_task();
+  if (tcb != NULL)
     {
-      /* Catch attempts to increment the lockcount beyond the range of the
-       * integer type.
-       */
-
-      DEBUGASSERT(rtcb->lockcount < MAX_LOCK_COUNT);
-
-      /* A counter is used to support locking.  This allows nested lock
-       * operations on this thread (on any CPU)
-       */
-
-      rtcb->lockcount++;
-
-      /* Check if we just acquired the lock */
-
-      if (rtcb->lockcount == 1)
+      tcb->lockcount++;
+      if (tcb->lockcount == 1)
         {
-          /* Note that we have pre-emption locked */
-
-#if CONFIG_SCHED_CRITMONITOR_MAXTIME_PREEMPTION >= 0
-          nxsched_critmon_preemption(rtcb, true, return_address(0));
-#endif
-#ifdef CONFIG_SCHED_INSTRUMENTATION_PREEMPTION
-          sched_note_preemption(rtcb, true);
-#endif
+          flags = enter_critical_section_wo_note();
+          nxsched_critmon_preemption(tcb, true, return_address(0));
+          sched_note_preemption(tcb, true);
+          leave_critical_section_wo_note(flags);
         }
     }
-
-  return OK;
+#else
+  sched_lock_wo_note();
+#endif
 }
-
-#endif /* CONFIG_SMP */
diff --git a/sched/sched/sched_unlock.c b/sched/sched/sched_unlock.c
index 23def4bf2585e..27f5ed165c837 100644
--- a/sched/sched/sched_unlock.c
+++ b/sched/sched/sched_unlock.c
@@ -40,278 +40,179 @@
 * Public Functions
 ****************************************************************************/
 
-/****************************************************************************
- * Name: sched_unlock
- *
- * Description:
- *   This function decrements the preemption lock count.  Typically this
- *   is paired with sched_lock() and concludes a critical section of
- *   code.  Preemption will not be unlocked until sched_unlock() has
- *   been called as many times as sched_lock().  When the lockcount is
- *   decremented to zero, any tasks that were eligible to preempt the
- *   current task will execute.
- *
- ****************************************************************************/
-
-#ifdef CONFIG_SMP
-
-int sched_unlock(void)
+static inline_function void sched_preempt_schedule(FAR struct tcb_s *tcb)
 {
-  FAR struct tcb_s *rtcb;
+  bool need_leave_csection = false;
+  irqstate_t flags;
 
-  /* This operation is safe because the scheduler is locked and no context
-   * switch may occur.
-   */
+  if (list_pendingtasks()->head != NULL)
+    {
+      flags = enter_critical_section_wo_note();
+      need_leave_csection = true;
 
-  rtcb = this_task();
+      if (nxsched_merge_pending())
+        {
+          up_switch_context(this_task(), tcb);
+        }
+    }
 
-  /* Check for some special cases: (1) rtcb may be NULL only during
-   * early boot-up phases, and (2) sched_unlock() should have no
-   * effect if called from the interrupt level.
+#if CONFIG_RR_INTERVAL > 0
+  /* If (1) the task that was running supported round-robin
+   * scheduling and (2) if its time slice has already expired, but
+   * (3) it could not slice out because pre-emption was disabled,
+   * then we need to swap the task out now and reassess the interval
+   * timer for the next time slice.
    */
 
-  if (rtcb != NULL && !up_interrupt_context())
+  if ((tcb->flags & TCB_FLAG_POLICY_MASK) == TCB_FLAG_SCHED_RR &&
+      tcb->timeslice == 0)
     {
-      /* Prevent context switches throughout the following. */
-
-      irqstate_t flags = enter_critical_section();
-      int cpu = this_cpu();
-
-      DEBUGASSERT(rtcb->lockcount > 0);
-
-      /* Decrement the preemption lock counter */
-
-      rtcb->lockcount--;
+      if (!need_leave_csection)
+        {
+          flags = enter_critical_section_wo_note();
+          need_leave_csection = true;
+        }
 
-      /* Check if the lock counter has decremented to zero.  If so,
-       * then pre-emption has been re-enabled.
+      /* Yes.. that is the situation.  But one more thing.  The call
+       * to nxsched_merge_pending() above may have actually replaced
+       * the task at the head of the ready-to-run list.  In that
+       * case, we need only to reset the timeslice value back to the
+       * maximum.
       */
 
-      if (rtcb->lockcount <= 0)
+      if (tcb != this_task())
         {
-          /* Note that we no longer have pre-emption disabled. */
-
-#if CONFIG_SCHED_CRITMONITOR_MAXTIME_PREEMPTION >= 0
-          nxsched_critmon_preemption(rtcb, false, return_address(0));
-#endif
-#ifdef CONFIG_SCHED_INSTRUMENTATION_PREEMPTION
-          sched_note_preemption(rtcb, false);
-#endif
-
-          /* Release any ready-to-run tasks that have collected in
-           * g_pendingtasks.
-           *
-           * NOTE: This operation has a very high likelihood of causing
-           * this task to be switched out!
-           */
-
-          if (list_pendingtasks()->head != NULL)
-            {
-              if (nxsched_merge_pending())
-                {
-                  up_switch_context(this_task(), rtcb);
-                }
-            }
-
-#if CONFIG_RR_INTERVAL > 0
-          /* If (1) the task that was running supported round-robin
-           * scheduling and (2) if its time slice has already expired, but
-           * (3) it could not slice out because pre-emption was disabled,
-           * then we need to swap the task out now and reassess the interval
-           * timer for the next time slice.
-           */
-
-          if ((rtcb->flags & TCB_FLAG_POLICY_MASK) == TCB_FLAG_SCHED_RR &&
-              rtcb->timeslice == 0)
-            {
-              /* Yes.. that is the situation.  But one more thing.  The call
-               * to nxsched_merge_pending() above may have actually replaced
-               * the task at the head of the ready-to-run list.  In that
-               * case, we need only to reset the timeslice value back to the
-               * maximum.
-               */
-
-              if (rtcb != current_task(cpu))
-                {
-                  rtcb->timeslice = MSEC2TICK(CONFIG_RR_INTERVAL);
-                }
-#ifdef CONFIG_SCHED_TICKLESS
-              else
-                {
-                  nxsched_reassess_timer();
-                }
-#endif
-            }
+          tcb->timeslice = MSEC2TICK(CONFIG_RR_INTERVAL);
+        }
+#  ifdef CONFIG_SCHED_TICKLESS
+      else if ((tcb->flags & TCB_FLAG_PREEMPT_SCHED) == 0)
+        {
+          tcb->flags |= TCB_FLAG_PREEMPT_SCHED;
+          nxsched_reassess_timer();
+          tcb->flags &= ~TCB_FLAG_PREEMPT_SCHED;
+        }
+#  endif
+    }
 #endif
 
 #ifdef CONFIG_SCHED_SPORADIC
-#if CONFIG_RR_INTERVAL > 0
-          else
-#endif
-          /* If (1) the task that was running supported sporadic scheduling
-           * and (2) if its budget slice has already expired, but (3) it
-           * could not slice out because pre-emption was disabled, then we
-           * need to swap the task out now and reassess the interval timer
-           * for the next time slice.
-           */
-
-          if ((rtcb->flags & TCB_FLAG_POLICY_MASK) == TCB_FLAG_SCHED_SPORADIC
-              && rtcb->timeslice < 0)
-            {
-              /* Yes.. that is the situation.  Force the low-priority state
-               * now
-               */
+#  if CONFIG_RR_INTERVAL > 0
+  else
+#  endif
+  /* If (1) the task that was running supported sporadic scheduling
+   * and (2) if its budget slice has already expired, but (3) it
+   * could not slice out because pre-emption was disabled, then we
+   * need to swap the task out now and reassess the interval timer
+   * for the next time slice.
+   */
 
-              nxsched_sporadic_lowpriority(rtcb);
+  if ((tcb->flags & TCB_FLAG_POLICY_MASK) == TCB_FLAG_SCHED_SPORADIC
+      && tcb->timeslice < 0)
+    {
+      if (!need_leave_csection)
+        {
+          flags = enter_critical_section_wo_note();
+          need_leave_csection = true;
+        }
 
-#ifdef CONFIG_SCHED_TICKLESS
-          /* Make sure that the call to nxsched_merge_pending() did not
-           * change the currently active task.
-           */
+#  ifdef CONFIG_SCHED_TICKLESS
+      /* Make sure that the call to nxsched_merge_pending() did not
+       * change the currently active task.
+       */
 
-          if (rtcb == current_task(cpu))
-            {
-              nxsched_reassess_timer();
-            }
-#endif
-          }
-#endif
+      if (tcb == this_task() &&
+          (tcb->flags & TCB_FLAG_PREEMPT_SCHED) == 0)
+        {
+          tcb->flags |= TCB_FLAG_PREEMPT_SCHED;
+          nxsched_reassess_timer();
+          tcb->flags &= ~TCB_FLAG_PREEMPT_SCHED;
         }
+#  endif
+    }
+#endif
 
-      UNUSED(cpu);
-      leave_critical_section(flags);
+  if (need_leave_csection)
+    {
+      leave_critical_section_wo_note(flags);
     }
-
-  return OK;
 }
 
-#else /* CONFIG_SMP */
+/****************************************************************************
+ * Name: sched_unlock_wo_note
+ *
+ * Description:
+ *   This function decrements the preemption lock count.
+ *   It does not perform instrumentation logic.
+ *
+ ****************************************************************************/
 
-int sched_unlock(void)
+void sched_unlock_wo_note(void)
 {
-  FAR struct tcb_s *rtcb = this_task();
+  FAR struct tcb_s *tcb;
 
-  /* Check for some special cases: (1) rtcb may be NULL only during
-   * early boot-up phases, and (2) sched_unlock() should have no
-   * effect if called from the interrupt level.
-   */
-
-  if (rtcb != NULL && !up_interrupt_context())
+  if (up_interrupt_context())
     {
-      /* Prevent context switches throughout the following. */
-
-      irqstate_t flags = enter_critical_section();
-
-      DEBUGASSERT(rtcb->lockcount > 0);
-
-      /* Decrement the preemption lock counter */
-
-      rtcb->lockcount--;
-
-      /* Check if the lock counter has decremented to zero.  If so,
-       * then pre-emption has been re-enabled.
-       */
+      return;
+    }
 
-      if (rtcb->lockcount <= 0)
+  tcb = this_task();
+  if (tcb != NULL)
+    {
+      tcb->lockcount--;
+      DEBUGASSERT(tcb->lockcount >= 0);
+      if (tcb->lockcount == 0)
         {
-          /* Note that we no longer have pre-emption disabled. */
-
-#if CONFIG_SCHED_CRITMONITOR_MAXTIME_PREEMPTION >= 0
-          nxsched_critmon_preemption(rtcb, false, return_address(0));
-#endif
-#ifdef CONFIG_SCHED_INSTRUMENTATION_PREEMPTION
-          sched_note_preemption(rtcb, false);
-#endif
-
-          /* Release any ready-to-run tasks that have collected in
-           * g_pendingtasks.
-           *
-           * NOTE: This operation has a very high likelihood of causing
-           * this task to be switched out!
-           *
-           * In the single CPU case, decrementing lockcount to zero is
-           * sufficient to release the pending tasks.  Further, in that
-           * configuration, critical sections and pre-emption can operate
-           * fully independently.
-           */
-
-          if (list_pendingtasks()->head != NULL)
-            {
-              if (nxsched_merge_pending())
-                {
-                  up_switch_context(this_task(), rtcb);
-                }
-            }
-
-#if CONFIG_RR_INTERVAL > 0
-          /* If (1) the task that was running supported round-robin
-           * scheduling and (2) if its time slice has already expired, but
-           * (3) it could not be sliced out because pre-emption was disabled,
-           * then we need to swap the task out now and reassess the interval
-           * timer for the next time slice.
-           */
-
-          if ((rtcb->flags & TCB_FLAG_POLICY_MASK) == TCB_FLAG_SCHED_RR &&
-              rtcb->timeslice == 0)
-            {
-              /* Yes.. that is the situation.  But one more thing: The call
-               * to nxsched_merge_pending() above may have actually replaced
-               * the task at the head of the ready-to-run list.  In that
-               * case, we need only to reset the timeslice value back to the
-               * maximum.
-               */
-
-              if (rtcb != this_task())
-                {
-                  rtcb->timeslice = MSEC2TICK(CONFIG_RR_INTERVAL);
-                }
-#ifdef CONFIG_SCHED_TICKLESS
-              else
-                {
-                  nxsched_reassess_timer();
-                }
-#endif
-            }
-#endif
+          sched_preempt_schedule(tcb);
+        }
+    }
+}
 
-#ifdef CONFIG_SCHED_SPORADIC
-#if CONFIG_RR_INTERVAL > 0
-          else
-#endif
-          /* If (1) the task that was running supported sporadic scheduling
-           * and (2) if its budget slice has already expired, but (3) it
-           * could not slice out because pre-emption was disabled, then we
-           * need to swap the task out now and reassess the interval timer
-           * for the next time slice.
-           */
+/****************************************************************************
+ * Name: sched_unlock
+ *
+ * Description:
+ *   This function decrements the preemption lock count.  Typically this
+ *   is paired with sched_lock() and concludes a critical section of
+ *   code.  Preemption will not be unlocked until sched_unlock() has
+ *   been called as many times as sched_lock().  When the lockcount is
+ *   decremented to zero, any tasks that were eligible to preempt the
+ *   current task will execute.
+ *
+ ****************************************************************************/
 
-          if ((rtcb->flags & TCB_FLAG_POLICY_MASK) == TCB_FLAG_SCHED_SPORADIC
-              && rtcb->timeslice < 0)
-            {
-              /* Yes.. that is the situation.  Force the low-priority state
-               * now
-               */
+void sched_unlock(void)
+{
+  FAR struct tcb_s *tcb;
 
-              nxsched_sporadic_lowpriority(rtcb);
+  if (up_interrupt_context())
+    {
+      return;
+    }
 
-#ifdef CONFIG_SCHED_TICKLESS
-          /* Make sure that the call to nxsched_merge_pending() did not
-           * change the currently active task.
-           */
+  tcb = this_task();
+  if (tcb != NULL)
+    {
+#if (CONFIG_SCHED_CRITMONITOR_MAXTIME_PREEMPTION >= 0) ||\
+    defined(CONFIG_SCHED_INSTRUMENTATION_PREEMPTION)
+      irqstate_t flags = enter_critical_section_wo_note();
 
-          if (rtcb == this_task())
-            {
-              nxsched_reassess_timer();
-            }
-#endif
-          }
-#endif
+      tcb->lockcount--;
+      DEBUGASSERT(tcb->lockcount >= 0);
+      if (tcb->lockcount == 0)
+        {
+          nxsched_critmon_preemption(tcb, false, return_address(0));
+          sched_note_preemption(tcb, false);
+          sched_preempt_schedule(tcb);
         }
-      leave_critical_section(flags);
+      leave_critical_section_wo_note(flags);
+#else
+      tcb->lockcount--;
+      DEBUGASSERT(tcb->lockcount >= 0);
+      if (tcb->lockcount == 0)
+        {
+          sched_preempt_schedule(tcb);
+        }
+#endif
     }
-
-  return OK;
 }
-
-#endif /* CONFIG_SMP */
diff --git a/syscall/syscall.csv b/syscall/syscall.csv
index 959f7d9545ea9..670330891ccf3 100644
--- a/syscall/syscall.csv
+++ b/syscall/syscall.csv
@@ -140,13 +140,15 @@
 "sched_getcpu","sched.h","","int"
 "sched_getparam","sched.h","","int","pid_t","FAR struct sched_param *"
 "sched_getscheduler","sched.h","","int","pid_t"
-"sched_lock","sched.h","","int"
+"sched_lock","sched.h","","void"
 "sched_lockcount","sched.h","","int"
+"sched_lock_wo_note","sched.h","","void"
 "sched_rr_get_interval","sched.h","","int","pid_t","struct timespec *"
 "sched_setaffinity","sched.h","defined(CONFIG_SMP)","int","pid_t","size_t","FAR const cpu_set_t*"
 "sched_setparam","sched.h","","int","pid_t","const struct sched_param *"
 "sched_setscheduler","sched.h","","int","pid_t","int","const struct sched_param *"
-"sched_unlock","sched.h","","int"
+"sched_unlock","sched.h","","void"
+"sched_unlock_wo_note","sched.h","","void"
 "sched_yield","sched.h","","int"
 "select","sys/select.h","","int","int","FAR fd_set *","FAR fd_set *","FAR fd_set *","FAR struct timeval *"
 "send","sys/socket.h","defined(CONFIG_NET)","ssize_t","int","FAR const void *","size_t","int"