diff --git a/sys/compat/linuxkpi/common/include/linux/sched.h b/sys/compat/linuxkpi/common/include/linux/sched.h
--- a/sys/compat/linuxkpi/common/include/linux/sched.h
+++ b/sys/compat/linuxkpi/common/include/linux/sched.h
@@ -82,6 +82,7 @@
 	int bsd_interrupt_value;
 	struct work_struct *work;	/* current work struct, if set */
 	struct task_struct *group_leader;
+	unsigned rcu_section[TS_RCU_TYPE_MAX];
 };
 
 #define current ({ \
diff --git a/sys/compat/linuxkpi/common/src/linux_rcu.c b/sys/compat/linuxkpi/common/src/linux_rcu.c
--- a/sys/compat/linuxkpi/common/src/linux_rcu.c
+++ b/sys/compat/linuxkpi/common/src/linux_rcu.c
@@ -178,6 +178,8 @@
 	}
 }
 
+CTASSERT(sizeof(((struct task_struct *)0)->rcu_section[0]) == sizeof(ck_epoch_section_t));
+
 void
 linux_rcu_read_lock(unsigned type)
 {
@@ -189,6 +191,11 @@
 	if (RCU_SKIP())
 		return;
 
+	ts = current;
+
+	if (++(ts->rcu_recurse[type]) != 1)
+		return;
+
 	/*
 	 * Pin thread to current CPU so that the unlock code gets the
 	 * same per-CPU epoch record:
@@ -196,17 +203,15 @@
 	sched_pin();
 
 	record = &DPCPU_GET(linux_epoch_record[type]);
-	ts = current;
 
 	/*
 	 * Use a critical section to prevent recursion inside
 	 * ck_epoch_begin(). Else this function supports recursion.
	 */
 	critical_enter();
-	ck_epoch_begin(&record->epoch_record, NULL);
-	ts->rcu_recurse[type]++;
-	if (ts->rcu_recurse[type] == 1)
-		TAILQ_INSERT_TAIL(&record->ts_head, ts, rcu_entry[type]);
+	ck_epoch_begin(&record->epoch_record,
+	    container_of(&ts->rcu_section[type], ck_epoch_section_t, bucket));
+	TAILQ_INSERT_TAIL(&record->ts_head, ts, rcu_entry[type]);
 	critical_exit();
 }
 
@@ -221,18 +226,21 @@
 	if (RCU_SKIP())
 		return;
 
-	record = &DPCPU_GET(linux_epoch_record[type]);
 	ts = current;
 
+	if (--(ts->rcu_recurse[type]) != 0)
+		return;
+
+	record = &DPCPU_GET(linux_epoch_record[type]);
+
 	/*
 	 * Use a critical section to prevent recursion inside
 	 * ck_epoch_end(). Else this function supports recursion.
 	 */
 	critical_enter();
-	ck_epoch_end(&record->epoch_record, NULL);
-	ts->rcu_recurse[type]--;
-	if (ts->rcu_recurse[type] == 0)
-		TAILQ_REMOVE(&record->ts_head, ts, rcu_entry[type]);
+	ck_epoch_end(&record->epoch_record,
+	    container_of(&ts->rcu_section[type], ck_epoch_section_t, bucket));
+	TAILQ_REMOVE(&record->ts_head, ts, rcu_entry[type]);
 	critical_exit();
 	sched_unpin();
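The pattern the patch introduces is a per-thread nesting counter: only the outermost rcu_read_lock()/rcu_read_unlock() pair touches the ck_epoch machinery, and the outermost lock records its epoch section state in the task_struct instead of passing NULL. Below is a minimal userspace sketch of that nesting-counter idea, under the assumption that it helps review; the names (struct thread_state, epoch_enter, epoch_exit) are hypothetical stand-ins, not the linuxkpi or ck(3) API.

	/* Sketch of the per-thread nesting-counter pattern (hypothetical names). */
	#include <assert.h>
	#include <stdio.h>

	struct thread_state {
		unsigned rcu_recurse;	/* nesting depth of read-side sections */
		unsigned rcu_section;	/* per-thread section state for the outermost lock */
	};

	static void
	epoch_enter(unsigned *section)
	{
		*section = 1;		/* stand-in for ck_epoch_begin() */
		printf("enter epoch (outermost)\n");
	}

	static void
	epoch_exit(unsigned *section)
	{
		*section = 0;		/* stand-in for ck_epoch_end() */
		printf("exit epoch (outermost)\n");
	}

	static void
	rcu_read_lock_sketch(struct thread_state *ts)
	{
		/* Only the outermost lock enters the epoch. */
		if (++(ts->rcu_recurse) != 1)
			return;
		epoch_enter(&ts->rcu_section);
	}

	static void
	rcu_read_unlock_sketch(struct thread_state *ts)
	{
		assert(ts->rcu_recurse > 0);
		/* Only the matching outermost unlock leaves the epoch. */
		if (--(ts->rcu_recurse) != 0)
			return;
		epoch_exit(&ts->rcu_section);
	}

	int
	main(void)
	{
		struct thread_state ts = { 0, 0 };

		rcu_read_lock_sketch(&ts);	/* enters the epoch */
		rcu_read_lock_sketch(&ts);	/* nested: counter only */
		rcu_read_unlock_sketch(&ts);	/* nested: counter only */
		rcu_read_unlock_sketch(&ts);	/* exits the epoch */
		return (0);
	}

The CTASSERT in the patch backs the same idea: task_struct stores the section as a plain unsigned so sched.h need not include the ck headers, and the assertion checks that this storage really is the same size as ck_epoch_section_t before container_of() reinterprets it.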