Replaced thread_lock by interrupt disable. Not working!!!
authorJens Krieg <jkrieg@mailbox.tu-berlin.de>
Tue, 21 Jan 2014 17:03:25 +0000 (18:03 +0100)
committerJens Krieg <jkrieg@mailbox.tu-berlin.de>
Tue, 21 Jan 2014 17:03:25 +0000 (18:03 +0100)
19 files changed:
sys/geom/geom_kern.c
sys/kern/init_main.c
sys/kern/kern_clock.c
sys/kern/kern_cpuset.c
sys/kern/kern_fork.c
sys/kern/kern_idle.c
sys/kern/kern_intr.c
sys/kern/kern_kthread.c
sys/kern/kern_mutex.c
sys/kern/kern_resource.c
sys/kern/kern_switch.c
sys/kern/kern_synch.c
sys/kern/kern_timeout.c
sys/kern/kern_umtx.c
sys/kern/subr_sleepqueue.c
sys/kern/subr_taskqueue.c
sys/kern/subr_trap.c
sys/kern/vfs_subr.c
sys/vm/vm_zeroidle.c

index 1744d17..68269f5 100644 (file)
@@ -90,9 +90,14 @@ g_up_procbody(void *arg)
 {
 
        mtx_assert(&Giant, MA_NOTOWNED);
-       thread_lock(g_up_td);
+//     thread_lock(g_up_td);
+#warning "g_up_procbody lock removed"
+       register_t rflags;
+       rflags = intr_disable();
        sched_prio(g_up_td, PRIBIO);
-       thread_unlock(g_up_td);
+//     thread_unlock(g_up_td);
+       intr_restore(rflags);
+       enable_intr();
        for(;;) {
                g_io_schedule_up(g_up_td);
        }
@@ -103,9 +108,14 @@ g_down_procbody(void *arg)
 {
 
        mtx_assert(&Giant, MA_NOTOWNED);
-       thread_lock(g_down_td);
+//     thread_lock(g_down_td);
+#warning "g_down_procbody lock removed"
+       register_t rflags;
+       rflags = intr_disable();
        sched_prio(g_down_td, PRIBIO);
-       thread_unlock(g_down_td);
+//     thread_unlock(g_down_td);
+       intr_restore(rflags);
+       enable_intr();
        for(;;) {
                g_io_schedule_down(g_down_td);
        }
@@ -116,9 +126,14 @@ g_event_procbody(void *arg)
 {
 
        mtx_assert(&Giant, MA_NOTOWNED);
-       thread_lock(g_event_td);
+//     thread_lock(g_event_td);
+#warning "g_event_procbody lock removed"
+       register_t rflags;
+       rflags = intr_disable();
        sched_prio(g_event_td, PRIBIO);
-       thread_unlock(g_event_td);
+//     thread_unlock(g_event_td);
+       intr_restore(rflags);
+       enable_intr();
        g_run_events();
        /* NOTREACHED */
 }
index 824e453..e3996b6 100644 (file)
@@ -809,6 +809,8 @@ create_init(const void *udata __unused)
        struct ucred *newcred, *oldcred;
        int error;
 
+       printf("create init begin.\n");
+
        error = fork1(&thread0, RFFDG | RFPROC | RFSTOPPED, 0, &initproc,
            NULL, 0);
        if (error)
@@ -831,6 +833,7 @@ create_init(const void *udata __unused)
        crfree(oldcred);
        cred_update_thread(FIRST_THREAD_IN_PROC(initproc));
        cpu_set_fork_handler(FIRST_THREAD_IN_PROC(initproc), start_init, NULL);
+       printf("create init done.\n");
 }
 SYSINIT(init, SI_SUB_CREATE_INIT, SI_ORDER_FIRST, create_init, NULL);
 
@@ -843,9 +846,14 @@ kick_init(const void *udata __unused)
        struct thread *td;
 
        td = FIRST_THREAD_IN_PROC(initproc);
-       thread_lock(td);
+//     thread_lock(td);
+#warning "kick_init lock removed"
+       register_t rflags;
+       rflags = intr_disable();
        TD_SET_CAN_RUN(td);
        sched_add(td, SRQ_BORING);
-       thread_unlock(td);
+//     thread_unlock(td);
+       intr_restore(rflags);
+       enable_intr();
 }
 SYSINIT(kickinit, SI_SUB_KTHREAD_INIT, SI_ORDER_FIRST, kick_init, NULL);
index 23714a6..220f453 100644 (file)
@@ -537,10 +537,15 @@ hardclock_cnt(int cnt, int usermode)
                        flags |= TDF_PROFPEND | TDF_ASTPENDING;
                PROC_SUNLOCK(p);
        }
-       thread_lock(td);
+//     thread_lock(td);
+#warning "hardclock_cnt lock removed"
+       register_t rflags;
+       rflags = intr_disable();
        sched_tick(cnt);
        td->td_flags |= flags;
-       thread_unlock(td);
+//     thread_unlock(td);
+       intr_restore(rflags);
+       enable_intr();
 
 #ifdef HWPMC_HOOKS
        if (PMC_CPU_HAS_SAMPLES(PCPU_GET(cpuid)))
@@ -765,10 +770,15 @@ statclock_cnt(int cnt, int usermode)
        KTR_POINT2(KTR_SCHED, "thread", sched_tdname(td), "statclock",
            "prio:%d", td->td_priority, "stathz:%d", (stathz)?stathz:hz);
        SDT_PROBE2(sched, , , tick, td, td->td_proc);
-       thread_lock_flags(td, MTX_QUIET);
+//     thread_lock_flags(td, MTX_QUIET);
+#warning "statclock_cnt lock removed"
+       register_t rflags;
+       rflags = intr_disable();
        for ( ; cnt > 0; cnt--)
                sched_clock(td);
-       thread_unlock(td);
+//     thread_unlock(td);
+       intr_restore(rflags);
+       enable_intr();
 #ifdef HWPMC_HOOKS
        if (td->td_intr_frame != NULL)
                PMC_SOFT_CALL_TF( , , clock, stat, td->td_intr_frame);
index cee3e81..07db2b8 100644 (file)
@@ -714,7 +714,10 @@ cpuset_setthread(lwpid_t id, cpuset_t *mask)
        if (error)
                goto out;
        set = NULL;
-       thread_lock(td);
+//     thread_lock(td);
+#warning "cpuset_setthread lock removed"
+       register_t rflags;
+       rflags = intr_disable();
        error = cpuset_shadow(td->td_cpuset, nset, mask);
        if (error == 0) {
                set = td->td_cpuset;
@@ -722,13 +725,16 @@ cpuset_setthread(lwpid_t id, cpuset_t *mask)
                sched_affinity(td);
                nset = NULL;
        }
-       thread_unlock(td);
+//     thread_unlock(td);
+       intr_restore(rflags);
+       enable_intr();
        PROC_UNLOCK(p);
        if (set)
                cpuset_rel(set);
 out:
        if (nset)
                uma_zfree(cpuset_zone, nset);
+
        return (error);
 }
 
index 7a0cece..43ae27c 100644 (file)
@@ -487,10 +487,14 @@ do_fork(struct thread *td, int flags, struct proc *p2, struct thread *td2,
        /*
         * Allow the scheduler to initialize the child.
         */
-       thread_lock(td);
+//     thread_lock(td);
+#warning "do_fork lock removed"
+       register_t rflags;
+       rflags = intr_disable();
        sched_fork(td, td2);
-       thread_unlock(td);
-
+       intr_restore(rflags);
+       enable_intr();
+//     thread_unlock(td);
        /*
         * Duplicate sub-structures as needed.
         * Increase reference counts on shared objects.
@@ -718,10 +722,15 @@ do_fork(struct thread *td, int flags, struct proc *p2, struct thread *td2,
                 * If RFSTOPPED not requested, make child runnable and
                 * add to run queue.
                 */
-               thread_lock(td2);
+//             thread_lock(td2);
+#warning "do_fork lock removed"
+               register_t rflags;
+               rflags = intr_disable();
                TD_SET_CAN_RUN(td2);
                sched_add(td2, SRQ_BORING);
-               thread_unlock(td2);
+//             thread_unlock(td2);
+               intr_restore(rflags);
+               enable_intr();
        }
 
        /*
@@ -914,9 +923,10 @@ fork1(struct thread *td, int flags, int pages, struct proc **procp,
                    lim_cur(p1, RLIMIT_NPROC));
                PROC_UNLOCK(p1);
        }
+//     printf("fork1 abschnitt 4,5.\n");
        if (ok) {
                do_fork(td, flags, newproc, td2, vm2, pdflags);
-
+//             printf("fork1 abschnitt 4,6.\n");
                /*
                 * Return child proc pointer to parent.
                 */
@@ -929,6 +939,7 @@ fork1(struct thread *td, int flags, int pages, struct proc **procp,
                return (0);
        }
 
+//     printf("fork1 abschnitt 5.\n");
        error = EAGAIN;
 fail:
        sx_sunlock(&proctree_lock);
@@ -949,6 +960,7 @@ fail1:
                fdrop(fp_procdesc, td);
 #endif
        pause("fork", hz / 2);
+//     printf("fork1 done..\n");
        return (error);
 }
 
@@ -981,7 +993,13 @@ fork_exit(void (*callout)(void *, struct trapframe *), void *arg,
                PCPU_SET(deadthread, NULL);
                thread_stash(dtd);
        }
-       thread_unlock(td);
+#warning "fork_exit unlock removed"
+//     thread_unlock(td);
+       register_t rflags;
+       rflags = intr_disable();
+       intr_restore(rflags);
+       enable_intr();
+
 
        /*
         * cpu_set_fork_handler intercepts this function call to
@@ -990,7 +1008,7 @@ fork_exit(void (*callout)(void *, struct trapframe *), void *arg,
         */
        KASSERT(callout != NULL, ("NULL callout in fork_exit"));
        callout(arg, frame);
-
+       printf("callout done.\n");
        /*
         * Check if a kernel thread misbehaved and returned from its main
         * function.
@@ -1001,7 +1019,7 @@ fork_exit(void (*callout)(void *, struct trapframe *), void *arg,
                kproc_exit(0);
        }
        mtx_assert(&Giant, MA_NOTOWNED);
-
+       printf("fork_exit done.\n");
        if (p->p_sysent->sv_schedtail != NULL)
                (p->p_sysent->sv_schedtail)(td);
 }
index f412d17..02df4b5 100644 (file)
@@ -74,13 +74,20 @@ idle_setup(void *dummy)
                if (error)
                        panic("idle_setup: kproc_create error %d\n", error);
 
-               thread_lock(td);
+//             thread_lock(td);
+#warning "idle_setup lock removed"
+               register_t rflags;
+               rflags = intr_disable();
                TD_SET_CAN_RUN(td);
                td->td_flags |= TDF_IDLETD | TDF_NOLOAD;
                sched_class(td, PRI_IDLE);
                sched_prio(td, PRI_MAX_IDLE);
-               thread_unlock(td);
+//             thread_unlock(td);
+               intr_restore(rflags);
+               enable_intr();
 #ifdef SMP
        }
 #endif
+
+       printf("idle init done.\n");
 }
index c3d8fcb..610cb79 100644 (file)
@@ -183,9 +183,14 @@ ithread_update(struct intr_thread *ithd)
 #ifdef KTR
        sched_clear_tdname(td);
 #endif
-       thread_lock(td);
+//     thread_lock(td);
+#warning "ithread_update lock removed"
+       register_t rflags;
+       rflags = intr_disable();
        sched_prio(td, pri);
-       thread_unlock(td);
+//     thread_unlock(td);
+       intr_restore(rflags);
+       enable_intr();
 }
 
 /*
@@ -456,10 +461,15 @@ ithread_create(const char *name)
                    0, "intr", "%s", name);
        if (error)
                panic("kproc_create() failed with %d", error);
-       thread_lock(td);
+//     thread_lock(td);
+#warning "ithread_create lock removed"
+       register_t rflags;
+       rflags = intr_disable();
        sched_class(td, PRI_ITHD);
        TD_SET_IWAIT(td);
-       thread_unlock(td);
+//     thread_unlock(td);
+       intr_restore(rflags);
+       enable_intr();
        td->td_pflags |= TDP_ITHREAD;
        ithd->it_thread = td;
        CTR2(KTR_INTR, "%s: created %s", __func__, name);
@@ -547,8 +557,9 @@ intr_event_add_handler(struct intr_event *ie, const char *name,
 
        /* Create a thread if we need one. */
        while (ie->ie_thread == NULL && handler != NULL) {
-               if (ie->ie_flags & IE_ADDING_THREAD)
+               if (ie->ie_flags & IE_ADDING_THREAD) {
                        msleep(ie, &ie->ie_lock, 0, "ithread", 0);
+               }
                else {
                        ie->ie_flags |= IE_ADDING_THREAD;
                        mtx_unlock(&ie->ie_lock);
@@ -567,18 +578,18 @@ intr_event_add_handler(struct intr_event *ie, const char *name,
                if (temp_ih->ih_pri > ih->ih_pri)
                        break;
        }
+
        if (temp_ih == NULL)
                TAILQ_INSERT_TAIL(&ie->ie_handlers, ih, ih_next);
        else
                TAILQ_INSERT_BEFORE(temp_ih, ih, ih_next);
        intr_event_update(ie);
-
        CTR3(KTR_INTR, "%s: added %s to %s", __func__, ih->ih_name,
            ie->ie_name);
        mtx_unlock(&ie->ie_lock);
-
        if (cookiep != NULL)
                *cookiep = ih;
+
        return (0);
 }
 #else
@@ -913,7 +924,10 @@ intr_event_schedule_thread(struct intr_event *ie)
         * put it on the runqueue.
         */
        it->it_need = 1;
-       thread_lock(td);
+//     thread_lock(td);
+#warning "intr_event_schedule_thread lock removed"
+       register_t rflags;
+       rflags = intr_disable();
        if (TD_AWAITING_INTR(td)) {
                CTR3(KTR_INTR, "%s: schedule pid %d (%s)", __func__, p->p_pid,
                    td->td_name);
@@ -923,7 +937,9 @@ intr_event_schedule_thread(struct intr_event *ie)
                CTR5(KTR_INTR, "%s: pid %d (%s): it_need %d, state %d",
                    __func__, p->p_pid, td->td_name, it->it_need, td->td_state);
        }
-       thread_unlock(td);
+//     thread_unlock(td);
+       intr_restore(rflags);
+       enable_intr();
 
        return (0);
 }
@@ -1129,9 +1145,14 @@ swi_add(struct intr_event **eventp, const char *name, driver_intr_t handler,
                return (error);
        if (pri == SWI_CLOCK) {
                td = ie->ie_thread->it_thread;
-               thread_lock(td);
+//             thread_lock(td);
+#warning "swi_add lock removed"
+               register_t rflags;
+               rflags = intr_disable();
                td->td_flags |= TDF_NOLOAD;
-               thread_unlock(td);
+//             thread_unlock(td);
+               intr_restore(rflags);
+               enable_intr();
        }
        return (0);
 }
@@ -1366,7 +1387,11 @@ ithread_loop(void *arg)
                 * lock.  This may take a while and it_need may get
                 * set again, so we have to check it again.
                 */
-               thread_lock(td);
+//             thread_lock(td);
+#warning "ithread_loop lock removed"
+               register_t rflags;
+               rflags = intr_disable();
+
                if (!ithd->it_need && !(ithd->it_flags & (IT_DEAD | IT_WAIT))) {
                        TD_SET_IWAIT(td);
                        ie->ie_count = 0;
@@ -1376,7 +1401,10 @@ ithread_loop(void *arg)
                        wake = 1;
                        ithd->it_flags &= ~IT_WAIT;
                }
-               thread_unlock(td);
+//             thread_unlock(td);
+               intr_restore(rflags);
+               enable_intr();
+
                if (wake) {
                        wakeup(ithd);
                        wake = 0;
index 9dcdeb0..4e2a7a9 100644 (file)
@@ -121,18 +121,21 @@ kproc_create(void (*func)(void *), void *arg,
 
        /* call the processes' main()... */
        cpu_set_fork_handler(td, func, arg);
-
        /* Avoid inheriting affinity from a random parent. */
        cpuset_setthread(td->td_tid, cpuset_root);
-       thread_lock(td);
+//     thread_lock(td);
+#warning "kproc_create lock removed"
+       register_t rflags;
+       rflags = intr_disable();
        TD_SET_CAN_RUN(td);
        sched_prio(td, PVM);
        sched_user_prio(td, PUSER);
-
        /* Delay putting it on the run queue until now. */
        if (!(flags & RFSTOPPED))
                sched_add(td, SRQ_BORING); 
-       thread_unlock(td);
+//     thread_unlock(td);
+       intr_restore(rflags);
+       enable_intr();
 
        return 0;
 }
@@ -297,11 +300,16 @@ kthread_add(void (*func)(void *), void *arg, struct proc *p,
        p->p_flag |= P_HADTHREADS;
        newtd->td_sigmask = oldtd->td_sigmask; /* XXX dubious */
        thread_link(newtd, p);
-       thread_lock(oldtd);
+//     thread_lock(oldtd);
+#warning "kthread_add lock removed"
+       register_t rflags;
+       rflags = intr_disable();
        /* let the scheduler know about these things. */
        sched_fork_thread(oldtd, newtd);
        TD_SET_CAN_RUN(newtd);
-       thread_unlock(oldtd);
+//     thread_unlock(oldtd);
+       intr_restore(rflags);
+       enable_intr();
        PROC_UNLOCK(p);
 
        tidhash_add(newtd);
@@ -311,9 +319,14 @@ kthread_add(void (*func)(void *), void *arg, struct proc *p,
 
        /* Delay putting it on the run queue until now. */
        if (!(flags & RFSTOPPED)) {
-               thread_lock(newtd);
+//             thread_lock(newtd);
+#warning "kthread_add lock removed"
+               register_t rflags;
+               rflags = intr_disable();
                sched_add(newtd, SRQ_BORING); 
-               thread_unlock(newtd);
+//             thread_unlock(newtd);
+               intr_restore(rflags);
+               enable_intr();
        }
        if (newtdp)
                *newtdp = newtd;
index 461bc5f..84f6438 100644 (file)
@@ -590,19 +590,23 @@ _thread_lock_flags(struct thread *td, int opts, const char *file, int line)
 
        for (;;) {
 retry:
+//             printf("_thread_lock_flags spinlock enter\n");
                spinlock_enter();
                m = td->td_lock;
+//             printf("_thread_lock_flags KASSERTS enter\n");
                KASSERT(m->mtx_lock != MTX_DESTROYED,
                    ("thread_lock() of destroyed mutex @ %s:%d", file, line));
                KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin,
                    ("thread_lock() of sleep mutex %s @ %s:%d",
                    m->lock_object.lo_name, file, line));
+//             printf("_thread_lock_flags if enter\n");
                if (mtx_owned(m))
                        KASSERT((m->lock_object.lo_flags & LO_RECURSABLE) != 0,
            ("thread_lock: recursed on non-recursive mutex %s @ %s:%d\n",
                            m->lock_object.lo_name, file, line));
                WITNESS_CHECKORDER(&m->lock_object,
                    opts | LOP_NEWORDER | LOP_EXCLUSIVE, file, line, NULL);
+//             printf("_thread_lock_flags while enter\n");
                while (!_mtx_obtain_lock(m, tid)) {
 #ifdef KDTRACE_HOOKS
                        spin_cnt++;
index 5c88da7..c7b2d9e 100644 (file)
@@ -1058,14 +1058,19 @@ void
 ruxagg(struct proc *p, struct thread *td)
 {
 
-       thread_lock(td);
+//     thread_lock(td);
+#warning "ruxagg lock removed"
+       register_t rflags;
+       rflags = intr_disable();
        ruxagg_locked(&p->p_rux, td);
        ruxagg_locked(&td->td_rux, td);
        td->td_incruntime = 0;
        td->td_uticks = 0;
        td->td_iticks = 0;
        td->td_sticks = 0;
-       thread_unlock(td);
+//     thread_unlock(td);
+       intr_restore(rflags);
+       enable_intr();
 }
 
 /*
index 32a0dde..f36d4a1 100644 (file)
@@ -202,7 +202,10 @@ critical_exit(void)
                td->td_critnest = 0;
                if (td->td_owepreempt && !kdb_active) {
                        td->td_critnest = 1;
-                       thread_lock(td);
+//                     thread_lock(td);
+#warning "critical_exit lock removed"
+                       register_t rflags;
+                       rflags = intr_disable();
                        td->td_critnest--;
                        flags = SW_INVOL | SW_PREEMPT;
                        if (TD_IS_IDLETHREAD(td))
@@ -210,7 +213,9 @@ critical_exit(void)
                        else
                                flags |= SWT_OWEPREEMPT;
                        mi_switch(flags, NULL);
-                       thread_unlock(td);
+//                     thread_unlock(td);
+                       intr_restore(rflags);
+                       enable_intr();
                }
        } else
                td->td_critnest--;
index e2e4081..8aa208e 100644 (file)
@@ -153,7 +153,6 @@ _sleep(void *ident, struct lock_object *lock, int priority,
        struct lock_class *class;
        int catch, flags, lock_state, pri, rval;
        WITNESS_SAVE_DECL(lock_witness);
-
        td = curthread;
        p = td->td_proc;
 #ifdef KTRACE
@@ -173,7 +172,6 @@ _sleep(void *ident, struct lock_object *lock, int priority,
                class = LOCK_CLASS(lock);
        else
                class = NULL;
-
        if (cold || SCHEDULER_STOPPED()) {
                /*
                 * During autoconfiguration, just return;
@@ -189,7 +187,6 @@ _sleep(void *ident, struct lock_object *lock, int priority,
        }
        catch = priority & PCATCH;
        pri = priority & PRIMASK;
-
        /*
         * If we are already on a sleep queue, then remove us from that
         * sleep queue first.  We have to do this to handle recursive
@@ -197,7 +194,6 @@ _sleep(void *ident, struct lock_object *lock, int priority,
         */
        if (TD_ON_SLEEPQ(td))
                sleepq_remove(td, td->td_wchan);
-
        if (ident == &pause_wchan)
                flags = SLEEPQ_PAUSE;
        else
@@ -206,7 +202,6 @@ _sleep(void *ident, struct lock_object *lock, int priority,
                flags |= SLEEPQ_INTERRUPTIBLE;
        if (priority & PBDRY)
                flags |= SLEEPQ_STOP_ON_BDRY;
-
        sleepq_lock(ident);
        CTR5(KTR_PROC, "sleep: thread %ld (pid %ld, %s) on %s (%p)",
            td->td_tid, p->p_pid, td->td_name, wmesg, ident);
@@ -221,7 +216,6 @@ _sleep(void *ident, struct lock_object *lock, int priority,
        } else
                /* GCC needs to follow the Yellow Brick Road */
                lock_state = -1;
-
        /*
         * We put ourselves on the sleep queue and start our timeout
         * before calling thread_suspend_check, as we could stop there,
@@ -240,14 +234,26 @@ _sleep(void *ident, struct lock_object *lock, int priority,
                lock_state = class->lc_unlock(lock);
                sleepq_lock(ident);
        }
-       if (timo && catch)
+//     printf("_sleep 1\n");
+       if (timo && catch) {
+//             printf("_sleep 1.1\n");
                rval = sleepq_timedwait_sig(ident, pri);
-       else if (timo)
+//             printf("_sleep 1.2\n");
+       }
+       else if (timo) {
+//             printf("_sleep 1.3\n");
                rval = sleepq_timedwait(ident, pri);
-       else if (catch)
+//             printf("_sleep 1.4\n");
+       }
+       else if (catch) {
+//             printf("_sleep 1.5\n");
                rval = sleepq_wait_sig(ident, pri);
+//             printf("_sleep 1.6\n");
+       }
        else {
+//             printf("_sleep 1.7\n");
                sleepq_wait(ident, pri);
+//             printf("_sleep 1.8\n");
                rval = 0;
        }
 #ifdef KTRACE
@@ -259,6 +265,7 @@ _sleep(void *ident, struct lock_object *lock, int priority,
                class->lc_lock(lock, lock_state);
                WITNESS_RESTORE(lock, lock_witness);
        }
+
        return (rval);
 }
 
@@ -596,13 +603,18 @@ kern_yield(int prio)
 
        td = curthread;
        DROP_GIANT();
-       thread_lock(td);
+//     thread_lock(td);
+#warning "kern_yield lock removed"
+       register_t rflags;
+       rflags = intr_disable();
        if (prio == PRI_USER)
                prio = td->td_user_pri;
        if (prio >= 0)
                sched_prio(td, prio);
        mi_switch(SW_VOL | SWT_RELINQUISH, NULL);
-       thread_unlock(td);
+//     thread_unlock(td);
+       intr_restore(rflags);
+       enable_intr();
        PICKUP_GIANT();
 }
 
index d661664..67ca7f3 100644 (file)
@@ -492,6 +492,7 @@ softclock_call_cc(struct callout *c, struct callout_cpu *cc, int *mpcalls,
                 * while we switched locks.
                 */
                if (cc->cc_cancel) {
+                       printf("softclock_call_cc 3\n");
                        class->lc_unlock(c_lock);
                        goto skip;
                }
@@ -565,6 +566,7 @@ skip:
                        cc_cme_cleanup(cc);
                cc->cc_waiting = 0;
                CC_UNLOCK(cc);
+               printf("softclock_call_cc 7\n");
                wakeup(&cc->cc_waiting);
                CC_LOCK(cc);
        } else if (cc_cme_migrating(cc)) {
index 64b6c33..4c47084 100644 (file)
@@ -3685,7 +3685,12 @@ umtx_thread_cleanup(struct thread *td)
                TAILQ_REMOVE(&uq->uq_pi_contested, pi, pi_link);
        }
        mtx_unlock_spin(&umtx_lock);
-       thread_lock(td);
+//     thread_lock(td);
+#warning "umtx_thread_cleanup lock removed"
+       register_t rflags;
+       rflags = intr_disable();
        sched_lend_user_prio(td, PRI_MAX);
-       thread_unlock(td);
+//     thread_unlock(td);
+       intr_restore(rflags);
+       enable_intr();
 }
index 0eb2c8c..04aee49 100644 (file)
@@ -341,7 +341,10 @@ sleepq_add(void *wchan, struct lock_object *lock, const char *wmesg, int flags,
                MPASS((flags & SLEEPQ_TYPE) == sq->sq_type);
                LIST_INSERT_HEAD(&sq->sq_free, td->td_sleepqueue, sq_hash);
        }
-       thread_lock(td);
+#warning "sleepq_add lock removed"
+       register_t rflags;
+       rflags = intr_disable();
+//     thread_lock(td);
        TAILQ_INSERT_TAIL(&sq->sq_blocked[queue], td, td_slpq);
        sq->sq_blockedcnt[queue]++;
        td->td_sleepqueue = NULL;
@@ -354,7 +357,9 @@ sleepq_add(void *wchan, struct lock_object *lock, const char *wmesg, int flags,
                if (flags & SLEEPQ_STOP_ON_BDRY)
                        td->td_flags |= TDF_SBDRY;
        }
-       thread_unlock(td);
+       intr_restore(rflags);
+       enable_intr();
+//     thread_unlock(td);
 }
 
 /*
@@ -425,7 +430,11 @@ sleepq_catch_signals(void *wchan, int pri)
         * we can switch immediately.  Otherwise do the signal processing
         * directly.
         */
-       thread_lock(td);
+
+//     thread_lock(td);
+#warning "sleepq_catch_signals lock removed"
+       register_t rflags;
+       rflags = intr_disable();
        if ((td->td_flags & (TDF_NEEDSIGCHK | TDF_NEEDSUSPCHK)) == 0) {
                sleepq_switch(wchan, pri);
                return (0);
@@ -460,6 +469,9 @@ sleepq_catch_signals(void *wchan, int pri)
        mtx_lock_spin(&sc->sc_lock);
        PROC_UNLOCK(p);
        thread_lock(td);
+       intr_restore(rflags);
+       enable_intr();
+
        PROC_SUNLOCK(p);
        if (ret == 0) {
                sleepq_switch(wchan, pri);
@@ -502,7 +514,6 @@ sleepq_switch(void *wchan, int pri)
        sc = SC_LOOKUP(wchan);
        mtx_assert(&sc->sc_lock, MA_OWNED);
        THREAD_LOCK_ASSERT(td, MA_OWNED);
-
        /* 
         * If we have a sleep queue, then we've already been woken up, so
         * just return.
@@ -511,7 +522,6 @@ sleepq_switch(void *wchan, int pri)
                mtx_unlock_spin(&sc->sc_lock);
                return;
        }
-
        /*
         * If TDF_TIMEOUT is set, then our sleep has been timed out
         * already but we are still on the sleep queue, so dequeue the
@@ -519,8 +529,11 @@ sleepq_switch(void *wchan, int pri)
         */
        if (td->td_flags & TDF_TIMEOUT) {
                MPASS(TD_ON_SLEEPQ(td));
+//             printf("sleepq_switch 1\n");
                sq = sleepq_lookup(wchan);
+//             printf("sleepq_switch 2\n");
                if (sleepq_resume_thread(sq, td, 0)) {
+//                     printf("sleepq_switch 3\n");
 #ifdef INVARIANTS
                        /*
                         * This thread hasn't gone to sleep yet, so it
@@ -529,6 +542,7 @@ sleepq_switch(void *wchan, int pri)
                        panic("not waking up swapper");
 #endif
                }
+//             printf("sleepq_switch 4\n");
                mtx_unlock_spin(&sc->sc_lock);
                return;         
        }
@@ -538,7 +552,8 @@ sleepq_switch(void *wchan, int pri)
 #endif
        MPASS(td->td_sleepqueue == NULL);
        sched_sleep(td, pri);
-       thread_lock_set(td, &sc->sc_lock);
+#warning "sleepq_switch lock removed"
+//     thread_lock_set(td, &sc->sc_lock);
        SDT_PROBE0(sched, , , sleep);
        TD_SET_SLEEPING(td);
        mi_switch(SW_VOL | SWT_SLEEPQ, NULL);
@@ -619,9 +634,14 @@ sleepq_wait(void *wchan, int pri)
 
        td = curthread;
        MPASS(!(td->td_flags & TDF_SINTR));
-       thread_lock(td);
+//     thread_lock(td);
+#warning "sleepq_wait lock removed"
+       register_t rflags;
+       rflags = intr_disable();
        sleepq_switch(wchan, pri);
-       thread_unlock(td);
+//     thread_unlock(td);
+       intr_restore(rflags);
+       enable_intr();
 }
 
 /*
@@ -636,7 +656,11 @@ sleepq_wait_sig(void *wchan, int pri)
 
        rcatch = sleepq_catch_signals(wchan, pri);
        rval = sleepq_check_signals();
-       thread_unlock(curthread);
+//     thread_unlock(curthread);
+#warning "sleepq_wait_sig unlock removed"
+       register_t rflags;
+       rflags = intr_disable();
+       intr_restore(rflags);
+       enable_intr();
        if (rcatch)
                return (rcatch);
        return (rval);
@@ -654,10 +678,15 @@ sleepq_timedwait(void *wchan, int pri)
 
        td = curthread;
        MPASS(!(td->td_flags & TDF_SINTR));
-       thread_lock(td);
+//     thread_lock(td);
+#warning "sleepq_timedwait lock removed"
+       register_t rflags;
+       rflags = intr_disable();
        sleepq_switch(wchan, pri);
        rval = sleepq_check_timeout();
-       thread_unlock(td);
+//     thread_unlock(td);
+       intr_restore(rflags);
+       enable_intr();
 
        return (rval);
 }
@@ -720,9 +749,9 @@ sleepq_resume_thread(struct sleepqueue *sq, struct thread *td, int pri)
        THREAD_LOCK_ASSERT(td, MA_OWNED);
        sc = SC_LOOKUP(sq->sq_wchan);
        mtx_assert(&sc->sc_lock, MA_OWNED);
-
+//     printf("sleepq_resume_thread 1\n");
        SDT_PROBE2(sched, , , wakeup, td, td->td_proc);
-
+//     printf("sleepq_resume_thread 2\n");
        /* Remove the thread from the queue. */
        sq->sq_blockedcnt[td->td_sqqueue]--;
        TAILQ_REMOVE(&sq->sq_blocked[td->td_sqqueue], td, td_slpq);
@@ -839,9 +868,14 @@ sleepq_signal(void *wchan, int flags, int pri, int queue)
                        besttd = td;
        }
        MPASS(besttd != NULL);
-       thread_lock(besttd);
+//     thread_lock(besttd);
+#warning "sleepq_signal lock removed"
+       register_t rflags;
+       rflags = intr_disable();
        wakeup_swapper = sleepq_resume_thread(sq, besttd, pri);
-       thread_unlock(besttd);
+//     thread_unlock(besttd);
+       intr_restore(rflags);
+       enable_intr();
        return (wakeup_swapper);
 }
 
@@ -867,10 +901,15 @@ sleepq_broadcast(void *wchan, int flags, int pri, int queue)
        /* Resume all blocked threads on the sleep queue. */
        wakeup_swapper = 0;
        TAILQ_FOREACH_SAFE(td, &sq->sq_blocked[queue], td_slpq, tdn) {
-               thread_lock(td);
+//             thread_lock(td);
+#warning "sleepq_broadcast lock removed"
+               register_t rflags;
+               rflags = intr_disable();
                if (sleepq_resume_thread(sq, td, pri))
                        wakeup_swapper = 1;
-               thread_unlock(td);
+//             thread_unlock(td);
+               intr_restore(rflags);
+               enable_intr();
        }
        return (wakeup_swapper);
 }
@@ -897,7 +936,10 @@ sleepq_timeout(void *arg)
         * First, see if the thread is asleep and get the wait channel if
         * it is.
         */
-       thread_lock(td);
+//     thread_lock(td);
+#warning "sleepq_timeout lock removed"
+       register_t rflags;
+       rflags = intr_disable();
        if (TD_IS_SLEEPING(td) && TD_ON_SLEEPQ(td)) {
                wchan = td->td_wchan;
                sc = SC_LOOKUP(wchan);
@@ -906,7 +948,9 @@ sleepq_timeout(void *arg)
                MPASS(sq != NULL);
                td->td_flags |= TDF_TIMEOUT;
                wakeup_swapper = sleepq_resume_thread(sq, td, 0);
-               thread_unlock(td);
+               //thread_unlock(td);
+               intr_restore(rflags);
+               enable_intr();
                if (wakeup_swapper)
                        kick_proc0();
                return;
@@ -920,7 +964,9 @@ sleepq_timeout(void *arg)
         */
        if (TD_ON_SLEEPQ(td)) {
                td->td_flags |= TDF_TIMEOUT;
-               thread_unlock(td);
+//             thread_unlock(td);
+               intr_restore(rflags);
+               enable_intr();
                return;
        }
 
@@ -940,7 +986,9 @@ sleepq_timeout(void *arg)
                wakeup_swapper = setrunnable(td);
        } else
                td->td_flags |= TDF_TIMOFAIL;
-       thread_unlock(td);
+//     thread_unlock(td);
+       intr_restore(rflags);
+       enable_intr();
        if (wakeup_swapper)
                kick_proc0();
 }
@@ -974,11 +1022,17 @@ sleepq_remove(struct thread *td, void *wchan)
                return;
        }
        /* Thread is asleep on sleep queue sq, so wake it up. */
-       thread_lock(td);
+//     thread_lock(td);
+#warning "sleepq_remove lock removed"
+       register_t rflags;
+       rflags = intr_disable();
+
        MPASS(sq != NULL);
        MPASS(td->td_wchan == wchan);
        wakeup_swapper = sleepq_resume_thread(sq, td, 0);
-       thread_unlock(td);
+//     thread_unlock(td);
+       intr_restore(rflags);
+       enable_intr();
        sleepq_release(wchan);
        if (wakeup_swapper)
                kick_proc0();
index 31ea52d..636f46e 100644
@@ -476,10 +476,15 @@ taskqueue_start_threads(struct taskqueue **tqp, int count, int pri,
                if (tq->tq_threads[i] == NULL)
                        continue;
                td = tq->tq_threads[i];
-               thread_lock(td);
+//             thread_lock(td);
+#warning "taskqueue_start_threads lock removed"
+               register_t rflags;
+               rflags = intr_disable();
                sched_prio(td, pri);
                sched_add(td, SRQ_BORING);
-               thread_unlock(td);
+//             thread_unlock(td);
+               intr_restore(rflags);
+               enable_intr();
        }
 
        return (0);
index e57ab9e..9fe6e24 100644
@@ -183,11 +183,16 @@ ast(struct trapframe *framep)
         * AST's saved in flags, the astpending flag will be set and
         * ast() will be called again.
         */
-       thread_lock(td);
+//     thread_lock(td);
+#warning "ast lock removed"
+       register_t rflags;
+       rflags = intr_disable();
        flags = td->td_flags;
        td->td_flags &= ~(TDF_ASTPENDING | TDF_NEEDSIGCHK | TDF_NEEDSUSPCHK |
            TDF_NEEDRESCHED | TDF_ALRMPEND | TDF_PROFPEND | TDF_MACPEND);
-       thread_unlock(td);
+//     thread_unlock(td);
+       intr_restore(rflags);
+       enable_intr();
        PCPU_INC(cnt.v_trap);
 
        if (td->td_ucred != p->p_ucred) 
@@ -221,10 +226,15 @@ ast(struct trapframe *framep)
                if (KTRPOINT(td, KTR_CSW))
                        ktrcsw(1, 1, __func__);
 #endif
-               thread_lock(td);
+//             thread_lock(td);
+#warning "ast lock removed"
+               register_t rflags;
+               rflags = intr_disable();
                sched_prio(td, td->td_user_pri);
                mi_switch(SW_INVOL | SWT_NEEDRESCHED, NULL);
-               thread_unlock(td);
+//             thread_unlock(td);
+               intr_restore(rflags);
+               enable_intr();
 #ifdef KTRACE
                if (KTRPOINT(td, KTR_CSW))
                        ktrcsw(0, 1, __func__);
index 43accca..a6fabc7 100644
@@ -1917,9 +1917,14 @@ sched_sync(void)
                 */
                if (syncer_state != SYNCER_RUNNING ||
                    time_uptime == starttime) {
-                       thread_lock(td);
+//                     thread_lock(td);
+#warning "sched_sync lock removed"
+                       register_t rflags;
+                       rflags = intr_disable();
                        sched_prio(td, PPAUSE);
-                       thread_unlock(td);
+//                     thread_unlock(td);
+                       intr_restore(rflags);
+                       enable_intr();
                }
                if (syncer_state != SYNCER_RUNNING)
                        cv_timedwait(&sync_wakeup, &sync_mtx,
index 6ba96e1..f8b53a7 100644
@@ -72,7 +72,6 @@ static int zero_state;
 static int
 vm_page_zero_check(void)
 {
-
        if (!idlezero_enable)
                return (0);
        /*
@@ -122,6 +121,7 @@ vm_pagezero(void __unused *arg)
 
        mtx_lock(&vm_page_queue_free_mtx);
        for (;;) {
+//             printf("vm_pagezero begin\n");
                if (vm_page_zero_check()) {
                        vm_page_zero_idle();
 #ifndef PREEMPTION
@@ -133,9 +133,12 @@ vm_pagezero(void __unused *arg)
 #endif
                } else {
                        wakeup_needed = TRUE;
+//                     printf("vm_pagezero msleep enter\n");
                        msleep(&zero_state, &vm_page_queue_free_mtx, 0,
                            "pgzero", hz * 300);
+//                     printf("vm_pagezero msleep leave\n");
                }
+//             printf("vm_pagezero done\n");
        }
 }
 
@@ -150,13 +153,19 @@ pagezero_start(void __unused *arg)
        if (error)
                panic("pagezero_start: error %d\n", error);
        td = FIRST_THREAD_IN_PROC(p);
-       thread_lock(td);
+
+//     thread_lock(td);
+#warning "pagezero_start lock removed"
+       register_t rflags;
+       rflags = intr_disable();
 
        /* We're an idle task, don't count us in the load. */
        td->td_flags |= TDF_NOLOAD;
        sched_class(td, PRI_IDLE);
        sched_prio(td, PRI_MAX_IDLE);
        sched_add(td, SRQ_BORING);
-       thread_unlock(td);
+//     thread_unlock(td);
+       intr_restore(rflags);
+       enable_intr();
 }
 SYSINIT(pagezero, SI_SUB_KTHREAD_VM, SI_ORDER_ANY, pagezero_start, NULL);