Mirror of https://github.com/golang/go.git
add LockOSThread and UnlockOSThread to runtime package
for use by debugger, which needs to make sure that all
ptrace calls about a given pid come from the same thread.

R=r
DELTA=175  (90 added, 63 deleted, 22 changed)
OCL=31546
CL=31558
parent 9126b75e35
commit 218c393029

3 changed files with 107 additions and 80 deletions
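To illustrate the motivation stated in the commit message: the kernel only accepts ptrace requests for a tracee from the thread that attached to it, so a debugger written in Go wants all of its ptrace work wired to one OS thread. The sketch below is not the debugger code from this tree; it is a minimal present-day Go example, assuming Linux and the syscall package's PtraceAttach/PtraceDetach/Wait4 wrappers.

// Minimal Linux-only sketch of the pattern this change enables; not the
// debugger from the Go tree.
package main

import (
	"fmt"
	"os"
	"runtime"
	"strconv"
	"syscall"
)

// trace does all of its ptrace work on one goroutine that is wired to a
// single OS thread, since ptrace requests for an attached pid must come
// from the thread that attached.
func trace(pid int) error {
	runtime.LockOSThread()
	defer runtime.UnlockOSThread()

	if err := syscall.PtraceAttach(pid); err != nil {
		return err
	}
	var status syscall.WaitStatus
	if _, err := syscall.Wait4(pid, &status, 0, nil); err != nil {
		return err
	}
	fmt.Printf("attached to %d, stopped=%v\n", pid, status.Stopped())
	return syscall.PtraceDetach(pid)
}

func main() {
	if len(os.Args) != 2 {
		fmt.Fprintln(os.Stderr, "usage: trace <pid>")
		os.Exit(1)
	}
	pid, err := strconv.Atoi(os.Args[1])
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	if err := trace(pid); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}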
@@ -26,3 +26,17 @@ func Breakpoint()
 // program counter, file name, and line number within the file of the corresponding
 // call. The boolean ok is false if it was not possible to recover the information.
 func Caller(n int) (pc uintptr, file string, line int, ok bool)
+
+// mid returns the current os thread (m) id.
+func mid() uint32
+
+// LockOSThread wires the calling goroutine to its current operating system thread.
+// Until the calling goroutine exits or calls UnlockOSThread, it will always
+// execute in that thread, and no other goroutine can.
+// LockOSThread cannot be used during init functions.
+func LockOSThread()
+
+// UnlockOSThread unwires the calling goroutine from its fixed operating system thread.
+// If the calling goroutine has not called LockOSThread, UnlockOSThread is a no-op.
+func UnlockOSThread()
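A minimal usage sketch of the two calls documented above, written against the exported runtime API as it exists in released Go; between Lock and Unlock the goroutine below is the only one that can run on its thread.

package main

import (
	"fmt"
	"runtime"
)

func main() {
	done := make(chan bool)
	go func() {
		// Wire this goroutine to its current OS thread: until it calls
		// UnlockOSThread or exits, it always runs on that thread and no
		// other goroutine does.
		runtime.LockOSThread()
		defer runtime.UnlockOSThread()

		// Thread-affine work (ptrace, thread-local OS state, some C
		// libraries) is safe to do here.
		fmt.Println("running on a dedicated OS thread")
		done <- true
	}()
	<-done
}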
@@ -69,11 +69,12 @@ Sched sched;
 static void gput(G*);	// put/get on ghead/gtail
 static G* gget(void);
 static void mput(M*);	// put/get on mhead
-static M* mget(void);
+static M* mget(G*);
 static void gfput(G*);	// put/get on gfree
 static G* gfget(void);
 static void matchmg(void);	// match ms to gs
 static void readylocked(G*);	// ready, but sched is locked
+static void mnextg(M*, G*);
 
 // Scheduler loop.
 static void scheduler(void);
@@ -131,11 +132,6 @@ initdone(void)
 void
 goexit(void)
 {
-	if(debug > 1){
-		lock(&debuglock);
-		printf("goexit goid=%d\n", g->goid);
-		unlock(&debuglock);
-	}
 	g->status = Gmoribund;
 	gosched();
 }
@@ -157,6 +153,14 @@ tracebackothers(G *me)
 static void
 gput(G *g)
 {
+	M *m;
+
+	// If g is wired, hand it off directly.
+	if((m = g->lockedm) != nil) {
+		mnextg(m, g);
+		return;
+	}
+
 	g->schedlink = nil;
 	if(sched.ghead == nil)
 		sched.ghead = g;
@@ -191,14 +195,18 @@ mput(M *m)
 	sched.mwait++;
 }
 
-// Get from `m' list. Sched must be locked.
+// Get an `m' to run `g'. Sched must be locked.
 static M*
-mget(void)
+mget(G *g)
 {
 	M *m;
 
-	m = sched.mhead;
-	if(m){
+	// if g has its own m, use it.
+	if((m = g->lockedm) != nil)
+		return m;
+
+	// otherwise use general m pool.
+	if((m = sched.mhead) != nil){
 		sched.mhead = m->schedlink;
 		sched.mwait--;
 	}
@@ -257,6 +265,18 @@ readylocked(G *g)
 	matchmg();
 }
 
+// Pass g to m for running.
+static void
+mnextg(M *m, G *g)
+{
+	sched.mcpu++;
+	m->nextg = g;
+	if(m->waitnextg) {
+		m->waitnextg = 0;
+		notewakeup(&m->havenextg);
+	}
+}
+
 // Get the next goroutine that m should run.
 // Sched must be locked on entry, is unlocked on exit.
 // Makes sure that at most $GOMAXPROCS gs are
@@ -266,37 +286,42 @@ nextgandunlock(void)
 {
 	G *gp;
 
-	// On startup, each m is assigned a nextg and
-	// has already been accounted for in mcpu.
+	if(sched.mcpu < 0)
+		throw("negative sched.mcpu");
 
+	// If there is a g waiting as m->nextg,
+	// mnextg took care of the sched.mcpu++.
 	if(m->nextg != nil) {
 		gp = m->nextg;
 		m->nextg = nil;
 		unlock(&sched);
-		if(debug > 1) {
-			lock(&debuglock);
-			printf("m%d nextg found g%d\n", m->id, gp->goid);
-			unlock(&debuglock);
-		}
 		return gp;
 	}
 
-	// Otherwise, look for work.
-	if(sched.mcpu < sched.mcpumax && (gp=gget()) != nil) {
-		sched.mcpu++;
-		unlock(&sched);
-		if(debug > 1) {
-			lock(&debuglock);
-			printf("m%d nextg got g%d\n", m->id, gp->goid);
-			unlock(&debuglock);
+	if(m->lockedg != nil) {
+		// We can only run one g, and it's not available.
+		// Make sure some other cpu is running to handle
+		// the ordinary run queue.
+		if(sched.gwait != 0)
+			matchmg();
+	} else {
+		// Look for work on global queue.
+		while(sched.mcpu < sched.mcpumax && (gp=gget()) != nil) {
+			if(gp->lockedm) {
+				mnextg(gp->lockedm, gp);
+				continue;
+			}
+			sched.mcpu++;	// this m will run gp
+			unlock(&sched);
+			return gp;
 		}
-		return gp;
+		// Otherwise, wait on global m queue.
+		mput(m);
 	}
 
-	// Otherwise, sleep.
-	mput(m);
 	if(sched.mcpu == 0 && sched.msyscall == 0)
 		throw("all goroutines are asleep - deadlock!");
 	m->nextg = nil;
+	m->waitnextg = 1;
 	noteclear(&m->havenextg);
 	if(sched.waitstop && sched.mcpu <= sched.mcpumax) {
 		sched.waitstop = 0;
@@ -308,11 +333,6 @@ nextgandunlock(void)
 	if((gp = m->nextg) == nil)
 		throw("bad m->nextg in nextgoroutine");
 	m->nextg = nil;
-	if(debug > 1) {
-		lock(&debuglock);
-		printf("m%d nextg woke g%d\n", m->id, gp->goid);
-		unlock(&debuglock);
-	}
 	return gp;
 }
@@ -364,34 +384,15 @@ matchmg(void)
 	M *m;
 	G *g;
 
-	if(debug > 1 && sched.ghead != nil) {
-		lock(&debuglock);
-		printf("matchmg mcpu=%d mcpumax=%d gwait=%d\n", sched.mcpu, sched.mcpumax, sched.gwait);
-		unlock(&debuglock);
-	}
-
 	while(sched.mcpu < sched.mcpumax && (g = gget()) != nil){
-		sched.mcpu++;
-		if((m = mget()) != nil){
-			if(debug > 1) {
-				lock(&debuglock);
-				printf("wakeup m%d g%d\n", m->id, g->goid);
-				unlock(&debuglock);
-			}
-			m->nextg = g;
-			notewakeup(&m->havenextg);
-		}else{
+		// Find the m that will run g.
+		if((m = mget(g)) == nil){
 			m = malloc(sizeof(M));
 			m->g0 = malg(8192);
-			m->nextg = g;
 			m->id = sched.mcount++;
-			if(debug) {
-				lock(&debuglock);
-				printf("alloc m=%p m%d g%d\n", m, m->id, g->goid);
-				unlock(&debuglock);
-			}
 			newosproc(m, m->g0, m->g0->stackbase, mstart);
 		}
+		mnextg(m, g);
 	}
 }
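For readers following the scheduler changes above (mget now takes the g and prefers its wired m, mnextg hands a g to a specific m, matchmg pairs runnable gs with ms): below is a toy Go model of that wiring logic. The types, fields, and functions mirror the C names for readability only; this is an illustration, not the runtime's implementation.

package main

import "fmt"

// Toy model of the g/m wiring introduced by this change.
type G struct {
	id      int
	lockedm *M // set by LockOSThread: g may only run on this m
}

type M struct {
	id      int
	lockedg *G // set by LockOSThread: this m may only run lockedg
	nextg   *G // goroutine handed to this m to run next
}

var (
	mpool  []*M // idle ms (sched.mhead in the C code)
	gqueue []*G // runnable gs (sched.ghead in the C code)
)

// mget returns the m that should run g: the wired m if g is locked,
// otherwise an m from the general pool (nil if none is idle).
func mget(g *G) *M {
	if g.lockedm != nil {
		return g.lockedm
	}
	if len(mpool) > 0 {
		m := mpool[0]
		mpool = mpool[1:]
		return m
	}
	return nil
}

// mnextg hands g to m for running (the C version also wakes the sleeping m).
func mnextg(m *M, g *G) {
	m.nextg = g
	fmt.Printf("g%d -> m%d\n", g.id, m.id)
}

// matchmg pairs runnable gs with ms, creating a new m when none is idle
// (newosproc in the real runtime).
func matchmg() {
	nextID := 100
	for len(gqueue) > 0 {
		g := gqueue[0]
		gqueue = gqueue[1:]
		m := mget(g)
		if m == nil {
			m = &M{id: nextID}
			nextID++
		}
		mnextg(m, g)
	}
}

func main() {
	wired := &M{id: 7}
	g1 := &G{id: 1, lockedm: wired} // as if g1 had called LockOSThread on m7
	wired.lockedg = g1
	g2 := &G{id: 2}
	mpool = []*M{{id: 3}}
	gqueue = []*G{g1, g2}
	matchmg() // g1 must go to m7; g2 takes m3 from the pool
}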
@@ -414,11 +415,9 @@ scheduler(void)
 		gp = m->curg;
 		gp->m = nil;
 		sched.mcpu--;
-		if(debug > 1) {
-			lock(&debuglock);
-			printf("m%d sched g%d status %d\n", m->id, gp->goid, gp->status);
-			unlock(&debuglock);
-		}
+		if(sched.mcpu < 0)
+			throw("sched.mcpu < 0 in scheduler");
 		switch(gp->status){
 		case Grunnable:
 		case Gdead:
@@ -430,6 +429,10 @@ scheduler(void)
 		break;
 	case Gmoribund:
 		gp->status = Gdead;
+		if(gp->lockedm) {
+			gp->lockedm = nil;
+			m->lockedg = nil;
+		}
 		if(--sched.gcount == 0)
 			exit(0);
 		break;
@@ -444,12 +447,6 @@ scheduler(void)
 	gp = nextgandunlock();
 	gp->readyonstop = 0;
 	gp->status = Grunning;
-	if(debug > 1) {
-		lock(&debuglock);
-		printf("m%d run g%d at %p\n", m->id, gp->goid, gp->sched.pc);
-		traceback(gp->sched.pc, gp->sched.sp, gp);
-		unlock(&debuglock);
-	}
 	m->curg = gp;
 	gp->m = m;
 	if(gp->sched.pc == (byte*)goexit)	// kickoff
@@ -478,13 +475,8 @@ gosched(void)
 void
 sys·entersyscall(uint64 callerpc, int64 trap)
 {
-	USED(callerpc);
+	USED(callerpc, trap);
 
-	if(debug > 1) {
-		lock(&debuglock);
-		printf("m%d g%d enter syscall %D\n", m->id, g->goid, trap);
-		unlock(&debuglock);
-	}
 	lock(&sched);
 	g->status = Gsyscall;
 	// Leave SP around for gc and traceback.
@@ -509,12 +501,6 @@ sys·entersyscall(uint64 callerpc, int64 trap)
 void
 sys·exitsyscall(void)
 {
-	if(debug > 1) {
-		lock(&debuglock);
-		printf("m%d g%d exit syscall mcpu=%d mcpumax=%d\n", m->id, g->goid, sched.mcpu, sched.mcpumax);
-		unlock(&debuglock);
-	}
-
 	lock(&sched);
 	g->status = Grunning;
 	sched.msyscall--;
@@ -528,7 +514,7 @@ sys·exitsyscall(void)
 
 	// Slow path - all the cpus are taken.
 	// The scheduler will ready g and put this m to sleep.
-	// When the scheduler takes g awa from m,
+	// When the scheduler takes g away from m,
 	// it will undo the sched.mcpu++ above.
 	gosched();
 }
@@ -804,3 +790,27 @@ runtime·Gosched(void)
 	gosched();
 }
 
+void
+runtime·LockOSThread(void)
+{
+	if(sched.predawn)
+		throw("cannot wire during init");
+	m->lockedg = g;
+	g->lockedm = m;
+}
+
+void
+runtime·UnlockOSThread(void)
+{
+	m->lockedg = nil;
+	g->lockedm = nil;
+}
+
+// for testing of wire, unwire
+void
+runtime·mid(uint32 ret)
+{
+	ret = m->id;
+	FLUSH(&ret);
+}
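runtime·mid above is an internal test hook for the wire/unwire behavior and is not callable from user code. A rough user-level analogue, assuming Linux and its syscall.Gettid wrapper, is to check that a locked goroutine keeps observing the same kernel thread id across many reschedules.

// Linux-only sketch; uses syscall.Gettid in place of the internal mid() hook.
package main

import (
	"fmt"
	"runtime"
	"syscall"
)

func main() {
	runtime.LockOSThread()
	defer runtime.UnlockOSThread()

	tid := syscall.Gettid()
	for i := 0; i < 1000; i++ {
		runtime.Gosched() // give the scheduler every chance to move us
		if now := syscall.Gettid(); now != tid {
			fmt.Printf("moved from thread %d to %d\n", tid, now)
			return
		}
	}
	fmt.Printf("stayed on thread %d across 1000 reschedules\n", tid)
}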
@@ -158,6 +158,7 @@ struct G
 	G*	schedlink;
 	bool	readyonstop;
 	M*	m;		// for debuggers, but offset not hard-coded
+	M*	lockedm;
 };
 struct Mem
 {
@@ -187,12 +188,14 @@ struct M
 	int32	mallocing;
 	int32	gcing;
 	int32	locks;
+	int32	waitnextg;
 	Note	havenextg;
 	G*	nextg;
 	M*	schedlink;
 	Mem	mem;
 	uint32	machport;	// Return address for Mach IPC (OS X)
 	MCache	*mcache;
+	G*	lockedg;
 };
 struct Stktop
 {