-
Notifications
You must be signed in to change notification settings - Fork 4
/
Copy pathinterruptmask.c
128 lines (110 loc) · 3.52 KB
/
interruptmask.c
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
/* Copyright (c) 2024 Elliot Nunn */
/* Licensed under the MIT license */
/*
On both 68k and PowerPC, we change the 68k Status Register for two reasons:
- protect critical sections against reentrancy
- block Qemu pending an interrupt, to save CPU cycles
On the Classic Mac OS, all NuBus/PCI interrupts go through 68k interrupt vector #2.
Enabling and disabling this vector is done through the 68k SR register:
even PowerPC code needs to call through to emulated 68k code to access SR.
An alternative would be to use the Deferred Task Manager,
but this is unsuitable for block devices backing Virtual Memory.
*/
#include <Memory.h>
#include <MixedMode.h>
#include <Multiprocessing.h>
#include <Types.h>
#include "printf.h"
#include "interruptmask.h"
// True when the current 68k SR interrupt mask admits the level-2
// NuBus/PCI interrupt, i.e. the mask level (SR bits 8-10) is 0 or 1.
bool Interruptible(short sr) {
	int level = ((unsigned short)sr & 0x700) >> 8;
	return level <= 1;
}
// Raise the 68k interrupt mask to level 7 (all maskable interrupts off)
// and return the previous SR value for a later ReenableInterrupts().
// Must also act as a compiler barrier so memory operations cannot be
// reordered out of the critical section it opens.
short DisableInterrupts(void) {
	short oldlevel;
#if GENERATINGCFM
	// PowerPC (CFM) build: SR is only reachable from emulated 68k code,
	// so call this tiny 68k routine via the Mixed Mode Manager.
	static const unsigned short code[] = {
		0x40c0,         // move.w sr,d0
		0x007c, 0x0700, // or.w #$700,sr
		0x4e75          // rts
	};
	oldlevel = CallUniversalProc((void *)code, kCStackBased | RESULT_SIZE(SIZE_CODE(2)));
#else
	// 68k build: read and raise SR directly.
	// "cc": writing SR alters the condition codes.
	// "memory": make this a compiler barrier (matches ReenableInterrupts).
	__asm__ __volatile__ (
		" move.w %%sr,%[oldlevel];"
		" ori.w #0x700,%%sr;"
		: [oldlevel] "=d" (oldlevel) // output
		: // input
		: "cc", "memory" // clobber
	);
#endif
	return oldlevel;
}
// Restore the 68k SR (and thus the interrupt mask) saved by
// DisableInterrupts(). Also a compiler barrier: memory operations from
// inside the critical section must not sink below this point.
void ReenableInterrupts(short oldlevel) {
#if GENERATINGCFM
	// PowerPC (CFM) build: pass oldlevel in D0 to a tiny 68k routine.
	static const unsigned short code[] = {
		0x46c0, // move.w d0,sr
		0x4e75  // rts
	};
	CallUniversalProc((void *)code, kRegisterBased | REGISTER_ROUTINE_PARAMETER(1, kRegisterD0, SIZE_CODE(2)), oldlevel);
#else
	// 68k build: write SR directly.
	// "cc": writing SR alters the condition codes.
	// "memory": keep critical-section accesses above this barrier.
	__asm__ __volatile__ (
		" move.w %[oldlevel],%%sr"
		: // output
		: [oldlevel] "d" (oldlevel)
		: "cc", "memory" // clobber
	);
#endif
}
// Wait efficiently for an interrupt by sleeping the virtual CPU.
// Restores the interrupt mask from oldlevel, then blocks until *flag is
// set nonzero (presumably by an interrupt handler -- confirm at call
// sites). On 68k this uses the STOP instruction so an emulator (Qemu)
// can idle the host CPU; on PowerPC (CFM) it falls back to a busy-loop.
void ReenableInterruptsAndWaitFor(short oldlevel, volatile unsigned long *flag) {
#if GENERATINGCFM
// Unfortunately "MPDelayForSys" costs about 1 ms of overhead -- why?
// so on PowerPC we still need to busyloop
ReenableInterrupts(oldlevel);
while (*flag == 0) {}
#else
// Editable piece of machine code containing the STOP instruction:
// STOP takes an "immediate operand" but we need to call it
// with any SR value (various interrupt mask levels etc)
static unsigned short code[6] = { // needs to be 12 bytes for BlockMove to clear it out
0x7008, // moveq #8,d0 // "EnterSupervisorMode"
0xa08d, // _DebugUtil
0x4e72, 0x9999, // stop #placeholder
0x4e75 // rts
};
// Edit the code only when the desired SR value is different (rare),
// using the "BlockMove 12 bytes or more" trick to clear the i-cache.
// code[3] is STOP's immediate operand; only its high byte (the
// interrupt-mask bits of SR) is patched in from oldlevel.
if ((code[3] & 0xff00) != (oldlevel & 0xff00)) {
code[3] = oldlevel & 0xff00;
BlockMove(code, code, 12);
}
// Skip EnterSupervisorMode (the first two instructions) if already in that mode
// (STOP is privileged, so supervisor mode is required before executing it)
unsigned short *jumpto;
if ((oldlevel & 0x2000) == 0) { // check "S" bit of SR
jumpto = code; // do EnterSupervisorMode
} else {
jumpto = code + 2; // don't EnterSupervisorMode
}
for (;;) {
// Call STOP: loads the patched SR value (reenabling interrupts) and
// halts the CPU until an interrupt arrives.
// (Use asm to work around a problem with 68k codegen:
// compiler tries to make a PC-rel jump)
__asm__ __volatile__ (
" jsr (%[jumpto])"
: // output
: [jumpto] "a" (jumpto)
: "cc", "d0", "d1", "d2", "a0", "a1" // standard function clobbers (by EnterSupervisorMode)
);
// An interrupt woke us; check whether it was the one we wanted.
if (*flag != 0) {
break;
}
// Woken up prematurely, so go back to sleep for another STOP
DisableInterrupts();
// Close a race condition by polling one more time
// (the flag may have been set between the check above and the mask)
if (*flag != 0) {
ReenableInterrupts(oldlevel);
break;
}
}
#endif
}