@@ -61,30 +61,15 @@ macro_rules! define_rust_probestack {
     ($body: expr) => {
         concat!(
             "
-            // We are about to define a 'function within a function.' Because the
-            // compiler will have emitted a .cfi_startproc at the beginning of
-            // __rust_probestack_wrapper, we need .cfi_endproc before we can define
-            // the contents of __rust_probestack.
-            .cfi_endproc
-
            .pushsection .text.__rust_probestack
            .globl __rust_probestack
            .type __rust_probestack, @function
        __rust_probestack:
-            .cfi_startproc
-
            ",
            $body,
            "
-
-            .cfi_endproc
-
            .size __rust_probestack, . - __rust_probestack
            .popsection
-
-            // Similar to above, we add .cfi_startproc here to match the
-            // .cfi_endproc emitted at the end of __rust_probestack_wrapper.
-            .cfi_startproc
            "
        )
    };
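
With the `.cfi_endproc`/`.cfi_startproc` juggling removed, the macro is now a plain compile-time string builder. As a rough sketch of how it composes with `global_asm!` (the `ret` body below is a placeholder for illustration, not the real probe sequence; the real invocations appear in the next hunk):

    // Sketch only: concat! folds the pieces into a single string literal, so
    // global_asm! receives one top-level assembly block that defines the
    // __rust_probestack symbol in its own .text subsection.
    global_asm!(define_rust_probestack!("
        .cfi_startproc
        ret                     // placeholder body for illustration
        .cfi_endproc
        "));
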
@@ -100,119 +85,105 @@ macro_rules! define_rust_probestack {
     ($body: expr) => {
         concat!(
             "
-            .cfi_endproc
            .globl __rust_probestack
        __rust_probestack:
-            .cfi_startproc
-
            ",
-            $body,
-            "
-
-            .cfi_endproc
-            .cfi_startproc
-            "
+            $body
        )
    };
 }
 
-#[naked]
-#[no_mangle]
+// Our goal here is to touch each page between %rsp+8 and %rsp+8-%rax,
+// ensuring that if any pages are unmapped we'll make a page fault.
+//
+// The ABI here is that the stack frame size is located in `%rax`. Upon
+// return we're not supposed to modify `%rsp` or `%rax`.
 #[cfg(all(target_arch = "x86_64", not(feature = "mangled-names")))]
-pub unsafe extern "C" fn __rust_probestack_wrapper() {
-    // Our goal here is to touch each page between %rsp+8 and %rsp+8-%rax,
-    // ensuring that if any pages are unmapped we'll make a page fault.
+global_asm!(define_rust_probestack!("
+    .cfi_startproc
+    pushq %rbp
+    .cfi_adjust_cfa_offset 8
+    .cfi_offset %rbp, -16
+    movq %rsp, %rbp
+    .cfi_def_cfa_register %rbp
+
+    mov %rax,%r11           // duplicate %rax as we're clobbering %r11
+
+    // Main loop, taken in one page increments. We're decrementing rsp by
+    // a page each time until there's less than a page remaining. We're
+    // guaranteed that this function isn't called unless there's more than a
+    // page needed.
     //
-    // The ABI here is that the stack frame size is located in `%rax`. Upon
-    // return we're not supposed to modify `%rsp` or `%rax`.
-    asm!(define_rust_probestack!("
-        pushq %rbp
-        .cfi_adjust_cfa_offset 8
-        .cfi_offset %rbp, -16
-        movq %rsp, %rbp
-        .cfi_def_cfa_register %rbp
-
-        mov %rax,%r11       // duplicate %rax as we're clobbering %r11
-
-        // Main loop, taken in one page increments. We're decrementing rsp by
-        // a page each time until there's less than a page remaining. We're
-        // guaranteed that this function isn't called unless there's more than a
-        // page needed.
-        //
-        // Note that we're also testing against `8(%rsp)` to account for the 8
-        // bytes pushed on the stack orginally with our return address. Using
-        // `8(%rsp)` simulates us testing the stack pointer in the caller's
-        // context.
-
-        // It's usually called when %rax >= 0x1000, but that's not always true.
-        // Dynamic stack allocation, which is needed to implement unsized
-        // rvalues, triggers stackprobe even if %rax < 0x1000.
-        // Thus we have to check %r11 first to avoid segfault.
-        cmp $$0x1000,%r11
-        jna 3f
-    2:
-        sub $$0x1000,%rsp
-        test %rsp,8(%rsp)
-        sub $$0x1000,%r11
-        cmp $$0x1000,%r11
-        ja 2b
-
-    3:
-        // Finish up the last remaining stack space requested, getting the last
-        // bits out of r11
-        sub %r11,%rsp
-        test %rsp,8(%rsp)
-
-        // Restore the stack pointer to what it previously was when entering
-        // this function. The caller will readjust the stack pointer after we
-        // return.
-        add %rax,%rsp
-
-        leave
-        .cfi_def_cfa_register %rsp
-        .cfi_adjust_cfa_offset -8
-        ret
-    ") ::: "memory" : "volatile");
-    ::core::intrinsics::unreachable();
-}
+    // Note that we're also testing against `8(%rsp)` to account for the 8
+    // bytes pushed on the stack orginally with our return address. Using
+    // `8(%rsp)` simulates us testing the stack pointer in the caller's
+    // context.
+
+    // It's usually called when %rax >= 0x1000, but that's not always true.
+    // Dynamic stack allocation, which is needed to implement unsized
+    // rvalues, triggers stackprobe even if %rax < 0x1000.
+    // Thus we have to check %r11 first to avoid segfault.
+    cmp $0x1000,%r11
+    jna 3f
+2:
+    sub $0x1000,%rsp
+    test %rsp,8(%rsp)
+    sub $0x1000,%r11
+    cmp $0x1000,%r11
+    ja 2b
+
+3:
+    // Finish up the last remaining stack space requested, getting the last
+    // bits out of r11
+    sub %r11,%rsp
+    test %rsp,8(%rsp)
+
+    // Restore the stack pointer to what it previously was when entering
+    // this function. The caller will readjust the stack pointer after we
+    // return.
+    add %rax,%rsp
+
+    leave
+    .cfi_def_cfa_register %rsp
+    .cfi_adjust_cfa_offset -8
+    ret
+    .cfi_endproc
+    "));
 
-#[naked]
-#[no_mangle]
 #[cfg(all(target_arch = "x86", not(feature = "mangled-names")))]
-pub unsafe extern "C" fn __rust_probestack_wrapper() {
-    // This is the same as x86_64 above, only translated for 32-bit sizes. Note
-    // that on Unix we're expected to restore everything as it was, this
-    // function basically can't tamper with anything.
-    //
-    // The ABI here is the same as x86_64, except everything is 32-bits large.
-    asm!(define_rust_probestack!("
-        push %ebp
-        .cfi_adjust_cfa_offset 4
-        .cfi_offset %ebp, -8
-        mov %esp, %ebp
-        .cfi_def_cfa_register %ebp
-        push %ecx
-        mov %eax,%ecx
-
-        cmp $$0x1000,%ecx
-        jna 3f
-    2:
-        sub $$0x1000,%esp
-        test %esp,8(%esp)
-        sub $$0x1000,%ecx
-        cmp $$0x1000,%ecx
-        ja 2b
-
-    3:
-        sub %ecx,%esp
-        test %esp,8(%esp)
-
-        add %eax,%esp
-        pop %ecx
-        leave
-        .cfi_def_cfa_register %esp
-        .cfi_adjust_cfa_offset -4
-        ret
-    ") ::: "memory" : "volatile");
-    ::core::intrinsics::unreachable();
-}
+// This is the same as x86_64 above, only translated for 32-bit sizes. Note
+// that on Unix we're expected to restore everything as it was, this
+// function basically can't tamper with anything.
+//
+// The ABI here is the same as x86_64, except everything is 32-bits large.
+global_asm!(define_rust_probestack!("
+    .cfi_startproc
+    push %ebp
+    .cfi_adjust_cfa_offset 4
+    .cfi_offset %ebp, -8
+    mov %esp, %ebp
+    .cfi_def_cfa_register %ebp
+    push %ecx
+    mov %eax,%ecx
+
+    cmp $0x1000,%ecx
+    jna 3f
+2:
+    sub $0x1000,%esp
+    test %esp,8(%esp)
+    sub $0x1000,%ecx
+    cmp $0x1000,%ecx
+    ja 2b
+
+3:
+    sub %ecx,%esp
+    test %esp,8(%esp)
+
+    add %eax,%esp
+    pop %ecx
+    leave
+    .cfi_def_cfa_register %esp
+    .cfi_adjust_cfa_offset -4
+    ret
+    .cfi_endproc
+    "));
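
After this change `__rust_probestack` is defined purely by `global_asm!`, so from Rust's point of view it is just an ordinary foreign symbol. A hypothetical way to reference it (illustrative only; in practice the compiler emits calls to it automatically when a function's stack frame exceeds a page):

    extern "C" {
        // Defined by the global_asm! blocks above.
        fn __rust_probestack();
    }

    // Take the symbol's address, e.g. to confirm it links.
    fn probestack_addr() -> usize {
        __rust_probestack as usize
    }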