|
1 |
| -use rustc_abi::{Align, Endian, HasDataLayout, Size}; |
| 1 | +use rustc_abi::{Align, BackendRepr, Endian, HasDataLayout, Size}; |
2 | 2 | use rustc_codegen_ssa::common::IntPredicate;
|
3 | 3 | use rustc_codegen_ssa::mir::operand::OperandRef;
|
4 | 4 | use rustc_codegen_ssa::traits::{BaseTypeCodegenMethods, BuilderMethods, ConstCodegenMethods};
|
@@ -234,6 +234,148 @@ fn emit_aapcs_va_arg<'ll, 'tcx>(
|
234 | 234 | val
|
235 | 235 | }
|
236 | 236 |
|
// Lower `va_arg` for the 32-bit "powerpc" target (see the arch dispatch in
// `emit_va_arg`). A scalar argument is fetched from the register save area
// while fewer than `max_regs` GPRs/FPRs of its class have been consumed, and
// from the stack overflow area afterwards.
fn emit_powerpc_va_arg<'ll, 'tcx>(
    bx: &mut Builder<'_, 'll, 'tcx>,
    list: OperandRef<'tcx, &'ll Value>,
    target_ty: Ty<'tcx>,
) -> &'ll Value {
    let dl = bx.cx.data_layout();

    // C layout of a va_list element. The byte offsets used below follow from
    // it: `gpr` at 0, `fpr` at 1, `overflow_arg_area` at 4 (1 + 1 + 2), and
    // `reg_save_area` at 8 (1 + 1 + 2 + 4, i.e. 4-byte pointers).
    //
    // struct __va_list_tag {
    //     unsigned char gpr;
    //     unsigned char fpr;
    //     unsigned short reserved;
    //     void *overflow_arg_area;
    //     void *reg_save_area;
    // };
    let va_list_addr = list.immediate();

    // Peel off any newtype wrappers: repeatedly unwrap the unique non-1-ZST
    // field so classification below sees the underlying scalar.
    let layout = {
        let mut layout = bx.cx.layout_of(target_ty);

        while let Some((_, inner)) = layout.non_1zst_field(bx.cx) {
            layout = inner;
        }

        layout
    };

    // Rust does not support any powerpc softfloat targets.
    let is_soft_float_abi = false;

    // All instances of VaArgSafe are passed directly (by value), never behind
    // a pointer.
    let is_indirect = false;

    // Classify the scalar. `is_int` selects the GPR counter/save area (pointers
    // count as integers); `is_i64`/`is_f64` mark 8-byte values that need the
    // even-register adjustment and a two-slot increment below.
    let (is_i64, is_int, is_f64) = match layout.layout.backend_repr() {
        BackendRepr::Scalar(scalar) => match scalar.primitive() {
            rustc_abi::Primitive::Int(integer, _) => (integer.size().bits() == 64, true, false),
            rustc_abi::Primitive::Float(float) => (false, false, float.size().bits() == 64),
            rustc_abi::Primitive::Pointer(_) => (false, true, false),
        },
        _ => unreachable!("all instances of VaArgSafe are represented as scalars"),
    };

    // Integer-class arguments are counted in `gpr` (offset 0), float-class in
    // `fpr` (offset 1).
    let num_regs_addr = if is_int || is_soft_float_abi {
        va_list_addr // gpr
    } else {
        bx.inbounds_ptradd(va_list_addr, bx.const_usize(1)) // fpr
    };

    let mut num_regs = bx.load(bx.type_i8(), num_regs_addr, dl.i8_align.abi);

    // "Align" the register count when the type is passed as `i64`: round the
    // count up to the next even value (add one, then clear the low bit) so the
    // value starts at an even register slot.
    if is_i64 || (is_f64 && is_soft_float_abi) {
        num_regs = bx.add(num_regs, bx.const_u8(1));
        num_regs = bx.and(num_regs, bx.const_u8(0b1111_1110));
    }

    // Only the first 8 registers of each class hold variadic arguments.
    let max_regs = 8u8;
    let use_regs = bx.icmp(IntPredicate::IntULT, num_regs, bx.const_u8(max_regs));

    let in_reg = bx.append_sibling_block("va_arg.in_reg");
    let in_mem = bx.append_sibling_block("va_arg.in_mem");
    let end = bx.append_sibling_block("va_arg.end");

    bx.cond_br(use_regs, in_reg, in_mem);

    // Case 1: the argument was passed in a register and lives in the register
    // save area.
    let reg_addr = {
        bx.switch_to_block(in_reg);

        // `reg_save_area` is the pointer field at byte offset 8 (see the
        // struct layout above).
        let reg_safe_area_ptr = bx.inbounds_ptradd(va_list_addr, bx.cx.const_usize(1 + 1 + 2 + 4));
        let mut reg_addr = bx.load(bx.type_ptr(), reg_safe_area_ptr, dl.pointer_align.abi);

        // Floating-point registers start after the general-purpose registers
        // (8 GPR slots * 4 bytes = 32 bytes into the save area).
        if !is_int && !is_soft_float_abi {
            reg_addr = bx.inbounds_ptradd(reg_addr, bx.cx.const_usize(32))
        }

        // Get the address of the saved value by scaling the number of
        // registers we've used by the size of one register slot (4 bytes per
        // GPR, 8 bytes per FPR).
        let reg_size = if is_int || is_soft_float_abi { 4 } else { 8 };
        let reg_offset = bx.mul(num_regs, bx.cx().const_u8(reg_size));
        let reg_addr = bx.inbounds_ptradd(reg_addr, reg_offset);

        // Increase the used-register count: an `i64` (or an `f64` on a
        // soft-float ABI) occupies two 4-byte slots, everything else one.
        let reg_incr = if is_i64 || (is_f64 && is_soft_float_abi) { 2 } else { 1 };
        let new_num_regs = bx.add(num_regs, bx.cx.const_u8(reg_incr));
        bx.store(new_num_regs, num_regs_addr, dl.i8_align.abi);

        bx.br(end);

        reg_addr
    };

    // Case 2: the argument lives in the overflow (stack) area.
    let mem_addr = {
        bx.switch_to_block(in_mem);

        // Saturate the register counter so every later `va_arg` of this class
        // also takes the overflow path.
        bx.store(bx.const_u8(max_regs), num_regs_addr, dl.i8_align.abi);

        // Everything in the overflow area is rounded up to a size of at least 4.
        let overflow_area_align = Align::from_bytes(4).unwrap();

        // For indirect arguments only a pointer is stored in the overflow
        // area. (Currently `is_indirect` is hardwired to false above.)
        let size = if !is_indirect {
            layout.layout.size.align_to(overflow_area_align)
        } else {
            dl.pointer_size
        };

        // `overflow_arg_area` is the pointer field at byte offset 4 (see the
        // struct layout above).
        let overflow_area_ptr = bx.inbounds_ptradd(va_list_addr, bx.cx.const_usize(1 + 1 + 2));
        let mut overflow_area = bx.load(bx.type_ptr(), overflow_area_ptr, dl.pointer_align.abi);

        // Round the argument's address up to the type's alignment when it
        // exceeds the minimum 4-byte slot alignment of the overflow area.
        if layout.layout.align.abi > overflow_area_align {
            overflow_area = round_pointer_up_to_alignment(
                bx,
                overflow_area,
                layout.layout.align.abi,
                bx.type_ptr(),
            );
        }

        let mem_addr = overflow_area;

        // Bump the overflow-area pointer past this argument for the next call.
        overflow_area = bx.inbounds_ptradd(overflow_area, bx.const_usize(size.bytes()));
        bx.store(overflow_area, overflow_area_ptr, dl.pointer_align.abi);

        bx.br(end);

        mem_addr
    };

    // Merge the two paths with a phi and load the value from whichever
    // address applies.
    bx.switch_to_block(end);
    let val_addr = bx.phi(bx.type_ptr(), &[reg_addr, mem_addr], &[in_reg, in_mem]);
    let val_type = layout.llvm_type(bx);
    let val_addr = if is_indirect {
        // Indirect arguments store a pointer to the value; chase it first.
        // (Unreachable today: `is_indirect` is hardwired to false above.)
        bx.load(bx.cx.type_ptr(), val_addr, dl.pointer_align.abi)
    } else {
        val_addr
    };
    bx.load(val_type, val_addr, layout.align.abi)
}
| 378 | + |
237 | 379 | fn emit_s390x_va_arg<'ll, 'tcx>(
|
238 | 380 | bx: &mut Builder<'_, 'll, 'tcx>,
|
239 | 381 | list: OperandRef<'tcx, &'ll Value>,
|
@@ -465,6 +607,7 @@ pub(super) fn emit_va_arg<'ll, 'tcx>(
|
465 | 607 | }
|
466 | 608 | "aarch64" => emit_aapcs_va_arg(bx, addr, target_ty),
|
467 | 609 | "s390x" => emit_s390x_va_arg(bx, addr, target_ty),
|
| 610 | + "powerpc" => emit_powerpc_va_arg(bx, addr, target_ty), |
468 | 611 | "powerpc64" | "powerpc64le" => emit_ptr_va_arg(
|
469 | 612 | bx,
|
470 | 613 | addr,
|
|
0 commit comments