Skip to content

Commit

Permalink
Various cleanups to the ABI handling code (part 1) (bytecodealliance#…
Browse files Browse the repository at this point in the history
…8903)

* Inline abi_arg_slot_regs into gen_retval

This simplifies the code a bit.

* Remove abi argument of gen_call_common

It isn't actually necessary.

* Define gen_call_common without macro

Rust-analyzer disables most IDE functionality inside macros. Defining
gen_call_common without a macro makes it easier to modify.

* Use mem::take instead of mem::replace
  • Loading branch information
bjorn3 authored Jul 16, 2024
1 parent 51948ef commit 50d82f2
Show file tree
Hide file tree
Showing 6 changed files with 104 additions and 146 deletions.
5 changes: 0 additions & 5 deletions cranelift/codegen/src/isa/aarch64/lower/isle.rs
Original file line number Diff line number Diff line change
Expand Up @@ -18,7 +18,6 @@ use crate::isa::aarch64::inst::{FPULeftShiftImm, FPURightShiftImm, ReturnCallInf
use crate::isa::aarch64::AArch64Backend;
use crate::isle_common_prelude_methods;
use crate::machinst::isle::*;
use crate::machinst::valueregs;
use crate::{
binemit::CodeOffset,
ir::{
Expand Down Expand Up @@ -70,10 +69,6 @@ pub struct ExtendedValue {
extend: ExtendOp,
}

impl IsleContext<'_, '_, MInst, AArch64Backend> {
isle_prelude_method_helpers!(AArch64CallSite);
}

impl Context for IsleContext<'_, '_, MInst, AArch64Backend> {
isle_lower_prelude_methods!();
isle_prelude_caller_methods!(
Expand Down
5 changes: 1 addition & 4 deletions cranelift/codegen/src/isa/riscv64/lower/isle.rs
Original file line number Diff line number Diff line change
Expand Up @@ -3,7 +3,7 @@
// Pull in the ISLE generated code.
#[allow(unused)]
pub mod generated_code;
use generated_code::{Context, MInst};
use generated_code::MInst;

// Types that the generated ISLE code uses via `use super::*`.
use self::generated_code::{VecAluOpRR, VecLmul};
Expand Down Expand Up @@ -34,7 +34,6 @@ type BoxReturnCallInfo = Box<ReturnCallInfo>;
type BoxExternalName = Box<ExternalName>;
type VecMachLabel = Vec<MachLabel>;
type VecArgPair = Vec<ArgPair>;
use crate::machinst::valueregs;

pub(crate) struct RV64IsleContext<'a, 'b, I, B>
where
Expand All @@ -49,8 +48,6 @@ where
}

impl<'a, 'b> RV64IsleContext<'a, 'b, MInst, Riscv64Backend> {
isle_prelude_method_helpers!(Riscv64ABICallSite);

fn new(lower_ctx: &'a mut Lower<'b, MInst>, backend: &'a Riscv64Backend) -> Self {
Self {
lower_ctx,
Expand Down
14 changes: 8 additions & 6 deletions cranelift/codegen/src/isa/x64/lower.rs
Original file line number Diff line number Diff line change
Expand Up @@ -14,7 +14,7 @@ use crate::machinst::lower::*;
use crate::machinst::*;
use crate::result::CodegenResult;
use crate::settings::Flags;
use smallvec::smallvec;
use smallvec::{smallvec, SmallVec};
use target_lexicon::Triple;

//=============================================================================
Expand Down Expand Up @@ -152,8 +152,7 @@ fn emit_vm_call(
triple: &Triple,
libcall: LibCall,
inputs: &[Reg],
outputs: &[Writable<Reg>],
) -> CodegenResult<()> {
) -> CodegenResult<SmallVec<[Reg; 1]>> {
let extname = ExternalName::LibCall(libcall);

let dist = if flags.use_colocated_libcalls() {
Expand Down Expand Up @@ -182,8 +181,11 @@ fn emit_vm_call(
}

let mut retval_insts: SmallInstVec<_> = smallvec![];
for (i, output) in outputs.iter().enumerate() {
retval_insts.extend(abi.gen_retval(ctx, i, ValueRegs::one(*output)).into_iter());
let mut outputs: SmallVec<[_; 1]> = smallvec![];
for i in 0..ctx.sigs().num_rets(ctx.sigs().abi_sig_for_signature(&sig)) {
let (retval_inst, retval_regs) = abi.gen_retval(ctx, i);
retval_insts.extend(retval_inst.into_iter());
outputs.push(retval_regs.only_reg().unwrap());
}

abi.emit_call(ctx);
Expand All @@ -192,7 +194,7 @@ fn emit_vm_call(
ctx.emit(inst);
}

Ok(())
Ok(outputs)
}

/// Returns whether the given input is a shift by a constant value less or equal than 3.
Expand Down
39 changes: 14 additions & 25 deletions cranelift/codegen/src/isa/x64/lower/isle.rs
Original file line number Diff line number Diff line change
Expand Up @@ -22,8 +22,8 @@ use crate::{
inst::{args::*, regs, CallInfo, ReturnCallInfo},
},
machinst::{
isle::*, valueregs, ArgPair, InsnInput, InstOutput, MachAtomicRmwOp, MachInst,
VCodeConstant, VCodeConstantData,
isle::*, ArgPair, InsnInput, InstOutput, MachAtomicRmwOp, MachInst, VCodeConstant,
VCodeConstantData,
},
};
use alloc::vec::Vec;
Expand Down Expand Up @@ -689,57 +689,48 @@ impl Context for IsleContext<'_, '_, MInst, X64Backend> {
}

fn libcall_1(&mut self, libcall: &LibCall, a: Reg) -> Reg {
let call_conv = self.lower_ctx.abi().call_conv(self.lower_ctx.sigs());
let ret_ty = libcall.signature(call_conv, I64).returns[0].value_type;
let output_reg = self.lower_ctx.alloc_tmp(ret_ty).only_reg().unwrap();

emit_vm_call(
let outputs = emit_vm_call(
self.lower_ctx,
&self.backend.flags,
&self.backend.triple,
libcall.clone(),
&[a],
&[output_reg],
)
.expect("Failed to emit LibCall");

output_reg.to_reg()
debug_assert_eq!(outputs.len(), 1);

outputs[0]
}

fn libcall_2(&mut self, libcall: &LibCall, a: Reg, b: Reg) -> Reg {
let call_conv = self.lower_ctx.abi().call_conv(self.lower_ctx.sigs());
let ret_ty = libcall.signature(call_conv, I64).returns[0].value_type;
let output_reg = self.lower_ctx.alloc_tmp(ret_ty).only_reg().unwrap();

emit_vm_call(
let outputs = emit_vm_call(
self.lower_ctx,
&self.backend.flags,
&self.backend.triple,
libcall.clone(),
&[a, b],
&[output_reg],
)
.expect("Failed to emit LibCall");

output_reg.to_reg()
debug_assert_eq!(outputs.len(), 1);

outputs[0]
}

fn libcall_3(&mut self, libcall: &LibCall, a: Reg, b: Reg, c: Reg) -> Reg {
let call_conv = self.lower_ctx.abi().call_conv(self.lower_ctx.sigs());
let ret_ty = libcall.signature(call_conv, I64).returns[0].value_type;
let output_reg = self.lower_ctx.alloc_tmp(ret_ty).only_reg().unwrap();

emit_vm_call(
let outputs = emit_vm_call(
self.lower_ctx,
&self.backend.flags,
&self.backend.triple,
libcall.clone(),
&[a, b, c],
&[output_reg],
)
.expect("Failed to emit LibCall");

output_reg.to_reg()
debug_assert_eq!(outputs.len(), 1);

outputs[0]
}

#[inline]
Expand Down Expand Up @@ -1005,8 +996,6 @@ impl Context for IsleContext<'_, '_, MInst, X64Backend> {
}

impl IsleContext<'_, '_, MInst, X64Backend> {
isle_prelude_method_helpers!(X64CallSite);

fn load_xmm_unaligned(&mut self, addr: SyntheticAmode) -> Xmm {
let tmp = self.lower_ctx.alloc_tmp(types::F32X4).only_reg().unwrap();
self.lower_ctx.emit(MInst::XmmUnaryRmRUnaligned {
Expand Down
47 changes: 30 additions & 17 deletions cranelift/codegen/src/machinst/abi.rs
Original file line number Diff line number Diff line change
Expand Up @@ -2094,6 +2094,11 @@ impl<M: ABIMachineSpec> CallSite<M> {
sigs.num_args(self.sig)
}

/// Get the number of return values expected.
pub fn num_rets(&self, sigs: &SigSet) -> usize {
sigs.num_rets(self.sig)
}

/// Emit a copy of a large argument into its associated stack buffer, if
/// any. We must be careful to perform all these copies (as necessary)
/// before setting up the argument registers, since we may have to invoke
Expand Down Expand Up @@ -2297,47 +2302,57 @@ impl<M: ABIMachineSpec> CallSite<M> {
/// Define a return value after the call returns.
pub fn gen_retval(
&mut self,
ctx: &Lower<M::I>,
ctx: &mut Lower<M::I>,
idx: usize,
into_regs: ValueRegs<Writable<Reg>>,
) -> SmallInstVec<M::I> {
) -> (SmallInstVec<M::I>, ValueRegs<Reg>) {
let mut insts = smallvec![];
match &ctx.sigs().rets(self.sig)[idx] {
&ABIArg::Slots { ref slots, .. } => {
assert_eq!(into_regs.len(), slots.len());
for (slot, into_reg) in slots.iter().zip(into_regs.regs().iter()) {
let mut into_regs: SmallVec<[Reg; 2]> = smallvec![];
let ret = ctx.sigs().rets(self.sig)[idx].clone();
match ret {
ABIArg::Slots { ref slots, .. } => {
for slot in slots {
match slot {
// Extension mode doesn't matter because we're copying out, not in,
// and we ignore high bits in our own registers by convention.
&ABIArgSlot::Reg { reg, .. } => {
&ABIArgSlot::Reg { reg, ty, .. } => {
let into_reg = ctx.alloc_tmp(ty).only_reg().unwrap();
self.defs.push(CallRetPair {
vreg: *into_reg,
vreg: into_reg,
preg: reg.into(),
});
into_regs.push(into_reg.to_reg());
}
&ABIArgSlot::Stack { offset, ty, .. } => {
let into_reg = ctx.alloc_tmp(ty).only_reg().unwrap();
let sig_data = &ctx.sigs()[self.sig];
// The outgoing argument area must always be restored after a call,
// ensuring that the return values will be in a consistent place after
// any call.
let ret_area_base = sig_data.sized_stack_arg_space();
insts.push(M::gen_load_stack(
StackAMode::OutgoingArg(offset + ret_area_base),
*into_reg,
into_reg,
ty,
));
into_regs.push(into_reg.to_reg());
}
}
}
}
&ABIArg::StructArg { .. } => {
ABIArg::StructArg { .. } => {
panic!("StructArg not supported in return position");
}
&ABIArg::ImplicitPtrArg { .. } => {
ABIArg::ImplicitPtrArg { .. } => {
panic!("ImplicitPtrArg not supported in return position");
}
}
insts

let value_regs = match *into_regs {
[a] => ValueRegs::one(a),
[a, b] => ValueRegs::two(a, b),
_ => panic!("Expected to see one or two slots only from {:?}", ret),
};
(insts, value_regs)
}

/// Emit the call itself.
Expand Down Expand Up @@ -2365,10 +2380,8 @@ impl<M: ABIMachineSpec> CallSite<M> {
self.gen_arg(ctx, i.into(), ValueRegs::one(rd.to_reg()));
}

let (uses, defs) = (
mem::replace(&mut self.uses, Default::default()),
mem::replace(&mut self.defs, Default::default()),
);
let uses = mem::take(&mut self.uses);
let defs = mem::take(&mut self.defs);

let sig = &ctx.sigs()[self.sig];
let callee_pop_size = if sig.call_conv() == isa::CallConv::Tail {
Expand Down
Loading

0 comments on commit 50d82f2

Please sign in to comment.