Clarity improvements to DropTree #122080

Merged · 4 commits · Mar 11, 2024

Changes from 1 commit
Replace tuples in DropTree with named structs
This allows us to use real field names instead of tuple element numbers.

Renaming `previous_drops` to `existing_drops_map` clarifies that "previous" was
unrelated to drop order.
Zalathar committed Mar 6, 2024
commit 5ba70bd3ec6cedfe49332226dc9dbfe427698ad3
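
The gain is purely in readability: positional tuple access gives way to self-describing field names. Here is a minimal sketch of the before/after shape, using stand-in types rather than the real rustc definitions:

```rust
// Stand-ins for the real rustc types (`DropData`, `DropIdx`), just so the
// sketch compiles; the real definitions live in rustc_mir_build.
#[derive(Clone, Copy)]
struct DropData {
    local: u32,
}
type DropIdx = usize;

// Before this commit: nodes were tuples, accessed positionally.
type OldDropNode = (DropData, DropIdx);

// After: named fields say what each part means.
struct DropNode {
    data: DropData, // what to drop
    next: DropIdx,  // index of the drop that runs after this one (drop order)
}

fn main() {
    let old: OldDropNode = (DropData { local: 7 }, 0);
    // `old.0.local` and `old.1` become the self-describing forms below.
    let new = DropNode { data: old.0, next: old.1 };
    assert_eq!(new.data.local, 7);
    assert_eq!(new.next, 0);
}
```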
107 changes: 64 additions & 43 deletions compiler/rustc_mir_build/src/build/scope.rs
```diff
@@ -203,16 +203,31 @@ const ROOT_NODE: DropIdx = DropIdx::from_u32(0);
 /// in `build_mir`.
 #[derive(Debug)]
 struct DropTree {
-    /// Drops in the tree.
-    drops: IndexVec<DropIdx, (DropData, DropIdx)>,
-    /// Map for finding the inverse of the `next_drop` relation:
-    ///
-    /// `previous_drops[(drops[i].1, drops[i].0.local, drops[i].0.kind)] == i`
-    previous_drops: FxHashMap<(DropIdx, Local, DropKind), DropIdx>,
+    /// Nodes in the drop tree, containing drop data and a link to the next node.
+    drops: IndexVec<DropIdx, DropNode>,
+    /// Map for finding the index of an existing node, given its contents.
+    existing_drops_map: FxHashMap<DropNodeKey, DropIdx>,
     /// Edges into the `DropTree` that need to be added once it's lowered.
     entry_points: Vec<(DropIdx, BasicBlock)>,
 }
 
+/// A single node in the drop tree.
+#[derive(Debug)]
+struct DropNode {
+    /// Info about the drop to be performed at this node in the drop tree.
+    data: DropData,
+    /// Index of the "next" drop to perform (in drop order, not declaration order).
+    next: DropIdx,
+}
+
+/// Subset of [`DropNode`] used for reverse lookup in a hash table.
+#[derive(Debug, PartialEq, Eq, Hash)]
+struct DropNodeKey {
+    next: DropIdx,
+    local: Local,
+    kind: DropKind,
+}
+
 impl Scope {
     /// Whether there's anything to do for the cleanup path, that is,
     /// when unwinding through this scope. This includes destructors,
```
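
For orientation: the `next` links encode drop order, and every chain bottoms out at the sentinel `ROOT_NODE` (index 0, visible in the hunk header above). A toy walk of such a chain, with assumed stand-in types rather than rustc's:

```rust
// Index 0 is the synthetic root node; every chain of drops ends there.
const ROOT_NODE: usize = 0;

struct Node {
    name: &'static str, // stands in for `DropData`
    next: usize,        // stands in for `DropIdx`
}

/// Follows `next` links from `idx`, yielding drops in execution order.
fn drop_order(drops: &[Node], mut idx: usize) -> Vec<&'static str> {
    let mut order = Vec::new();
    while idx != ROOT_NODE {
        order.push(drops[idx].name);
        idx = drops[idx].next;
    }
    order
}

fn main() {
    // Locals declared `a` then `b` are dropped in reverse order: b, then a.
    let drops = [
        Node { name: "<root>", next: ROOT_NODE }, // sentinel, performs no drop
        Node { name: "drop a", next: ROOT_NODE },
        Node { name: "drop b", next: 1 },
    ];
    assert_eq!(drop_order(&drops, 2), ["drop b", "drop a"]);
}
```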
```diff
@@ -258,17 +273,22 @@ impl DropTree {
         let fake_source_info = SourceInfo::outermost(DUMMY_SP);
         let fake_data =
             DropData { source_info: fake_source_info, local: Local::MAX, kind: DropKind::Storage };
-        let drop_idx = DropIdx::MAX;
-        let drops = IndexVec::from_elem_n((fake_data, drop_idx), 1);
-        Self { drops, entry_points: Vec::new(), previous_drops: FxHashMap::default() }
+        let drops = IndexVec::from_raw(vec![DropNode { data: fake_data, next: DropIdx::MAX }]);
+        Self { drops, entry_points: Vec::new(), existing_drops_map: FxHashMap::default() }
     }
 
-    fn add_drop(&mut self, drop: DropData, next: DropIdx) -> DropIdx {
+    /// Adds a node to the drop tree, consisting of drop data and the index of
+    /// the "next" drop (in drop order), which could be the sentinel [`ROOT_NODE`].
+    ///
+    /// If there is already an equivalent node in the tree, nothing is added, and
+    /// that node's index is returned. Otherwise, the new node's index is returned.
+    fn add_drop(&mut self, data: DropData, next: DropIdx) -> DropIdx {
         let drops = &mut self.drops;
         *self
-            .previous_drops
-            .entry((next, drop.local, drop.kind))
-            .or_insert_with(|| drops.push((drop, next)))
+            .existing_drops_map
+            .entry(DropNodeKey { next, local: data.local, kind: data.kind })
+            // Create a new node, and also add its index to the map.
+            .or_insert_with(|| drops.push(DropNode { data, next }))
     }
 
     /// Registers `from` as an entry point to this drop tree, at `to`.
```
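
The `entry`/`or_insert_with` pattern above is what deduplicates nodes: asking twice for the same `(next, local, kind)` yields the same index. A self-contained imitation, assuming std `HashMap` and `Vec` in place of rustc's `FxHashMap` and `IndexVec` (whose `push` returns the new index directly, so the real closure is a single expression):

```rust
use std::collections::HashMap;

type DropIdx = usize;
type Local = u32;

#[derive(Clone, Copy, PartialEq, Eq, Hash)]
enum DropKind {
    Value,
    Storage,
}

#[derive(Clone, Copy)]
struct DropData {
    local: Local,
    kind: DropKind,
}

struct DropNode {
    data: DropData,
    next: DropIdx,
}

#[derive(PartialEq, Eq, Hash)]
struct DropNodeKey {
    next: DropIdx,
    local: Local,
    kind: DropKind,
}

struct DropTree {
    drops: Vec<DropNode>,
    existing_drops_map: HashMap<DropNodeKey, DropIdx>,
}

impl DropTree {
    fn add_drop(&mut self, data: DropData, next: DropIdx) -> DropIdx {
        let drops = &mut self.drops;
        *self
            .existing_drops_map
            .entry(DropNodeKey { next, local: data.local, kind: data.kind })
            // Only create a new node if no equivalent node exists yet.
            .or_insert_with(|| {
                drops.push(DropNode { data, next });
                drops.len() - 1 // `IndexVec::push` would return this itself
            })
    }
}

fn main() {
    let root = DropNode { data: DropData { local: 0, kind: DropKind::Storage }, next: 0 };
    let mut tree = DropTree { drops: vec![root], existing_drops_map: HashMap::new() };

    let data = DropData { local: 7, kind: DropKind::Value };
    let first = tree.add_drop(data, 0);
    let second = tree.add_drop(data, 0); // same (next, local, kind)
    assert_eq!(first, second); // deduplicated: same node index
    assert_eq!(tree.drops.len(), 2); // root + one real node
}
```

Note that `DropNodeKey` omits `source_info`, so two drops that differ only in their span coalesce into a single node.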
```diff
@@ -330,7 +350,7 @@ impl DropTree {
         let entry_points = &mut self.entry_points;
         entry_points.sort();
 
-        for (drop_idx, drop_data) in self.drops.iter_enumerated().rev() {
+        for (drop_idx, drop_node) in self.drops.iter_enumerated().rev() {
             if entry_points.last().is_some_and(|entry_point| entry_point.0 == drop_idx) {
                 let block = *blocks[drop_idx].get_or_insert_with(|| T::make_block(cfg));
                 needs_block[drop_idx] = Block::Own;
```
```diff
@@ -348,10 +368,10 @@
                     blocks[drop_idx] = blocks[pred];
                 }
             }
-            if let DropKind::Value = drop_data.0.kind {
-                needs_block[drop_data.1] = Block::Own;
+            if let DropKind::Value = drop_node.data.kind {
+                needs_block[drop_node.next] = Block::Own;
             } else if drop_idx != ROOT_NODE {
-                match &mut needs_block[drop_data.1] {
+                match &mut needs_block[drop_node.next] {
                     pred @ Block::None => *pred = Block::Shares(drop_idx),
                     pred @ Block::Shares(_) => *pred = Block::Own,
                     Block::Own => (),
```
```diff
@@ -368,34 +388,35 @@
         cfg: &mut CFG<'tcx>,
         blocks: &IndexSlice<DropIdx, Option<BasicBlock>>,
     ) {
-        for (drop_idx, drop_data) in self.drops.iter_enumerated().rev() {
+        for (drop_idx, drop_node) in self.drops.iter_enumerated().rev() {
             let Some(block) = blocks[drop_idx] else { continue };
-            match drop_data.0.kind {
+            match drop_node.data.kind {
                 DropKind::Value => {
                     let terminator = TerminatorKind::Drop {
-                        target: blocks[drop_data.1].unwrap(),
+                        target: blocks[drop_node.next].unwrap(),
                         // The caller will handle this if needed.
                         unwind: UnwindAction::Terminate(UnwindTerminateReason::InCleanup),
-                        place: drop_data.0.local.into(),
+                        place: drop_node.data.local.into(),
                         replace: false,
                     };
-                    cfg.terminate(block, drop_data.0.source_info, terminator);
+                    cfg.terminate(block, drop_node.data.source_info, terminator);
                 }
                 // Root nodes don't correspond to a drop.
                 DropKind::Storage if drop_idx == ROOT_NODE => {}
                 DropKind::Storage => {
                     let stmt = Statement {
-                        source_info: drop_data.0.source_info,
-                        kind: StatementKind::StorageDead(drop_data.0.local),
+                        source_info: drop_node.data.source_info,
+                        kind: StatementKind::StorageDead(drop_node.data.local),
                     };
                     cfg.push(block, stmt);
-                    let target = blocks[drop_data.1].unwrap();
+                    let target = blocks[drop_node.next].unwrap();
                     if target != block {
                         // Diagnostics don't use this `Span` but debuginfo
                         // might. Since we don't want breakpoints to be placed
                         // here, especially when this is on an unwind path, we
                         // use `DUMMY_SP`.
-                        let source_info = SourceInfo { span: DUMMY_SP, ..drop_data.0.source_info };
+                        let source_info =
+                            SourceInfo { span: DUMMY_SP, ..drop_node.data.source_info };
                         let terminator = TerminatorKind::Goto { target };
                         cfg.terminate(block, source_info, terminator);
                     }
```
```diff
@@ -1277,9 +1298,9 @@ fn build_scope_drops<'tcx>(
                 // `unwind_to` should drop the value that we're about to
                 // schedule. If dropping this value panics, then we continue
                 // with the *next* value on the unwind path.
-                debug_assert_eq!(unwind_drops.drops[unwind_to].0.local, drop_data.local);
-                debug_assert_eq!(unwind_drops.drops[unwind_to].0.kind, drop_data.kind);
-                unwind_to = unwind_drops.drops[unwind_to].1;
+                debug_assert_eq!(unwind_drops.drops[unwind_to].data.local, drop_data.local);
+                debug_assert_eq!(unwind_drops.drops[unwind_to].data.kind, drop_data.kind);
+                unwind_to = unwind_drops.drops[unwind_to].next;
 
                 // If the operand has been moved, and we are not on an unwind
                 // path, then don't generate the drop. (We only take this into
```
```diff
@@ -1306,9 +1327,9 @@
             }
             DropKind::Storage => {
                 if storage_dead_on_unwind {
-                    debug_assert_eq!(unwind_drops.drops[unwind_to].0.local, drop_data.local);
-                    debug_assert_eq!(unwind_drops.drops[unwind_to].0.kind, drop_data.kind);
-                    unwind_to = unwind_drops.drops[unwind_to].1;
+                    debug_assert_eq!(unwind_drops.drops[unwind_to].data.local, drop_data.local);
+                    debug_assert_eq!(unwind_drops.drops[unwind_to].data.kind, drop_data.kind);
+                    unwind_to = unwind_drops.drops[unwind_to].next;
                 }
                 // Only temps and vars need their storage dead.
                 assert!(local.index() > arg_count);
```
```diff
@@ -1338,30 +1359,30 @@ impl<'a, 'tcx: 'a> Builder<'a, 'tcx> {
         let is_coroutine = self.coroutine.is_some();
 
         // Link the exit drop tree to unwind drop tree.
-        if drops.drops.iter().any(|(drop, _)| drop.kind == DropKind::Value) {
+        if drops.drops.iter().any(|drop_node| drop_node.data.kind == DropKind::Value) {
             let unwind_target = self.diverge_cleanup_target(else_scope, span);
             let mut unwind_indices = IndexVec::from_elem_n(unwind_target, 1);
-            for (drop_idx, drop_data) in drops.drops.iter_enumerated().skip(1) {
-                match drop_data.0.kind {
+            for (drop_idx, drop_node) in drops.drops.iter_enumerated().skip(1) {
+                match drop_node.data.kind {
                     DropKind::Storage => {
                         if is_coroutine {
                             let unwind_drop = self
                                 .scopes
                                 .unwind_drops
-                                .add_drop(drop_data.0, unwind_indices[drop_data.1]);
+                                .add_drop(drop_node.data, unwind_indices[drop_node.next]);
                             unwind_indices.push(unwind_drop);
                         } else {
-                            unwind_indices.push(unwind_indices[drop_data.1]);
+                            unwind_indices.push(unwind_indices[drop_node.next]);
                         }
                     }
                     DropKind::Value => {
                         let unwind_drop = self
                             .scopes
                             .unwind_drops
-                            .add_drop(drop_data.0, unwind_indices[drop_data.1]);
+                            .add_drop(drop_node.data, unwind_indices[drop_node.next]);
                         self.scopes.unwind_drops.add_entry_point(
                             blocks[drop_idx].unwrap(),
-                            unwind_indices[drop_data.1],
+                            unwind_indices[drop_node.next],
                         );
                         unwind_indices.push(unwind_drop);
                     }
```
```diff
@@ -1412,10 +1433,10 @@ impl<'a, 'tcx: 'a> Builder<'a, 'tcx> {
         // prevent drop elaboration from creating drop flags that would have
         // to be captured by the coroutine. I'm not sure how important this
         // optimization is, but it is here.
-        for (drop_idx, drop_data) in drops.drops.iter_enumerated() {
-            if let DropKind::Value = drop_data.0.kind {
-                debug_assert!(drop_data.1 < drops.drops.next_index());
-                drops.entry_points.push((drop_data.1, blocks[drop_idx].unwrap()));
+        for (drop_idx, drop_node) in drops.drops.iter_enumerated() {
+            if let DropKind::Value = drop_node.data.kind {
+                debug_assert!(drop_node.next < drops.drops.next_index());
+                drops.entry_points.push((drop_node.next, blocks[drop_idx].unwrap()));
             }
         }
         Self::build_unwind_tree(cfg, drops, fn_span, resume_block);
```