feature: Adds tree view #223

Merged
merged 21 commits on Sep 7, 2020
Changes from 1 commit
Get *very* basic tree-mode working
ClementTsang committed Sep 4, 2020
commit 8c4ea0526b27fc5b673c1703e25f031078947144
1 change: 1 addition & 0 deletions .vscode/settings.json
@@ -61,6 +61,7 @@
"nvme",
"paren",
"pmem",
"ppid",
"prepush",
"processthreadsapi",
"regexes",
11 changes: 11 additions & 0 deletions src/app.rs
@@ -468,6 +468,17 @@ impl App {
}
}

pub fn toggle_tree_mode(&mut self) {
if let Some(proc_widget_state) = self
.proc_state
.widget_states
.get_mut(&(self.current_widget.widget_id))
{
proc_widget_state.is_tree_mode = !proc_widget_state.is_tree_mode;
self.proc_state.force_update = Some(self.current_widget.widget_id);
}
}

/// One of two functions allowed to run while in a dialog...
pub fn on_enter(&mut self) {
if self.delete_dialog_state.is_showing_dd {
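As a side note for reviewers: a minimal, self-contained mirror of the toggle pattern above, using stand-in types (the real `App` and proc state carry far more fields; nothing here is the PR's actual surrounding code). It shows the flag flip plus the forced-update handshake that makes the next draw rebuild the process list.

```rust
use std::collections::HashMap;

// Stand-in types for illustration only -- not the PR's actual structs.
#[derive(Default)]
struct ProcWidgetState {
    is_tree_mode: bool,
}

#[derive(Default)]
struct ProcState {
    widget_states: HashMap<u64, ProcWidgetState>,
    force_update: Option<u64>,
}

struct App {
    proc_state: ProcState,
    current_widget_id: u64,
}

impl App {
    fn toggle_tree_mode(&mut self) {
        if let Some(state) = self
            .proc_state
            .widget_states
            .get_mut(&self.current_widget_id)
        {
            state.is_tree_mode = !state.is_tree_mode;
            // Marking the widget dirty forces a data re-conversion on the
            // next tick, so the table redraws in the new (tree/flat) shape.
            self.proc_state.force_update = Some(self.current_widget_id);
        }
    }
}

fn main() {
    let mut app = App {
        proc_state: ProcState {
            widget_states: std::iter::once((0, ProcWidgetState::default())).collect(),
            force_update: None,
        },
        current_widget_id: 0,
    };
    app.toggle_tree_mode();
    assert!(app.proc_state.widget_states[&0].is_tree_mode);
    assert_eq!(app.proc_state.force_update, Some(0));
}
```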
6 changes: 5 additions & 1 deletion src/app/data_harvester/processes.rs
@@ -60,6 +60,7 @@ impl Default for ProcessSorting {
#[derive(Debug, Clone, Default)]
pub struct ProcessHarvest {
pub pid: u32,
pub parent_pid: Option<u32>, // Remember, parent_pid 0 is root...
pub cpu_usage_percent: f64,
pub mem_usage_percent: f64,
pub mem_usage_bytes: u64,
@@ -200,7 +201,7 @@ fn read_path_contents(path: &PathBuf) -> std::io::Result<String> {

#[cfg(target_os = "linux")]
fn get_linux_process_state(stat: &[&str]) -> (char, String) {
// The -2 offset is because of us cutting off name + pid
// The -2 offset is because we cut off name + pid; the state is normally at index 2
if let Some(first_char) = stat[0].chars().collect::<Vec<char>>().first() {
(
*first_char,
@@ -282,6 +283,7 @@ fn read_proc<S: core::hash::BuildHasher>(
&mut pid_stat.cpu_time,
use_current_cpu_total,
)?;
let parent_pid = stat[1].parse::<u32>().ok();
let (_vsize, rss) = get_linux_process_vsize_rss(&stat);
let mem_usage_kb = rss * page_file_kb;
let mem_usage_percent = mem_usage_kb as f64 / mem_total_kb as f64 * 100.0;
@@ -320,6 +322,7 @@

Ok(ProcessHarvest {
pid,
parent_pid,
name,
command,
mem_usage_percent,
@@ -425,6 +428,7 @@ pub fn windows_macos_get_processes_list(

process_vector.push(ProcessHarvest {
pid: process_val.pid() as u32,
parent_pid: process_val.parent(),
name,
command,
mem_usage_percent: if mem_total_kb > 0 {
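A note on where `parent_pid` comes from on Linux: `/proc/[pid]/stat` starts with `pid (comm) state ppid ...`, and the harvester strips the leading `pid (comm)` before splitting, so the state lands at index 0 and the ppid at index 1, hence `stat[1]` above. A standalone sketch of that parsing, assuming a well-formed stat line (not the PR's exact code):

```rust
// Sketch: pull (state, ppid) out of a /proc/[pid]/stat line.
// The comm field may itself contain spaces and parentheses, so split
// on the *last* ')' instead of naively splitting on whitespace.
fn parse_state_and_ppid(stat_line: &str) -> Option<(char, u32)> {
    let after_comm = stat_line.rsplit_once(')')?.1;
    let fields: Vec<&str> = after_comm.split_whitespace().collect();
    let state = fields.first()?.chars().next()?; // field 3: process state
    let ppid = fields.get(1)?.parse::<u32>().ok()?; // field 4: parent pid
    Some((state, ppid))
}

fn main() {
    let line = "1234 (a (tricky) name) S 1 1234 1234 0 -1 4194560";
    assert_eq!(parse_state_and_ppid(line), Some(('S', 1)));
}
```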
4 changes: 3 additions & 1 deletion src/app/states.rs
@@ -9,6 +9,7 @@ use crate::{
constants,
data_harvester::processes::{self, ProcessSorting},
};
use ProcessSorting::*;

#[derive(Debug)]
pub enum ScrollDirection {
@@ -159,7 +160,6 @@ pub struct ProcColumn {

impl Default for ProcColumn {
fn default() -> Self {
use ProcessSorting::*;
let ordered_columns = vec![
Count,
Pid,
@@ -357,6 +357,7 @@ pub struct ProcWidgetState {
pub current_column_index: usize,
pub is_sort_open: bool,
pub columns: ProcColumn,
pub is_tree_mode: bool,
}

impl ProcWidgetState {
@@ -395,6 +396,7 @@ impl ProcWidgetState {
current_column_index: 0,
is_sort_open: false,
columns,
is_tree_mode: false,
}
}

12 changes: 2 additions & 10 deletions src/canvas/widgets/process_table.rs
@@ -36,7 +36,7 @@ pub trait ProcessTableWidget {
widget_id: u64,
);

/// Draws the process sort box.
/// Draws the process search field.
/// - `widget_id` represents the widget ID of the search box itself --- NOT the process widget
/// state that is stored.
///
@@ -173,15 +173,6 @@ impl ProcessTableWidget for Painter {
.finalized_process_data_map
.get(&widget_id)
{
// Admittedly this is kinda a hack... but we need to:
// * Scroll
// * Show/hide elements based on scroll position
//
// As such, we use a process_counter to know when we've
// hit the process we've currently scrolled to.
// We also need to move the list - we can
// do so by hiding some elements!

let table_gap = if draw_loc.height < TABLE_GAP_HEIGHT_LIMIT {
0
} else {
@@ -592,6 +583,7 @@ impl ProcessTableWidget for Painter {
.iter()
.map(|column| Row::Data(vec![column].into_iter()));

// FIXME: [State] Shorten state to small form if it can't fit...?
let column_state = &mut proc_widget_state.columns.column_state;
column_state.select(Some(
proc_widget_state
117 changes: 90 additions & 27 deletions src/data_conversion.rs
@@ -1,7 +1,7 @@
//! This mainly concerns converting collected data into things that the canvas
//! can actually handle.

use std::collections::HashMap;
use std::collections::{HashMap, VecDeque};

use crate::{
app::{data_farmer, data_harvester, App, Filter},
@@ -38,11 +38,14 @@ pub struct ConvertedNetworkData {
// mean_tx: f64,
}

// TODO: [REFACTOR] Process data... stuff really needs a rewrite. Again.
#[derive(Clone, Default, Debug)]
pub struct ConvertedProcessData {
pub pid: u32,
pub ppid: Option<u32>,
pub name: String,
pub command: String,
pub is_thread: Option<bool>,
pub cpu_percent_usage: f64,
pub mem_percent_usage: f64,
pub mem_usage_bytes: u64,
@@ -59,20 +62,6 @@ pub struct ConvertedProcessData {
pub process_state: String,
}

#[derive(Clone, Default, Debug)]
pub struct SingleProcessData {
pub pid: u32,
pub cpu_percent_usage: f64,
pub mem_percent_usage: f64,
pub mem_usage_bytes: u64,
pub group_pids: Vec<u32>,
pub read_per_sec: u64,
pub write_per_sec: u64,
pub total_read: u64,
pub total_write: u64,
pub process_state: String,
}

#[derive(Clone, Default, Debug)]
pub struct ConvertedCpuData {
pub cpu_name: String,
@@ -418,6 +407,9 @@ pub enum ProcessNamingType {
pub fn convert_process_data(
current_data: &data_farmer::DataCollection,
) -> Vec<ConvertedProcessData> {
// FIXME: Thread highlighting and hiding support
// For macOS see https://github.com/hishamhm/htop/pull/848/files

current_data
.process_harvest
.iter()
@@ -437,6 +429,8 @@

ConvertedProcessData {
pid: process.pid,
ppid: process.parent_pid,
is_thread: None,
name: process.name.to_string(),
command: process.command.to_string(),
cpu_percent_usage: process.cpu_usage_percent,
@@ -458,9 +452,76 @@
.collect::<Vec<_>>()
}

pub fn tree_process_data(
single_process_data: &[ConvertedProcessData],
) -> Vec<ConvertedProcessData> {
// Let's first build up a (really terrible) parent -> child mapping...
// At the same time, let's make a mapping of PID -> process data!
// TODO: ideally... I shouldn't have to do this... this seems kinda... geh.
let mut parent_child_mapping: HashMap<u32, Vec<u32>> = HashMap::default();
let mut pid_process_mapping: HashMap<u32, &ConvertedProcessData> = HashMap::default();

single_process_data.iter().for_each(|process| {
parent_child_mapping
.entry(process.ppid.unwrap_or(0))
.or_insert_with(Vec::new)
.push(process.pid);

// There should be no collisions...
if pid_process_mapping.contains_key(&process.pid) {
debug!("There was a PID collision!");
}
pid_process_mapping.insert(process.pid, process);
});

// Turn the parent-child mapping into a "list" via DFS...
let mut pids_to_explore: VecDeque<u32> = VecDeque::default();
let mut explored_pids: Vec<u32> = vec![0];
if let Some(zero_pid) = parent_child_mapping.get(&0) {
pids_to_explore.extend(zero_pid);
} else {
// FIXME: Remove this, this is for debugging
debug!("PID 0 had no children during tree building...");
}

while let Some(current_pid) = pids_to_explore.pop_front() {
explored_pids.push(current_pid);
if let Some(children) = parent_child_mapping.get(&current_pid) {
for child in children {
pids_to_explore.push_front(*child);
}
}
}

// Now let's "rearrange" our current list of converted process data into the correct
// order required...

explored_pids
.iter()
.filter_map(|pid| match pid_process_mapping.remove(pid) {
Some(proc) => Some(proc.clone()),
None => None,
})
.collect::<Vec<_>>()
}
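
The traversal above deserves a worked example. A reduced, self-contained sketch, with plain `(pid, ppid)` pairs standing in for `ConvertedProcessData`, shows that pushing children onto the *front* of the deque yields a depth-first order in which each parent's whole subtree is emitted before its next sibling (with siblings reversed relative to insertion order, matching the code above):

```rust
use std::collections::{HashMap, VecDeque};

// Reduced model of tree_process_data's ordering pass.
fn tree_order(processes: &[(u32, u32)]) -> Vec<u32> {
    let mut children: HashMap<u32, Vec<u32>> = HashMap::new();
    for &(pid, ppid) in processes {
        children.entry(ppid).or_default().push(pid);
    }

    // Seed with the children of the root pseudo-PID 0.
    let mut to_explore: VecDeque<u32> =
        children.get(&0).cloned().unwrap_or_default().into();
    let mut order = Vec::new();

    while let Some(pid) = to_explore.pop_front() {
        order.push(pid);
        if let Some(kids) = children.get(&pid) {
            // push_front makes this DFS: the subtree of the last-pushed
            // child is fully drained before its siblings are visited.
            for &kid in kids {
                to_explore.push_front(kid);
            }
        }
    }
    order
}

fn main() {
    // 0 -> 1; 1 -> {2, 4}; 2 -> 3
    let procs = [(1, 0), (2, 1), (3, 2), (4, 1)];
    // Children are pushed to the front in insertion order, so siblings
    // come out reversed: 4 is visited before 2's subtree.
    assert_eq!(tree_order(&procs), vec![1, 4, 2, 3]);
}
```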

pub fn group_process_data(
single_process_data: &[ConvertedProcessData], is_using_command: ProcessNamingType,
) -> Vec<ConvertedProcessData> {
#[derive(Clone, Default, Debug)]
struct SingleProcessData {
pub pid: u32,
pub cpu_percent_usage: f64,
pub mem_percent_usage: f64,
pub mem_usage_bytes: u64,
pub group_pids: Vec<u32>,
pub read_per_sec: f64,
pub write_per_sec: f64,
pub total_read: f64,
pub total_write: f64,
pub process_state: String,
}

let mut grouped_hashmap: HashMap<String, SingleProcessData> = std::collections::HashMap::new();

single_process_data.iter().for_each(|process| {
@@ -478,20 +539,20 @@
(*entry).mem_percent_usage += process.mem_percent_usage;
(*entry).mem_usage_bytes += process.mem_usage_bytes;
(*entry).group_pids.push(process.pid);
(*entry).read_per_sec += process.rps_f64 as u64;
(*entry).write_per_sec += process.wps_f64 as u64;
(*entry).total_read += process.tr_f64 as u64;
(*entry).total_write += process.tw_f64 as u64;
(*entry).read_per_sec += process.rps_f64;
(*entry).write_per_sec += process.wps_f64;
(*entry).total_read += process.tr_f64;
(*entry).total_write += process.tw_f64;
});

grouped_hashmap
.iter()
.map(|(identifier, process_details)| {
let p = process_details.clone();
let converted_rps = get_exact_byte_values(p.read_per_sec, false);
let converted_wps = get_exact_byte_values(p.write_per_sec, false);
let converted_total_read = get_exact_byte_values(p.total_read, false);
let converted_total_write = get_exact_byte_values(p.total_write, false);
let converted_rps = get_exact_byte_values(p.read_per_sec as u64, false);
let converted_wps = get_exact_byte_values(p.write_per_sec as u64, false);
let converted_total_read = get_exact_byte_values(p.total_read as u64, false);
let converted_total_write = get_exact_byte_values(p.total_write as u64, false);

let read_per_sec = format!("{:.*}{}/s", 0, converted_rps.0, converted_rps.1);
let write_per_sec = format!("{:.*}{}/s", 0, converted_wps.0, converted_wps.1);
Expand All @@ -503,6 +564,8 @@ pub fn group_process_data(

ConvertedProcessData {
pid: p.pid,
ppid: None,
is_thread: None,
name: identifier.to_string(),
command: identifier.to_string(),
cpu_percent_usage: p.cpu_percent_usage,
Expand All @@ -514,10 +577,10 @@ pub fn group_process_data(
write_per_sec,
total_read,
total_write,
rps_f64: p.read_per_sec as f64,
wps_f64: p.write_per_sec as f64,
tr_f64: p.total_read as f64,
tw_f64: p.total_write as f64,
rps_f64: p.read_per_sec,
wps_f64: p.write_per_sec,
tr_f64: p.total_read,
tw_f64: p.total_write,
process_state: p.process_state,
}
})
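One more note on the `u64` to `f64` switch in `SingleProcessData`: accumulating per-process I/O rates after an early `as u64` cast truncates the fractional bytes per second away, and the loss compounds across a group. A tiny illustration of the difference (values are illustrative only):

```rust
fn main() {
    // Four processes, each reading 0.9 B/s.
    let rates = [0.9_f64, 0.9, 0.9, 0.9];

    // Old scheme: cast each sample to u64 before summing -- truncation.
    let truncated: u64 = rates.iter().map(|r| *r as u64).sum();

    // New scheme: accumulate in f64, convert once for display.
    let summed: f64 = rates.iter().sum();

    assert_eq!(truncated, 0); // every 0.9 truncates to 0
    assert_eq!(summed as u64, 3); // 3.6 B/s survives aggregation
}
```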