
Commit

Added scripts related to visualization, running benchmarks etc.
CihatAltiparmak committed Jul 21, 2024
1 parent b91a243 commit 442503b
Showing 5 changed files with 114 additions and 0 deletions.
Empty file.
Empty file.
5 changes: 5 additions & 0 deletions middleware_configurations/rmw_zenoh/config_rmw_zenoh.sh
@@ -0,0 +1,5 @@
echo "The configurations for rmw_zenoh_cpp is started!"
# sudo sysctl -w "net.ipv4.tcp_rmem=4096 4096 4096"
# sudo sysctl -w "net.ipv4.tcp_wmem=4096 4096 4096"
# sudo sysctl -w "net.ipv4.tcp_mem=4096 4096 4096"
echo "The configurations for rmw_zenoh_cpp is finished!"
68 changes: 68 additions & 0 deletions scripts/box_plot_visualizer.py
@@ -0,0 +1,68 @@
import matplotlib.pyplot as plt
import numpy as np
import json
import os
import sys

BENCHMARK_RESULTS_DIR = sys.argv[1]  # directory with one sub-directory per benchmark scenario

middleware_colors = {
    'rmw_zenoh_cpp': 'orange',
    'rmw_cyclonedds_cpp': 'peachpuff',
    'rmw_fastrtps_cpp': 'tomato'}

middleware_list = ["rmw_zenoh_cpp", "rmw_cyclonedds_cpp", "rmw_fastrtps_cpp"]

def read_benchmark_json(file_name):
    # Load one benchmark result file (JSON written via --benchmark_out_format=json).
    benchmark_json_data = None
    with open(file_name) as f:
        benchmark_json_data = json.load(f)
    return benchmark_json_data

def get_real_time_list_from_benchmark_json(benchmark_json_data):
    # Collect the real_time sample of every iteration-type benchmark entry.
    real_time_list = []
    for benchmark_info in benchmark_json_data["benchmarks"]:
        if benchmark_info["run_type"] == "iteration":
            real_time_list.append(benchmark_info["real_time"])
    return real_time_list

def get_middleware_dataset_for_scenario(scenario_name):
    # For each middleware, read <BENCHMARK_RESULTS_DIR>/<scenario_name>/<middleware_name>.json
    # and bundle its samples with a label and a plot color.
    middleware_datasets = []
    for middleware_name in middleware_list:
        file_name = os.path.join(BENCHMARK_RESULTS_DIR, scenario_name, f"{middleware_name}.json")
        benchmark_json_data = read_benchmark_json(file_name)
        dataset = get_real_time_list_from_benchmark_json(benchmark_json_data)
        middleware_datasets.append(
            {
                "name": middleware_name,
                "dataset": dataset,
                "color": middleware_colors[middleware_name],
            })
    return middleware_datasets

def plot_dataset_of_scenario(plt, middleware_datasets):
    # Draw one box plot per middleware for a single scenario.
    labels = []
    colors = []
    datasets = []

    for x in middleware_datasets:
        labels.append(x["name"])
        colors.append(x["color"])
        datasets.append(x["dataset"])

    fig, ax = plt.subplots()
    ax.set_ylabel('real time (ns)')

    bplot = ax.boxplot(datasets,
                       patch_artist=True,
                       tick_labels=labels)

    # fill with colors
    for patch, color in zip(bplot['boxes'], colors):
        patch.set_facecolor(color)

for scenario_name in os.listdir(BENCHMARK_RESULTS_DIR):
    middleware_datasets = get_middleware_dataset_for_scenario(scenario_name)
    plot_dataset_of_scenario(plt, middleware_datasets)

plt.show()
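
The visualizer expects the directory passed as its first argument to contain one sub-directory per scenario, each holding one benchmark JSON file per middleware (see the os.path.join call above). A minimal usage sketch; benchmark_results and scenario_perception_pipeline are hypothetical names chosen for illustration:

# Expected layout (hypothetical names):
#   benchmark_results/scenario_perception_pipeline/rmw_zenoh_cpp.json
#   benchmark_results/scenario_perception_pipeline/rmw_cyclonedds_cpp.json
#   benchmark_results/scenario_perception_pipeline/rmw_fastrtps_cpp.json
python3 scripts/box_plot_visualizer.py benchmark_results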
41 changes: 41 additions & 0 deletions scripts/run_all_benchmarks.sh
@@ -0,0 +1,41 @@
# Inspired from https://unix.stackexchange.com/questions/31414/how-can-i-pass-a-command-line-argument-into-a-shell-script
helpFunction()
{
    echo ""
    echo "Usage: $0 -i initial_script -m middleware_name -d benchmark_results_directory"
    echo -e "\t-i initial script to run once before the benchmarks start"
    echo -e "\t-m selected middleware to benchmark"
    echo -e "\t-d the directory where the benchmark results are saved"
    exit 1 # Exit script after printing help
}

while getopts "i:m:d:" opt
do
    case "$opt" in
        i ) initial_script="$OPTARG" ;;
        m ) middleware_name="$OPTARG" ;;
        d ) benchmark_results_directory="$OPTARG" ;;
        ? ) helpFunction ;; # Print helpFunction in case an unknown parameter is given
    esac
done

# Print helpFunction in case parameters are empty
if [ -z "$initial_script" ] || [ -z "$middleware_name" ] || [ -z "$benchmark_results_directory" ]
then
    echo "Some or all of the parameters are empty"
    helpFunction
fi

echo "middleware name is $middleware_name"
echo "benchmark results directory is $benchmark_results_directory"

echo "Benchmarking is starting!"
echo "Starting initial sctipts before bechmarks run!"
sh "$initial_script"
echo "Initial script has finished! Now starting to benchmark middleware with scenarios!"

# ros2 daemon stop
# ros2 launch moveit_middleware_benchmark scenario_basic_service_client_benchmark.launch.py benchmark_command_args:="--benchmark_out=middleware_benchmark_results.json --benchmark_out_format=json"

# ros2 daemon stop
# ros2 launch moveit_middleware_benchmark scenario_perception_pipeline_benchmark.launch.py benchmark_command_args:="--benchmark_out=middleware_benchmark_results.json --benchmark_out_format=json"
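
A hedged usage sketch for this script, run from the repository root; it reuses the config_rmw_zenoh.sh added in this commit as the initial script, while the results directory name is a hypothetical choice:

sh scripts/run_all_benchmarks.sh \
    -i middleware_configurations/rmw_zenoh/config_rmw_zenoh.sh \
    -m rmw_zenoh_cpp \
    -d benchmark_results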
