Monte Carlo Benchmarking Engine
High-performance SIMD Monte Carlo engine (AVX2/NEON) with custom memory allocators and perf logging.
 
Loading...
Searching...
No Matches
run_perf.sh
Go to the documentation of this file.
#!/bin/bash
# ===========================================
# run_perf.sh
#
# Monte Carlo perf-benchmarking driver: runs the benchmark binary
# under 'perf stat' for each allocator method and exports the
# counters as CSV + Parquet logs.
# ===========================================

# Fail fast on errors and failed pipeline stages.
# (Moved ahead of the first real command so that setup failures —
# e.g. an unresolvable script path — are not silently ignored.)
set -eo pipefail

# Repo root = parent of the directory containing this script.
ROOT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)"
# ${PYTHONPATH:-} avoids a trailing reference to an unset variable
# and keeps the script safe if 'set -u' is ever enabled.
export PYTHONPATH="$ROOT_DIR:${PYTHONPATH:-}"
# -------- Config --------
# Benchmark defaults; trials/methods can be overridden via CLI args.
readonly DEFAULT_TRIALS=100000000
readonly ALL_METHODS=("Sequential" "Heap" "Pool" "SIMD")
# Absolute directory containing this script.
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
readonly SCRIPT_DIR
# Short unique batch id: first uuid segment, or PID+epoch when
# uuidgen is unavailable (so 'set -e' does not kill the run).
if command -v uuidgen >/dev/null 2>&1; then
  BATCHID=$(uuidgen | cut -d'-' -f1)
else
  BATCHID="$$$(date +%s)"
fi
readonly BATCHID
readonly BUILD_PATH="./build/montecarlo"
GLOBAL_TIMESTAMP=$(date "+%Y-%m-%d_%H-%M-%S")
readonly GLOBAL_TIMESTAMP
# -------- CLI Args --------
# Usage: run_perf.sh [TRIALS] [METHOD] [insert_db=false]
#   no args                      -> default trials, all methods
#   numeric arg alone            -> custom trials, all methods
#   alphabetic arg alone         -> default trials, single method
#   both args                    -> custom trials + single method
ARG1="${1:-}"
ARG2="${2:-}"
# BUG FIX: was '$ARG3=$3', which expanded to a bogus command like
# '=insert_db=false' (aborting the script under 'set -e') and left
# ARG3 unset, so the insert_db opt-out below could never trigger.
ARG3="${3:-}"

INSERT_DB=true

# Third positional arg may opt out of the ClickHouse insert step.
if [[ "$ARG3" == "insert_db=false" ]]; then
  INSERT_DB=false
fi

if [[ -z "$ARG1" && -z "$ARG2" ]]; then
  # No args: full sweep with default trial count.
  TRIALS=$DEFAULT_TRIALS
  METHODS=("${ALL_METHODS[@]}")
elif [[ "$ARG1" =~ ^[0-9]+$ && -z "$ARG2" ]]; then
  # Numeric arg only: custom trial count, all methods.
  TRIALS="$ARG1"
  METHODS=("${ALL_METHODS[@]}")
elif [[ "$ARG1" =~ ^[a-zA-Z]+$ && -z "$ARG2" ]]; then
  # Alphabetic arg only: default trials, single named method.
  TRIALS=$DEFAULT_TRIALS
  METHODS=("$ARG1")
else
  # Both provided: custom trials + single named method.
  TRIALS="$ARG1"
  METHODS=("$ARG2")
fi
# -------- Info --------
# Announce the run configuration before doing any work.
printf '%s\n' \
  "[INFO] Trials : $TRIALS" \
  "[INFO] Methods : ${METHODS[*]}" \
  "[INFO] Batch ID : $BATCHID" \
  "[INFO] Timestamp: $GLOBAL_TIMESTAMP"

# Per-batch log directory, created up front so every step can write to it.
LOG_DIR="db/logs/batch_${BATCHID}_${GLOBAL_TIMESTAMP}"
mkdir -p "$LOG_DIR"
# -------- Perf Event Creation --------
# Hardware counters collected for every run; joined into the single
# comma-separated list that 'perf stat -e' expects.
PERF_EVENT_LIST=(
  cycles instructions
  cache-references cache-misses
  branch-instructions branch-misses
  L1-dcache-loads L1-dcache-load-misses
  dTLB-loads dTLB-load-misses
)
PERF_EVENTS=$(IFS=,; printf '%s' "${PERF_EVENT_LIST[*]}")

echo "[INFO] Using perf events:"
echo "$PERF_EVENTS" | tr ',' '\n' | sed 's/^/ - /'
# -------- Run Each Method --------
# For each requested allocator method: run the benchmark binary under
# 'perf stat', measure wall-clock time, parse the counters, and write
# one per-method Parquet file into $LOG_DIR.
for METHOD in "${METHODS[@]}"; do
  # NOTE(review): this timestamp contains a space, which ends up inside
  # the CSV/Parquet file names below — confirm downstream consumers
  # (combine_batch_parquets.py etc.) tolerate spaces in paths.
  METHOD_TIMESTAMP=$(date +"%Y-%m-%d %H:%M:%S")
  echo "[▶] Running: $METHOD"

  # Raw perf CSV and the per-method Parquet output path for this run.
  LOG_PATH="$LOG_DIR/perf_${METHOD}_${METHOD_TIMESTAMP}.csv"
  PERF_PARQUET="$LOG_DIR/perf_results_${METHOD}_${METHOD_TIMESTAMP}_${BATCHID}.parquet"

  mkdir -p "$(dirname "$LOG_PATH")"

  # Wall-clock timing (nanoseconds) bracketing the perf-instrumented run.
  # Benchmark stdout is discarded; perf writes its CSV (-x,) to $LOG_PATH.
  START_NS=$(date +%s%N)
  perf stat -x, -o "$LOG_PATH" -e $PERF_EVENTS "$BUILD_PATH" "$TRIALS" "$METHOD" > /dev/null
  END=$(date +%s%N)

  WALL_NS=$((END - START_NS))
  # ns -> seconds with 6 decimals (awk, since bash arithmetic is integer-only).
  WALL_S=$(awk "BEGIN {printf \"%.6f\", $WALL_NS / 1000000000}")

  # Pull metrics into shell vars
  # The parser presumably emits 'NAME=value' lines (CYCLES, INSTR, IPC,
  # CACHE_LOADS, ... used below) which 'eval' loads into this shell.
  # NOTE(review): eval executes the parser's output verbatim — acceptable
  # only because the input is our own perf CSV; verify the parser escapes
  # its values.
  eval "$(python3 pipeline/parse_perf_metrics.py "$LOG_PATH" "$TRIALS")"

  # Write a single Parquet record combining timing, run identity, and
  # counter-derived metrics.
  # NOTE(review): L2_/L3_ variables have no matching events in
  # $PERF_EVENTS — presumably the parser derives or zero-fills them;
  # confirm against parse_perf_metrics.py.
  python3 pipeline/gen_perf_parquet_logs.py \
    --out_path "$PERF_PARQUET" \
    --wall_time_s "$WALL_S" \
    --wall_time_ns "$WALL_NS" \
    --timestamp "$METHOD_TIMESTAMP" \
    --batchid "$BATCHID" \
    --method "$METHOD" \
    --trials "$TRIALS" \
    --cycles "$CYCLES" \
    --instr "$INSTR" \
    --ipc "$IPC" \
    --cache_loads "$CACHE_LOADS" \
    --cache_miss "$CACHE_MISS" \
    --l1_loads "$L1_LOADS" \
    --l1_misses "$L1_MISSES" \
    --l2_loads "$L2_LOADS" \
    --l2_misses "$L2_MISSES" \
    --l3_loads "$L3_LOADS" \
    --l3_misses "$L3_MISSES" \
    --tlb_loads "$TLB_LOADS" \
    --tlb_misses "$TLB_MISSES" \
    --branch_instr "$BRANCH_INSTR" \
    --branch_misses "$BRANCH_MISSES" \
    --miss_per_trial "$MISS_PER_TRIAL" \
    --cycles_per_trial "$CYCLES_PER_TRIAL"
done
# Merge every per-method Parquet produced above into one combined file.
COMBINED_PARQUET="$LOG_DIR/perf_results_all_${BATCHID}.parquet"
python3 pipeline/combine_batch_parquets.py "$LOG_DIR" "$COMBINED_PARQUET"

# Push the batch into ClickHouse unless the caller opted out
# via the 'insert_db=false' CLI argument.
if [[ "$INSERT_DB" == true ]]; then
  python3 pipeline/insert_to_clickhouse.py --batchid "$BATCHID"
else
  echo "[INFO] Skipping ClickHouse insertion (insert_db=false)"
fi
# Final summary: where this batch's artifacts landed.
printf '%s\n' "[INFO] Simulation Finished:"
printf '%s\n' " └─ Exported CSV & Parquet logs to : $LOG_DIR"
printf '%s\n' " └─ Combined batch Parquet logs : $LOG_DIR/perf_results_all_${BATCHID}.parquet"