From 922a6865164db93484e49523aa6ec451c273bd6e Mon Sep 17 00:00:00 2001 From: Sam Zhou Date: Mon, 4 May 2026 15:13:52 -0500 Subject: [PATCH 1/3] Add CaloClusterGNN training pipeline Adds a CaloClusterGNN/ subdirectory containing the full training pipeline for the GNN calorimeter-clustering algorithm intended as a parallel to the existing seed+BFS CaloClusterMaker in Mu2e Offline. The deployed recipe is "CCN+BFS10": CaloClusterNet edge classifier + BFS-style traversal with ExpandCut = 10 MeV. Layout (modelled on TrkQual/, but Python-package shaped): CaloClusterGNN/ README.md how to retrain, frozen hyperparams, deployment cross-link setup_env.sh wraps setupmu2e-art.sh + ana 2.6.1 src/ data/ graph builder, calo-entrant truth labels, normalisation, packed dataset geometry/ crystalId -> (x, y, disk) loader models/ SimpleEdgeNet, CaloClusterNet, layers, heads training/ losses, metrics, trainer inference/ cluster reconstruction (cluster_reco.py), postprocess (kept here so train-time eval scripts work end to end) scripts/ build/pack/train/tune/evaluate pipeline, failure audits, cluster-physics eval, ancestry validation, run1B no-field eval, plotting configs/ five YAML configs (one per training run) tests/ 88 unit tests covering all of src/ above splits/ frozen 35/7/8 v2 split file lists data/ crystal_geometry.csv + crystal_neighbors.csv + crystal_map_raw.csv (small lookup tables) What does NOT live here: * The deployment-side ONNX export / parity scripts (export_onnx.py, export_norm_stats.py, validate_onnx.py, dump_parity_payloads.py, compare_parity_dump.py) and the deploy wrappers (calo_cluster_net_deploy.py, simple_edge_net_deploy.py) -- those belong with the Mu2e/Offline integration PR, not the training repo. * The `.onnx` artifacts themselves (shipped via Mu2e data area, not versioned in MLTrain -- same convention TrkQual follows). * Large run outputs and processed graphs (regenerable from EventNtuple ROOT files via scripts/build_all_graphs.sh). 
The v2 training data requires the `calomcsim.ancestorSimIds` branch added in Mu2e/EventNtuple (PR pending). README cross-links there once the EventNtuple PR has a number. Test suite: 88/88 passing in this layout via `python3 -m unittest discover -s tests -p "test_*.py" -v` after `source setup_env.sh`. --- CaloClusterGNN/README.md | 198 ++ CaloClusterGNN/configs/calo_cluster_net.yaml | 57 + .../configs/calo_cluster_net_saliency.yaml | 56 + .../configs/calo_cluster_net_stage2.yaml | 55 + CaloClusterGNN/configs/default.yaml | 55 + CaloClusterGNN/data/crystal_geometry.csv | 1349 ++++++++ CaloClusterGNN/data/crystal_map_raw.csv | 2741 +++++++++++++++++ CaloClusterGNN/data/crystal_neighbors.csv | 1349 ++++++++ CaloClusterGNN/scripts/analysis_for_sophie.py | 455 +++ CaloClusterGNN/scripts/build_all_graphs.sh | 34 + CaloClusterGNN/scripts/build_graphs.py | 140 + .../scripts/evaluate_cluster_physics.py | 792 +++++ CaloClusterGNN/scripts/evaluate_new_truth.py | 499 +++ CaloClusterGNN/scripts/evaluate_run1b.py | 664 ++++ CaloClusterGNN/scripts/evaluate_test.py | 750 +++++ CaloClusterGNN/scripts/failure_audit.py | 561 ++++ CaloClusterGNN/scripts/make_slide_plots.py | 369 +++ CaloClusterGNN/scripts/pack_graphs.py | 70 + CaloClusterGNN/scripts/plot_crystal_map.py | 168 + CaloClusterGNN/scripts/plot_gnn_clusters.py | 777 +++++ .../scripts/plot_new_truth_clusters.py | 656 ++++ CaloClusterGNN/scripts/plot_training.py | 194 ++ CaloClusterGNN/scripts/smoke_test_env.py | 85 + CaloClusterGNN/scripts/train_gnn.py | 170 + CaloClusterGNN/scripts/tune_threshold.py | 506 +++ CaloClusterGNN/scripts/validate_ancestry.py | 262 ++ CaloClusterGNN/setup_env.sh | 20 + CaloClusterGNN/splits/.gitkeep | 0 CaloClusterGNN/splits/test_files.txt | 8 + CaloClusterGNN/splits/train_files.txt | 35 + CaloClusterGNN/splits/val_files.txt | 7 + CaloClusterGNN/src/__init__.py | 13 + CaloClusterGNN/src/data/__init__.py | 9 + CaloClusterGNN/src/data/dataset.py | 270 ++ CaloClusterGNN/src/data/graph_builder.py | 250 
++ CaloClusterGNN/src/data/normalization.py | 109 + CaloClusterGNN/src/data/truth_labels.py | 86 + .../src/data/truth_labels_primary.py | 171 + CaloClusterGNN/src/geometry/README.md | 24 + CaloClusterGNN/src/geometry/__init__.py | 6 + .../src/geometry/crystal_geometry.py | 64 + CaloClusterGNN/src/inference/__init__.py | 7 + CaloClusterGNN/src/inference/cluster_reco.py | 319 ++ CaloClusterGNN/src/inference/postprocess.py | 109 + CaloClusterGNN/src/models/__init__.py | 43 + CaloClusterGNN/src/models/calo_cluster_net.py | 101 + CaloClusterGNN/src/models/heads.py | 79 + CaloClusterGNN/src/models/layers.py | 93 + CaloClusterGNN/src/models/simple_edge_net.py | 124 + CaloClusterGNN/src/training/__init__.py | 8 + CaloClusterGNN/src/training/losses.py | 177 ++ CaloClusterGNN/src/training/metrics.py | 196 ++ CaloClusterGNN/src/training/trainer.py | 287 ++ CaloClusterGNN/tests/test_calo_cluster_net.py | 219 ++ CaloClusterGNN/tests/test_graph_builder.py | 279 ++ CaloClusterGNN/tests/test_inference.py | 179 ++ CaloClusterGNN/tests/test_postprocess.py | 144 + CaloClusterGNN/tests/test_truth_labels.py | 173 ++ .../tests/test_truth_labels_primary.py | 255 ++ 59 files changed, 16876 insertions(+) create mode 100644 CaloClusterGNN/README.md create mode 100644 CaloClusterGNN/configs/calo_cluster_net.yaml create mode 100644 CaloClusterGNN/configs/calo_cluster_net_saliency.yaml create mode 100644 CaloClusterGNN/configs/calo_cluster_net_stage2.yaml create mode 100644 CaloClusterGNN/configs/default.yaml create mode 100644 CaloClusterGNN/data/crystal_geometry.csv create mode 100644 CaloClusterGNN/data/crystal_map_raw.csv create mode 100644 CaloClusterGNN/data/crystal_neighbors.csv create mode 100644 CaloClusterGNN/scripts/analysis_for_sophie.py create mode 100644 CaloClusterGNN/scripts/build_all_graphs.sh create mode 100644 CaloClusterGNN/scripts/build_graphs.py create mode 100644 CaloClusterGNN/scripts/evaluate_cluster_physics.py create mode 100644 
CaloClusterGNN/scripts/evaluate_new_truth.py create mode 100644 CaloClusterGNN/scripts/evaluate_run1b.py create mode 100644 CaloClusterGNN/scripts/evaluate_test.py create mode 100644 CaloClusterGNN/scripts/failure_audit.py create mode 100644 CaloClusterGNN/scripts/make_slide_plots.py create mode 100644 CaloClusterGNN/scripts/pack_graphs.py create mode 100644 CaloClusterGNN/scripts/plot_crystal_map.py create mode 100644 CaloClusterGNN/scripts/plot_gnn_clusters.py create mode 100644 CaloClusterGNN/scripts/plot_new_truth_clusters.py create mode 100644 CaloClusterGNN/scripts/plot_training.py create mode 100644 CaloClusterGNN/scripts/smoke_test_env.py create mode 100644 CaloClusterGNN/scripts/train_gnn.py create mode 100644 CaloClusterGNN/scripts/tune_threshold.py create mode 100644 CaloClusterGNN/scripts/validate_ancestry.py create mode 100644 CaloClusterGNN/setup_env.sh create mode 100644 CaloClusterGNN/splits/.gitkeep create mode 100644 CaloClusterGNN/splits/test_files.txt create mode 100644 CaloClusterGNN/splits/train_files.txt create mode 100644 CaloClusterGNN/splits/val_files.txt create mode 100644 CaloClusterGNN/src/__init__.py create mode 100644 CaloClusterGNN/src/data/__init__.py create mode 100644 CaloClusterGNN/src/data/dataset.py create mode 100644 CaloClusterGNN/src/data/graph_builder.py create mode 100644 CaloClusterGNN/src/data/normalization.py create mode 100644 CaloClusterGNN/src/data/truth_labels.py create mode 100644 CaloClusterGNN/src/data/truth_labels_primary.py create mode 100644 CaloClusterGNN/src/geometry/README.md create mode 100644 CaloClusterGNN/src/geometry/__init__.py create mode 100644 CaloClusterGNN/src/geometry/crystal_geometry.py create mode 100644 CaloClusterGNN/src/inference/__init__.py create mode 100644 CaloClusterGNN/src/inference/cluster_reco.py create mode 100644 CaloClusterGNN/src/inference/postprocess.py create mode 100644 CaloClusterGNN/src/models/__init__.py create mode 100644 CaloClusterGNN/src/models/calo_cluster_net.py 
create mode 100644 CaloClusterGNN/src/models/heads.py create mode 100644 CaloClusterGNN/src/models/layers.py create mode 100644 CaloClusterGNN/src/models/simple_edge_net.py create mode 100644 CaloClusterGNN/src/training/__init__.py create mode 100644 CaloClusterGNN/src/training/losses.py create mode 100644 CaloClusterGNN/src/training/metrics.py create mode 100644 CaloClusterGNN/src/training/trainer.py create mode 100644 CaloClusterGNN/tests/test_calo_cluster_net.py create mode 100644 CaloClusterGNN/tests/test_graph_builder.py create mode 100644 CaloClusterGNN/tests/test_inference.py create mode 100644 CaloClusterGNN/tests/test_postprocess.py create mode 100644 CaloClusterGNN/tests/test_truth_labels.py create mode 100644 CaloClusterGNN/tests/test_truth_labels_primary.py diff --git a/CaloClusterGNN/README.md b/CaloClusterGNN/README.md new file mode 100644 index 0000000..cb51022 --- /dev/null +++ b/CaloClusterGNN/README.md @@ -0,0 +1,198 @@ +# CaloClusterGNN + +## Introduction + +A Graph Neural Network for calorimeter hit clustering, intended as a +drop-in (parallel) replacement for the existing seed+BFS +`CaloClusterMaker` in Mu2e Offline. The deployed recipe is +**CaloClusterNet + BFS-style traversal at ExpandCut = 10 MeV** +("CCN+BFS10"). On the MDC2025 mixed-pileup test set +(276,688 events / 481,543 disk-graphs) it beats BFS on every +downstream-relevant cluster-physics metric: + +| Metric (E_reco >= 50 MeV) | BFS | CCN+BFS10 | Change | +|---------------------------|-------|-----------|--------| +| Mean abs(dE) / MeV | 0.839 | 0.616 | -27% | +| 95th-pct abs(dE) / MeV | 3.520 | 2.338 | -34% | +| Mean centroid dr / mm | 1.589 | 1.292 | -19% | +| 95th-pct dr / mm | 3.606 | 2.294 | -36% | + +Two model classes are trained from this directory: +* `SimpleEdgeNet` -- 215 K params, 3 message-passing rounds, sum aggregation. +* `CaloClusterNet` -- 676 K params, 4 EdgeAwareResBlocks with gated + aggregation + global context, optional node-saliency head. 
+ This is the production model. + + Both use the same input graph (one per calorimeter disk per event, + 6 node features + 8 edge features), same z-score normalisation, and + the same pipeline -- so swapping models in deployment is config-only + (see [`docs/onnx_deployment.md`](#deployment) cross-link below). + + This README covers, in order: + * where an analyzer can find things they might like to know; + * instructions for retraining (full pipeline, end to end); + * a table of commits for each training version; + * deployment cross-links. + + ## For the Interested Analyzer + + Two short pointers: + + * **Per-graph definition** -- one PyG `Data` object per calorimeter + disk per event. Node features: `log(1+E)`, time, x, y, radial r, + per-graph relative energy `E/E_max`. Edge features: dx, dy, distance, + dt, dlog_e, energy asymmetry, log summed energy, dr. + See `src/data/graph_builder.py`. + * **Truth labelling** -- "calo-entrant ancestor" rule: each `CaloHit` + is grouped under the highest Geant4 ancestor that also deposited + energy in the same disk. This recovers true shower membership for + hits split across crystals during showering. Requires the + `calomcsim.ancestorSimIds` branch added in + [Mu2e/EventNtuple#TBD](https://github.com/Mu2e/EventNtuple/pulls). + See `src/data/truth_labels_primary.py`. + + ## For the Interested (Re)Trainer + + Two stages: (a) train + freeze a model and export it to ONNX; + (b) the C++ inference module that consumes the `.onnx` lives in + `Mu2e/Offline:Offline/CaloCluster/` and is **not** part of MLTrain. + This subdirectory is training only. 
+ +### General Setup + +Fork this repo and clone: + +```bash +cd /path/to/your/work/area/ + +# only needs to be done once +git clone https://www.github.com/YourGitHubUsername/MLTrain.git +cd MLTrain/ +git remote add -f mu2e https://www.github.com/Mu2e/MLTrain.git + +# whenever you start a new development cycle +git fetch mu2e main +git checkout --no-track -b your-new-branchname mu2e/main +cd CaloClusterGNN/ +``` + +Activate the Mu2e Python environment (PyTorch 2.5.1, PyG 2.7.0, +uproot 5.7.2 are available in `ana 2.6.1`). The included +`setup_env.sh` wraps `setupmu2e-art.sh` + `pyenv ana 2.6.1` and +extends `PYTHONPATH` so the unit tests find the `src/` package: + +```bash +source setup_env.sh # works in interactive and batch shells +python3 scripts/smoke_test_env.py +python3 -m unittest discover -s tests -p "test_*.py" -v +``` + +### Training Pipeline + +Five steps, end to end. All paths are relative to `CaloClusterGNN/`. + +1. **Lock a dataset split.** The split is frozen for v2 at 35/7/8 + (train/val/test) -- if you are extending the dataset, regenerate: + + ```bash + python3 scripts/make_splits.py # writes splits/{train,val,test}_files.txt + ``` + +2. **Build per-disk graphs from EventNtuple ROOT files.** Reads the + v2 NTS files (which carry `calomcsim.ancestorSimIds`), applies the + calo-entrant truth rule, and writes one `.pt` graph per disk per + event under `data/processed/`. About 10 min for 41,656 graphs on a + CPU node: + + ```bash + bash scripts/build_all_graphs.sh + python3 scripts/pack_graphs.py # packs into train.pt / val.pt / test.pt + ``` + +3. **Train.** Two model families, each via a config file under + `configs/`. 
CaloClusterNet (production model): + + ```bash + python3 scripts/train_gnn.py \ + --config configs/calo_cluster_net.yaml \ + --device cuda --run-name calo_cluster_net_v2_stage1 + ``` + + SimpleEdgeNet: + + ```bash + python3 scripts/train_gnn.py \ + --config configs/default.yaml \ + --device cuda --epochs 100 --batch-size 64 \ + --run-name simple_edge_net_v2 + ``` + + Per-run outputs land under `outputs/runs/<run-name>/`. + +4. **Tune the edge threshold on val** (model-agnostic): + + ```bash + python3 scripts/tune_threshold.py \ + --config configs/calo_cluster_net.yaml \ + --checkpoint outputs/runs/calo_cluster_net_v2_stage1/checkpoints/best_model.pt + ``` + + The frozen v2 thresholds are baked into the configs: + CaloClusterNet `tau_edge=0.20`, SimpleEdgeNet `tau_edge=0.26`. + +5. **Evaluate once on test, run failure audits + cluster-physics + evaluation:** + + ```bash + OMP_NUM_THREADS=4 PYTHONUNBUFFERED=1 python3 -u scripts/evaluate_test.py + OMP_NUM_THREADS=4 PYTHONUNBUFFERED=1 python3 -u scripts/evaluate_cluster_physics.py + python3 scripts/failure_audit.py + ``` + +### Frozen Recipe Values + +These match the deployment defaults (see Deployment below). 
If you +change them you'll want to re-tune and re-evaluate: + +| Hyperparameter | Value | Where it's set | +|------------------|-------|------------------------------------| +| `r_max` | 210 mm| `configs/*.yaml -> graph.r_max` | +| `dt_max` | 25 ns | `configs/*.yaml -> graph.dt_max` | +| `k_min` | 3 | `configs/*.yaml -> graph.k_min` | +| `k_max` | 20 | `configs/*.yaml -> graph.k_max` | +| `tau_edge` (CCN) | 0.20 | `configs/calo_cluster_net.yaml` | +| `tau_edge` (SEN) | 0.26 | `configs/default.yaml` | +| `bfs_expand_cut` | 10 MeV| `configs/calo_cluster_net.yaml` | +| `min_hits` | 2 | `configs/calo_cluster_net.yaml` | +| `min_energy_mev` | 10.0 | `configs/calo_cluster_net.yaml` | + +## Versions and Provenance + +| Version | Commit | EventNtuple dataset | +|-------------------------------|--------|----------------------------------------------------------------------------------| +| `calo-cluster-net-v2-stage1` | TBD | `FlateMinusMix1BBTriggered/MDC2025-002` (50 files, MDC2025af, with ancestorSimIds)| +| `simple-edge-net-v2` | TBD | same | + +The EventNtuples for v2 require the `calomcsim.ancestorSimIds` +branch added in +[Mu2e/EventNtuple#TBD](https://github.com/Mu2e/EventNtuple/pulls). + +## Deployment + +The trained models ship to Mu2e/Offline as ONNX artifacts; the C++ +inference modules live in `Offline/CaloCluster/`. See: + +* `Mu2e/Offline:Offline/CaloCluster/src/CaloHitGraphMaker_module.cc` + (per-disk graph construction, port of `src/data/graph_builder.py`) +* `Mu2e/Offline:Offline/CaloCluster/src/CaloClusterMakerGNN_module.cc` + (ONNX inference + cluster assembly, model-agnostic; one C++ class + swaps SimpleEdgeNet vs CaloClusterNet via FHiCL) + +Trained `.onnx` artifacts live in the Mu2e data area and are picked +up by `art::ConfigFileLookupPolicy` at job start. The deployment-side +parity gate (Python pipeline vs C++ Offline pipeline, byte-exact on +cluster labels) lives in the Mu2e/Offline PR for this work. 
+ +## License + +This subdirectory inherits the MLTrain repository LICENSE. diff --git a/CaloClusterGNN/configs/calo_cluster_net.yaml b/CaloClusterGNN/configs/calo_cluster_net.yaml new file mode 100644 index 0000000..18f8f3b --- /dev/null +++ b/CaloClusterGNN/configs/calo_cluster_net.yaml @@ -0,0 +1,57 @@ +# CaloClusterNet training configuration +# Stage 1: edge-only loss (lambda_node=0, lambda_cons=0) +# To advance stages, update lambda_node/lambda_cons and resume from checkpoint. +# +# Usage (GPU node): +# python3 scripts/train_gnn.py --config configs/calo_cluster_net.yaml --device cuda +# +# Stage 2: set lambda_node: 0.3, optionally lower lr, resume from Stage 1 best checkpoint +# Stage 3: set lambda_cons: 0.05, resume from Stage 2 best checkpoint + +data: + crystal_geometry: data/crystal_geometry.csv + crystal_neighbors: data/crystal_neighbors.csv + processed_dir: data/processed/ + normalization_stats: data/normalization_stats.pt + splits: + train: splits/train_files.txt + val: splits/val_files.txt + test: splits/test_files.txt + +graph: + r_max_mm: 210.0 + dt_max_ns: 25.0 + k_min: 3 + k_max: 20 + use_topology_graph: false + +model: + name: CaloClusterNet + hidden_dim: 96 + n_mp_layers: 4 + dropout: 0.1 + +train: + optimizer: adamw + lr: 1.0e-3 + weight_decay: 1.0e-4 + scheduler: plateau + epochs: 100 + early_stop_patience: 15 + batch_size: 32 + neg_pos_ratio: 5 + # Stage 1: edge only + lambda_edge: 1.0 + lambda_node: 0.0 + lambda_cons: 0.0 + +inference: + tau_node: 0.5 + tau_edge: 0.20 + min_hits: 2 + min_energy_mev: 10.0 + +output: + run_dir: outputs/runs/ + checkpoint_dir: checkpoints/ + debug_dir: outputs/debug/ diff --git a/CaloClusterGNN/configs/calo_cluster_net_saliency.yaml b/CaloClusterGNN/configs/calo_cluster_net_saliency.yaml new file mode 100644 index 0000000..75815ca --- /dev/null +++ b/CaloClusterGNN/configs/calo_cluster_net_saliency.yaml @@ -0,0 +1,56 @@ +# CaloClusterNet with learned node saliency for bridge-hit identification +# Resume from 
Stage 1 best checkpoint. Node saliency labels redefined: +# y_node=1 for multi-hit cluster members, y_node=0 for singletons/ambiguous. +# +# Usage (GPU node): +# python3 scripts/train_gnn.py --config configs/calo_cluster_net_saliency.yaml \ +# --device cuda --run-name calo_cluster_net_v2_saliency \ +# --resume outputs/runs/calo_cluster_net_v2_stage1/checkpoints/best_model.pt + +data: + crystal_geometry: data/crystal_geometry.csv + crystal_neighbors: data/crystal_neighbors.csv + processed_dir: data/processed/ + normalization_stats: data/normalization_stats.pt + splits: + train: splits/train_files.txt + val: splits/val_files.txt + test: splits/test_files.txt + +graph: + r_max_mm: 210.0 + dt_max_ns: 25.0 + k_min: 3 + k_max: 20 + use_topology_graph: false + +model: + name: CaloClusterNet + hidden_dim: 96 + n_mp_layers: 4 + dropout: 0.1 + +train: + optimizer: adamw + lr: 5.0e-4 + weight_decay: 1.0e-4 + scheduler: plateau + epochs: 100 + early_stop_patience: 15 + batch_size: 32 + neg_pos_ratio: 5 + # Edge + node saliency (new multi-hit labels) + lambda_edge: 1.0 + lambda_node: 0.3 + lambda_cons: 0.0 + +inference: + tau_node: 0.5 + tau_edge: 0.14 + min_hits: 2 + min_energy_mev: 10.0 + +output: + run_dir: outputs/runs/ + checkpoint_dir: checkpoints/ + debug_dir: outputs/debug/ diff --git a/CaloClusterGNN/configs/calo_cluster_net_stage2.yaml b/CaloClusterGNN/configs/calo_cluster_net_stage2.yaml new file mode 100644 index 0000000..b0224f5 --- /dev/null +++ b/CaloClusterGNN/configs/calo_cluster_net_stage2.yaml @@ -0,0 +1,55 @@ +# CaloClusterNet Stage 2: edge + node saliency loss +# Resume from Stage 1 best checkpoint. 
+# +# Usage (GPU node): +# python3 scripts/train_gnn.py --config configs/calo_cluster_net_stage2.yaml \ +# --device cuda --run-name calo_cluster_net_stage2 \ +# --resume outputs/runs/calo_cluster_net_v2_stage1/checkpoints/best_model.pt + +data: + crystal_geometry: data/crystal_geometry.csv + crystal_neighbors: data/crystal_neighbors.csv + processed_dir: data/processed/ + normalization_stats: data/normalization_stats.pt + splits: + train: splits/train_files.txt + val: splits/val_files.txt + test: splits/test_files.txt + +graph: + r_max_mm: 210.0 + dt_max_ns: 25.0 + k_min: 3 + k_max: 20 + use_topology_graph: false + +model: + name: CaloClusterNet + hidden_dim: 96 + n_mp_layers: 4 + dropout: 0.1 + +train: + optimizer: adamw + lr: 5.0e-4 + weight_decay: 1.0e-4 + scheduler: plateau + epochs: 100 + early_stop_patience: 15 + batch_size: 32 + neg_pos_ratio: 5 + # Stage 2: edge + node saliency + lambda_edge: 1.0 + lambda_node: 0.3 + lambda_cons: 0.0 + +inference: + tau_node: 0.5 + tau_edge: 0.34 + min_hits: 2 + min_energy_mev: 10.0 + +output: + run_dir: outputs/runs/ + checkpoint_dir: checkpoints/ + debug_dir: outputs/debug/ diff --git a/CaloClusterGNN/configs/default.yaml b/CaloClusterGNN/configs/default.yaml new file mode 100644 index 0000000..6821c68 --- /dev/null +++ b/CaloClusterGNN/configs/default.yaml @@ -0,0 +1,55 @@ +# Default configuration for GNN calorimeter clustering +# Override individual keys on the command line or by creating a separate YAML +# that includes this one. 
+ +data: + crystal_geometry: data/crystal_geometry.csv + crystal_neighbors: data/crystal_neighbors.csv + processed_dir: data/processed/ + normalization_stats: data/normalization_stats.pt + splits: + train: splits/train_files.txt + val: splits/val_files.txt + test: splits/test_files.txt +graph: + # Hybrid radius + kNN strategy (implemented with scipy.spatial.cKDTree — + # torch-cluster / radius_graph is NOT available in this environment) + r_max_mm: 210.0 # radius graph cutoff (mm, disk-local frame) + dt_max_ns: 25.0 # maximum |Δt| for any edge (ns) + k_min: 3 # minimum neighbors for isolated nodes (kNN fallback) + k_max: 20 # degree cap per node (keeps dense events tractable) + # Alternative: topology-based graph from crystal_neighbors.csv + # Set use_topology_graph: true to use neighbors()/nextNeighbors() instead of radius + use_topology_graph: false + +model: + name: SimpleEdgeNet # or CaloClusterNet + hidden_dim: 64 # SimpleEdgeNet; use 96 for CaloClusterNet + n_mp_layers: 3 # message-passing rounds + +train: + optimizer: adamw + lr: 1.0e-3 + weight_decay: 1.0e-4 + scheduler: plateau # ReduceLROnPlateau on val pairwise F1 + epochs: 100 + early_stop_patience: 15 + batch_size: 32 # graphs per batch + # Class imbalance + neg_pos_ratio: 5 # subsample negatives during training (5:1) + # Multi-task loss weights (Stage 1: edge only; Stage 2+: add node) + lambda_edge: 1.0 + lambda_node: 0.0 # set to 0.3 for Stage 2 + lambda_cons: 0.0 # set to 0.05 for Stage 3 + +inference: + # Tuned on val set — do not change after test evaluation begins + tau_node: 0.5 + tau_edge: 0.26 + min_hits: 2 + min_energy_mev: 10.0 + +output: + run_dir: outputs/runs/ + checkpoint_dir: checkpoints/ + debug_dir: outputs/debug/ diff --git a/CaloClusterGNN/data/crystal_geometry.csv b/CaloClusterGNN/data/crystal_geometry.csv new file mode 100644 index 0000000..92c1c0a --- /dev/null +++ b/CaloClusterGNN/data/crystal_geometry.csv @@ -0,0 +1,1349 @@ +crystalId,diskId,x_mm,y_mm +0,0,-188.6,377.3 
+1,0,-154.4,377.3 +2,0,-120.1,377.3 +3,0,120.1,377.3 +4,0,154.4,377.3 +5,0,188.6,377.3 +6,0,205.8,343.0 +7,0,205.8,-343.0 +8,0,188.6,-377.3 +9,0,154.4,-377.3 +10,0,120.1,-377.3 +11,0,-120.1,-377.3 +12,0,-154.4,-377.3 +13,0,-188.6,-377.3 +14,0,-205.8,-343.0 +15,0,-205.8,343.0 +16,0,-205.8,411.6 +17,0,-171.5,411.6 +18,0,-137.2,411.6 +19,0,-102.9,411.6 +20,0,-68.6,411.6 +21,0,-34.3,411.6 +22,0,0.0,411.6 +23,0,34.3,411.6 +24,0,68.6,411.6 +25,0,102.9,411.6 +26,0,137.2,411.6 +27,0,171.5,411.6 +28,0,205.8,411.6 +29,0,222.9,377.3 +30,0,240.1,343.0 +31,0,257.2,308.7 +32,0,394.5,34.3 +33,0,411.6,0.0 +34,0,394.5,-34.3 +35,0,257.2,-308.7 +36,0,240.1,-343.0 +37,0,222.9,-377.3 +38,0,205.8,-411.6 +39,0,171.5,-411.6 +40,0,137.2,-411.6 +41,0,102.9,-411.6 +42,0,68.6,-411.6 +43,0,34.3,-411.6 +44,0,0.0,-411.6 +45,0,-34.3,-411.6 +46,0,-68.6,-411.6 +47,0,-102.9,-411.6 +48,0,-137.2,-411.6 +49,0,-171.5,-411.6 +50,0,-205.8,-411.6 +51,0,-222.9,-377.3 +52,0,-240.1,-343.0 +53,0,-257.2,-308.7 +54,0,-394.5,-34.3 +55,0,-411.6,0.0 +56,0,-394.5,34.3 +57,0,-257.2,308.7 +58,0,-240.1,343.0 +59,0,-222.9,377.3 +60,0,-222.9,445.9 +61,0,-188.6,445.9 +62,0,-154.4,445.9 +63,0,-120.1,445.9 +64,0,-85.8,445.9 +65,0,-51.5,445.9 +66,0,-17.1,445.9 +67,0,17.1,445.9 +68,0,51.5,445.9 +69,0,85.8,445.9 +70,0,120.1,445.9 +71,0,154.4,445.9 +72,0,188.6,445.9 +73,0,222.9,445.9 +74,0,240.1,411.6 +75,0,257.2,377.3 +76,0,274.4,343.0 +77,0,291.5,308.7 +78,0,308.7,274.4 +79,0,325.9,240.1 +80,0,343.0,205.8 +81,0,360.1,171.5 +82,0,377.3,137.2 +83,0,394.5,102.9 +84,0,411.6,68.6 +85,0,428.8,34.3 +86,0,445.9,0.0 +87,0,428.8,-34.3 +88,0,411.6,-68.6 +89,0,394.5,-102.9 +90,0,377.3,-137.2 +91,0,360.1,-171.5 +92,0,343.0,-205.8 +93,0,325.9,-240.1 +94,0,308.7,-274.4 +95,0,291.5,-308.7 +96,0,274.4,-343.0 +97,0,257.2,-377.3 +98,0,240.1,-411.6 +99,0,222.9,-445.9 +100,0,188.6,-445.9 +101,0,154.4,-445.9 +102,0,120.1,-445.9 +103,0,85.8,-445.9 +104,0,51.5,-445.9 +105,0,17.1,-445.9 +106,0,-17.1,-445.9 +107,0,-51.5,-445.9 +108,0,-85.8,-445.9 
+109,0,-120.1,-445.9 +110,0,-154.4,-445.9 +111,0,-188.6,-445.9 +112,0,-222.9,-445.9 +113,0,-240.1,-411.6 +114,0,-257.2,-377.3 +115,0,-274.4,-343.0 +116,0,-291.5,-308.7 +117,0,-308.7,-274.4 +118,0,-325.9,-240.1 +119,0,-343.0,-205.8 +120,0,-360.1,-171.5 +121,0,-377.3,-137.2 +122,0,-394.5,-102.9 +123,0,-411.6,-68.6 +124,0,-428.8,-34.3 +125,0,-445.9,0.0 +126,0,-428.8,34.3 +127,0,-411.6,68.6 +128,0,-394.5,102.9 +129,0,-377.3,137.2 +130,0,-360.1,171.5 +131,0,-343.0,205.8 +132,0,-325.9,240.1 +133,0,-308.7,274.4 +134,0,-291.5,308.7 +135,0,-274.4,343.0 +136,0,-257.2,377.3 +137,0,-240.1,411.6 +138,0,-240.1,480.2 +139,0,-205.8,480.2 +140,0,-171.5,480.2 +141,0,-137.2,480.2 +142,0,-102.9,480.2 +143,0,-68.6,480.2 +144,0,-34.3,480.2 +145,0,0.0,480.2 +146,0,34.3,480.2 +147,0,68.6,480.2 +148,0,102.9,480.2 +149,0,137.2,480.2 +150,0,171.5,480.2 +151,0,205.8,480.2 +152,0,240.1,480.2 +153,0,257.2,445.9 +154,0,274.4,411.6 +155,0,291.5,377.3 +156,0,308.7,343.0 +157,0,325.9,308.7 +158,0,343.0,274.4 +159,0,360.1,240.1 +160,0,377.3,205.8 +161,0,394.5,171.5 +162,0,411.6,137.2 +163,0,428.8,102.9 +164,0,445.9,68.6 +165,0,463.0,34.3 +166,0,480.2,0.0 +167,0,463.0,-34.3 +168,0,445.9,-68.6 +169,0,428.8,-102.9 +170,0,411.6,-137.2 +171,0,394.5,-171.5 +172,0,377.3,-205.8 +173,0,360.1,-240.1 +174,0,343.0,-274.4 +175,0,325.9,-308.7 +176,0,308.7,-343.0 +177,0,291.5,-377.3 +178,0,274.4,-411.6 +179,0,257.2,-445.9 +180,0,240.1,-480.2 +181,0,205.8,-480.2 +182,0,171.5,-480.2 +183,0,137.2,-480.2 +184,0,102.9,-480.2 +185,0,68.6,-480.2 +186,0,34.3,-480.2 +187,0,0.0,-480.2 +188,0,-34.3,-480.2 +189,0,-68.6,-480.2 +190,0,-102.9,-480.2 +191,0,-137.2,-480.2 +192,0,-171.5,-480.2 +193,0,-205.8,-480.2 +194,0,-240.1,-480.2 +195,0,-257.2,-445.9 +196,0,-274.4,-411.6 +197,0,-291.5,-377.3 +198,0,-308.7,-343.0 +199,0,-325.9,-308.7 +200,0,-343.0,-274.4 +201,0,-360.1,-240.1 +202,0,-377.3,-205.8 +203,0,-394.5,-171.5 +204,0,-411.6,-137.2 +205,0,-428.8,-102.9 +206,0,-445.9,-68.6 +207,0,-463.0,-34.3 +208,0,-480.2,0.0 
+209,0,-463.0,34.3 +210,0,-445.9,68.6 +211,0,-428.8,102.9 +212,0,-411.6,137.2 +213,0,-394.5,171.5 +214,0,-377.3,205.8 +215,0,-360.1,240.1 +216,0,-343.0,274.4 +217,0,-325.9,308.7 +218,0,-308.7,343.0 +219,0,-291.5,377.3 +220,0,-274.4,411.6 +221,0,-257.2,445.9 +222,0,-257.2,514.5 +223,0,-222.9,514.5 +224,0,-188.6,514.5 +225,0,-154.4,514.5 +226,0,-120.1,514.5 +227,0,-85.8,514.5 +228,0,-51.5,514.5 +229,0,-17.1,514.5 +230,0,17.1,514.5 +231,0,51.5,514.5 +232,0,85.8,514.5 +233,0,120.1,514.5 +234,0,154.4,514.5 +235,0,188.6,514.5 +236,0,222.9,514.5 +237,0,257.2,514.5 +238,0,274.4,480.2 +239,0,291.5,445.9 +240,0,308.7,411.6 +241,0,325.9,377.3 +242,0,343.0,343.0 +243,0,360.1,308.7 +244,0,377.3,274.4 +245,0,394.5,240.1 +246,0,411.6,205.8 +247,0,428.8,171.5 +248,0,445.9,137.2 +249,0,463.0,102.9 +250,0,480.2,68.6 +251,0,497.4,34.3 +252,0,514.5,0.0 +253,0,497.4,-34.3 +254,0,480.2,-68.6 +255,0,463.0,-102.9 +256,0,445.9,-137.2 +257,0,428.8,-171.5 +258,0,411.6,-205.8 +259,0,394.5,-240.1 +260,0,377.3,-274.4 +261,0,360.1,-308.7 +262,0,343.0,-343.0 +263,0,325.9,-377.3 +264,0,308.7,-411.6 +265,0,291.5,-445.9 +266,0,274.4,-480.2 +267,0,257.2,-514.5 +268,0,222.9,-514.5 +269,0,188.6,-514.5 +270,0,154.4,-514.5 +271,0,120.1,-514.5 +272,0,85.8,-514.5 +273,0,51.5,-514.5 +274,0,17.1,-514.5 +275,0,-17.1,-514.5 +276,0,-51.5,-514.5 +277,0,-85.8,-514.5 +278,0,-120.1,-514.5 +279,0,-154.4,-514.5 +280,0,-188.6,-514.5 +281,0,-222.9,-514.5 +282,0,-257.2,-514.5 +283,0,-274.4,-480.2 +284,0,-291.5,-445.9 +285,0,-308.7,-411.6 +286,0,-325.9,-377.3 +287,0,-343.0,-343.0 +288,0,-360.1,-308.7 +289,0,-377.3,-274.4 +290,0,-394.5,-240.1 +291,0,-411.6,-205.8 +292,0,-428.8,-171.5 +293,0,-445.9,-137.2 +294,0,-463.0,-102.9 +295,0,-480.2,-68.6 +296,0,-497.4,-34.3 +297,0,-514.5,0.0 +298,0,-497.4,34.3 +299,0,-480.2,68.6 +300,0,-463.0,102.9 +301,0,-445.9,137.2 +302,0,-428.8,171.5 +303,0,-411.6,205.8 +304,0,-394.5,240.1 +305,0,-377.3,274.4 +306,0,-360.1,308.7 +307,0,-343.0,343.0 +308,0,-325.9,377.3 +309,0,-308.7,411.6 
+310,0,-291.5,445.9 +311,0,-274.4,480.2 +312,0,-274.4,548.8 +313,0,-240.1,548.8 +314,0,-205.8,548.8 +315,0,-171.5,548.8 +316,0,-137.2,548.8 +317,0,-102.9,548.8 +318,0,-68.6,548.8 +319,0,-34.3,548.8 +320,0,0.0,548.8 +321,0,34.3,548.8 +322,0,68.6,548.8 +323,0,102.9,548.8 +324,0,137.2,548.8 +325,0,171.5,548.8 +326,0,205.8,548.8 +327,0,240.1,548.8 +328,0,274.4,548.8 +329,0,291.5,514.5 +330,0,308.7,480.2 +331,0,325.9,445.9 +332,0,343.0,411.6 +333,0,360.1,377.3 +334,0,377.3,343.0 +335,0,394.5,308.7 +336,0,411.6,274.4 +337,0,428.8,240.1 +338,0,445.9,205.8 +339,0,463.0,171.5 +340,0,480.2,137.2 +341,0,497.4,102.9 +342,0,514.5,68.6 +343,0,531.7,34.3 +344,0,548.8,0.0 +345,0,531.7,-34.3 +346,0,514.5,-68.6 +347,0,497.4,-102.9 +348,0,480.2,-137.2 +349,0,463.0,-171.5 +350,0,445.9,-205.8 +351,0,428.8,-240.1 +352,0,411.6,-274.4 +353,0,394.5,-308.7 +354,0,377.3,-343.0 +355,0,360.1,-377.3 +356,0,343.0,-411.6 +357,0,325.9,-445.9 +358,0,308.7,-480.2 +359,0,291.5,-514.5 +360,0,274.4,-548.8 +361,0,240.1,-548.8 +362,0,205.8,-548.8 +363,0,171.5,-548.8 +364,0,137.2,-548.8 +365,0,102.9,-548.8 +366,0,68.6,-548.8 +367,0,34.3,-548.8 +368,0,0.0,-548.8 +369,0,-34.3,-548.8 +370,0,-68.6,-548.8 +371,0,-102.9,-548.8 +372,0,-137.2,-548.8 +373,0,-171.5,-548.8 +374,0,-205.8,-548.8 +375,0,-240.1,-548.8 +376,0,-274.4,-548.8 +377,0,-291.5,-514.5 +378,0,-308.7,-480.2 +379,0,-325.9,-445.9 +380,0,-343.0,-411.6 +381,0,-360.1,-377.3 +382,0,-377.3,-343.0 +383,0,-394.5,-308.7 +384,0,-411.6,-274.4 +385,0,-428.8,-240.1 +386,0,-445.9,-205.8 +387,0,-463.0,-171.5 +388,0,-480.2,-137.2 +389,0,-497.4,-102.9 +390,0,-514.5,-68.6 +391,0,-531.7,-34.3 +392,0,-548.8,0.0 +393,0,-531.7,34.3 +394,0,-514.5,68.6 +395,0,-497.4,102.9 +396,0,-480.2,137.2 +397,0,-463.0,171.5 +398,0,-445.9,205.8 +399,0,-428.8,240.1 +400,0,-411.6,274.4 +401,0,-394.5,308.7 +402,0,-377.3,343.0 +403,0,-360.1,377.3 +404,0,-343.0,411.6 +405,0,-325.9,445.9 +406,0,-308.7,480.2 +407,0,-291.5,514.5 +408,0,-222.9,583.1 +409,0,-188.6,583.1 +410,0,-154.4,583.1 
+411,0,-120.1,583.1 +412,0,-85.8,583.1 +413,0,-51.5,583.1 +414,0,-17.1,583.1 +415,0,17.1,583.1 +416,0,51.5,583.1 +417,0,85.8,583.1 +418,0,120.1,583.1 +419,0,154.4,583.1 +420,0,188.6,583.1 +421,0,222.9,583.1 +422,0,308.7,548.8 +423,0,325.9,514.5 +424,0,343.0,480.2 +425,0,360.1,445.9 +426,0,377.3,411.6 +427,0,394.5,377.3 +428,0,411.6,343.0 +429,0,428.8,308.7 +430,0,445.9,274.4 +431,0,463.0,240.1 +432,0,480.2,205.8 +433,0,497.4,171.5 +434,0,514.5,137.2 +435,0,531.7,102.9 +436,0,548.8,68.6 +437,0,566.0,34.3 +438,0,583.1,0.0 +439,0,566.0,-34.3 +440,0,548.8,-68.6 +441,0,531.7,-102.9 +442,0,514.5,-137.2 +443,0,497.4,-171.5 +444,0,480.2,-205.8 +445,0,463.0,-240.1 +446,0,445.9,-274.4 +447,0,428.8,-308.7 +448,0,411.6,-343.0 +449,0,394.5,-377.3 +450,0,377.3,-411.6 +451,0,360.1,-445.9 +452,0,343.0,-480.2 +453,0,325.9,-514.5 +454,0,308.7,-548.8 +455,0,222.9,-583.1 +456,0,188.6,-583.1 +457,0,154.4,-583.1 +458,0,120.1,-583.1 +459,0,85.8,-583.1 +460,0,51.5,-583.1 +461,0,17.1,-583.1 +462,0,-17.1,-583.1 +463,0,-51.5,-583.1 +464,0,-85.8,-583.1 +465,0,-120.1,-583.1 +466,0,-154.4,-583.1 +467,0,-188.6,-583.1 +468,0,-222.9,-583.1 +469,0,-308.7,-548.8 +470,0,-325.9,-514.5 +471,0,-343.0,-480.2 +472,0,-360.1,-445.9 +473,0,-377.3,-411.6 +474,0,-394.5,-377.3 +475,0,-411.6,-343.0 +476,0,-428.8,-308.7 +477,0,-445.9,-274.4 +478,0,-463.0,-240.1 +479,0,-480.2,-205.8 +480,0,-497.4,-171.5 +481,0,-514.5,-137.2 +482,0,-531.7,-102.9 +483,0,-548.8,-68.6 +484,0,-566.0,-34.3 +485,0,-583.1,0.0 +486,0,-566.0,34.3 +487,0,-548.8,68.6 +488,0,-531.7,102.9 +489,0,-514.5,137.2 +490,0,-497.4,171.5 +491,0,-480.2,205.8 +492,0,-463.0,240.1 +493,0,-445.9,274.4 +494,0,-428.8,308.7 +495,0,-411.6,343.0 +496,0,-394.5,377.3 +497,0,-377.3,411.6 +498,0,-360.1,445.9 +499,0,-343.0,480.2 +500,0,-325.9,514.5 +501,0,-308.7,548.8 +502,0,-137.2,617.4 +503,0,-102.9,617.4 +504,0,-68.6,617.4 +505,0,-34.3,617.4 +506,0,0.0,617.4 +507,0,34.3,617.4 +508,0,68.6,617.4 +509,0,102.9,617.4 +510,0,137.2,617.4 +511,0,360.1,514.5 
+512,0,377.3,480.2 +513,0,394.5,445.9 +514,0,411.6,411.6 +515,0,428.8,377.3 +516,0,445.9,343.0 +517,0,463.0,308.7 +518,0,480.2,274.4 +519,0,497.4,240.1 +520,0,514.5,205.8 +521,0,531.7,171.5 +522,0,548.8,137.2 +523,0,566.0,102.9 +524,0,583.1,68.6 +525,0,600.2,34.3 +526,0,617.4,0.0 +527,0,600.2,-34.3 +528,0,583.1,-68.6 +529,0,566.0,-102.9 +530,0,548.8,-137.2 +531,0,531.7,-171.5 +532,0,514.5,-205.8 +533,0,497.4,-240.1 +534,0,480.2,-274.4 +535,0,463.0,-308.7 +536,0,445.9,-343.0 +537,0,428.8,-377.3 +538,0,411.6,-411.6 +539,0,394.5,-445.9 +540,0,377.3,-480.2 +541,0,360.1,-514.5 +542,0,137.2,-617.4 +543,0,102.9,-617.4 +544,0,68.6,-617.4 +545,0,34.3,-617.4 +546,0,0.0,-617.4 +547,0,-34.3,-617.4 +548,0,-68.6,-617.4 +549,0,-102.9,-617.4 +550,0,-137.2,-617.4 +551,0,-360.1,-514.5 +552,0,-377.3,-480.2 +553,0,-394.5,-445.9 +554,0,-411.6,-411.6 +555,0,-428.8,-377.3 +556,0,-445.9,-343.0 +557,0,-463.0,-308.7 +558,0,-480.2,-274.4 +559,0,-497.4,-240.1 +560,0,-514.5,-205.8 +561,0,-531.7,-171.5 +562,0,-548.8,-137.2 +563,0,-566.0,-102.9 +564,0,-583.1,-68.6 +565,0,-600.2,-34.3 +566,0,-617.4,0.0 +567,0,-600.2,34.3 +568,0,-583.1,68.6 +569,0,-566.0,102.9 +570,0,-548.8,137.2 +571,0,-531.7,171.5 +572,0,-514.5,205.8 +573,0,-497.4,240.1 +574,0,-480.2,274.4 +575,0,-463.0,308.7 +576,0,-445.9,343.0 +577,0,-428.8,377.3 +578,0,-411.6,411.6 +579,0,-394.5,445.9 +580,0,-377.3,480.2 +581,0,-360.1,514.5 +582,0,411.6,480.2 +583,0,428.8,445.9 +584,0,445.9,411.6 +585,0,463.0,377.3 +586,0,480.2,343.0 +587,0,497.4,308.7 +588,0,514.5,274.4 +589,0,531.7,240.1 +590,0,548.8,205.8 +591,0,566.0,171.5 +592,0,583.1,137.2 +593,0,600.2,102.9 +594,0,617.4,68.6 +595,0,634.5,34.3 +596,0,634.5,-34.3 +597,0,617.4,-68.6 +598,0,600.2,-102.9 +599,0,583.1,-137.2 +600,0,566.0,-171.5 +601,0,548.8,-205.8 +602,0,531.7,-240.1 +603,0,514.5,-274.4 +604,0,497.4,-308.7 +605,0,480.2,-343.0 +606,0,463.0,-377.3 +607,0,445.9,-411.6 +608,0,428.8,-445.9 +609,0,411.6,-480.2 +610,0,-411.6,-480.2 +611,0,-428.8,-445.9 +612,0,-445.9,-411.6 
+613,0,-463.0,-377.3 +614,0,-480.2,-343.0 +615,0,-497.4,-308.7 +616,0,-514.5,-274.4 +617,0,-531.7,-240.1 +618,0,-548.8,-205.8 +619,0,-566.0,-171.5 +620,0,-583.1,-137.2 +621,0,-600.2,-102.9 +622,0,-617.4,-68.6 +623,0,-634.5,-34.3 +624,0,-634.5,34.3 +625,0,-617.4,68.6 +626,0,-600.2,102.9 +627,0,-583.1,137.2 +628,0,-566.0,171.5 +629,0,-548.8,205.8 +630,0,-531.7,240.1 +631,0,-514.5,274.4 +632,0,-497.4,308.7 +633,0,-480.2,343.0 +634,0,-463.0,377.3 +635,0,-445.9,411.6 +636,0,-428.8,445.9 +637,0,-411.6,480.2 +638,0,480.2,411.6 +639,0,497.4,377.3 +640,0,514.5,343.0 +641,0,531.7,308.7 +642,0,548.8,274.4 +643,0,566.0,240.1 +644,0,583.1,205.8 +645,0,600.2,171.5 +646,0,617.4,137.2 +647,0,617.4,-137.2 +648,0,600.2,-171.5 +649,0,583.1,-205.8 +650,0,566.0,-240.1 +651,0,548.8,-274.4 +652,0,531.7,-308.7 +653,0,514.5,-343.0 +654,0,497.4,-377.3 +655,0,480.2,-411.6 +656,0,-480.2,-411.6 +657,0,-497.4,-377.3 +658,0,-514.5,-343.0 +659,0,-531.7,-308.7 +660,0,-548.8,-274.4 +661,0,-566.0,-240.1 +662,0,-583.1,-205.8 +663,0,-600.2,-171.5 +664,0,-617.4,-137.2 +665,0,-617.4,137.2 +666,0,-600.2,171.5 +667,0,-583.1,205.8 +668,0,-566.0,240.1 +669,0,-548.8,274.4 +670,0,-531.7,308.7 +671,0,-514.5,343.0 +672,0,-497.4,377.3 +673,0,-480.2,411.6 +674,1,-188.6,377.3 +675,1,-154.4,377.3 +676,1,-120.1,377.3 +677,1,120.1,377.3 +678,1,154.4,377.3 +679,1,188.6,377.3 +680,1,205.8,343.0 +681,1,205.8,-343.0 +682,1,188.6,-377.3 +683,1,154.4,-377.3 +684,1,120.1,-377.3 +685,1,-120.1,-377.3 +686,1,-154.4,-377.3 +687,1,-188.6,-377.3 +688,1,-205.8,-343.0 +689,1,-205.8,343.0 +690,1,-205.8,411.6 +691,1,-171.5,411.6 +692,1,-137.2,411.6 +693,1,-102.9,411.6 +694,1,-68.6,411.6 +695,1,-34.3,411.6 +696,1,0.0,411.6 +697,1,34.3,411.6 +698,1,68.6,411.6 +699,1,102.9,411.6 +700,1,137.2,411.6 +701,1,171.5,411.6 +702,1,205.8,411.6 +703,1,222.9,377.3 +704,1,240.1,343.0 +705,1,257.2,308.7 +706,1,394.5,34.3 +707,1,411.6,0.0 +708,1,394.5,-34.3 +709,1,257.2,-308.7 +710,1,240.1,-343.0 +711,1,222.9,-377.3 +712,1,205.8,-411.6 
+713,1,171.5,-411.6 +714,1,137.2,-411.6 +715,1,102.9,-411.6 +716,1,68.6,-411.6 +717,1,34.3,-411.6 +718,1,0.0,-411.6 +719,1,-34.3,-411.6 +720,1,-68.6,-411.6 +721,1,-102.9,-411.6 +722,1,-137.2,-411.6 +723,1,-171.5,-411.6 +724,1,-205.8,-411.6 +725,1,-222.9,-377.3 +726,1,-240.1,-343.0 +727,1,-257.2,-308.7 +728,1,-394.5,-34.3 +729,1,-411.6,0.0 +730,1,-394.5,34.3 +731,1,-257.2,308.7 +732,1,-240.1,343.0 +733,1,-222.9,377.3 +734,1,-222.9,445.9 +735,1,-188.6,445.9 +736,1,-154.4,445.9 +737,1,-120.1,445.9 +738,1,-85.8,445.9 +739,1,-51.5,445.9 +740,1,-17.1,445.9 +741,1,17.1,445.9 +742,1,51.5,445.9 +743,1,85.8,445.9 +744,1,120.1,445.9 +745,1,154.4,445.9 +746,1,188.6,445.9 +747,1,222.9,445.9 +748,1,240.1,411.6 +749,1,257.2,377.3 +750,1,274.4,343.0 +751,1,291.5,308.7 +752,1,308.7,274.4 +753,1,325.9,240.1 +754,1,343.0,205.8 +755,1,360.1,171.5 +756,1,377.3,137.2 +757,1,394.5,102.9 +758,1,411.6,68.6 +759,1,428.8,34.3 +760,1,445.9,0.0 +761,1,428.8,-34.3 +762,1,411.6,-68.6 +763,1,394.5,-102.9 +764,1,377.3,-137.2 +765,1,360.1,-171.5 +766,1,343.0,-205.8 +767,1,325.9,-240.1 +768,1,308.7,-274.4 +769,1,291.5,-308.7 +770,1,274.4,-343.0 +771,1,257.2,-377.3 +772,1,240.1,-411.6 +773,1,222.9,-445.9 +774,1,188.6,-445.9 +775,1,154.4,-445.9 +776,1,120.1,-445.9 +777,1,85.8,-445.9 +778,1,51.5,-445.9 +779,1,17.1,-445.9 +780,1,-17.1,-445.9 +781,1,-51.5,-445.9 +782,1,-85.8,-445.9 +783,1,-120.1,-445.9 +784,1,-154.4,-445.9 +785,1,-188.6,-445.9 +786,1,-222.9,-445.9 +787,1,-240.1,-411.6 +788,1,-257.2,-377.3 +789,1,-274.4,-343.0 +790,1,-291.5,-308.7 +791,1,-308.7,-274.4 +792,1,-325.9,-240.1 +793,1,-343.0,-205.8 +794,1,-360.1,-171.5 +795,1,-377.3,-137.2 +796,1,-394.5,-102.9 +797,1,-411.6,-68.6 +798,1,-428.8,-34.3 +799,1,-445.9,0.0 +800,1,-428.8,34.3 +801,1,-411.6,68.6 +802,1,-394.5,102.9 +803,1,-377.3,137.2 +804,1,-360.1,171.5 +805,1,-343.0,205.8 +806,1,-325.9,240.1 +807,1,-308.7,274.4 +808,1,-291.5,308.7 +809,1,-274.4,343.0 +810,1,-257.2,377.3 +811,1,-240.1,411.6 +812,1,-240.1,480.2 +813,1,-205.8,480.2 
+814,1,-171.5,480.2 +815,1,-137.2,480.2 +816,1,-102.9,480.2 +817,1,-68.6,480.2 +818,1,-34.3,480.2 +819,1,0.0,480.2 +820,1,34.3,480.2 +821,1,68.6,480.2 +822,1,102.9,480.2 +823,1,137.2,480.2 +824,1,171.5,480.2 +825,1,205.8,480.2 +826,1,240.1,480.2 +827,1,257.2,445.9 +828,1,274.4,411.6 +829,1,291.5,377.3 +830,1,308.7,343.0 +831,1,325.9,308.7 +832,1,343.0,274.4 +833,1,360.1,240.1 +834,1,377.3,205.8 +835,1,394.5,171.5 +836,1,411.6,137.2 +837,1,428.8,102.9 +838,1,445.9,68.6 +839,1,463.0,34.3 +840,1,480.2,0.0 +841,1,463.0,-34.3 +842,1,445.9,-68.6 +843,1,428.8,-102.9 +844,1,411.6,-137.2 +845,1,394.5,-171.5 +846,1,377.3,-205.8 +847,1,360.1,-240.1 +848,1,343.0,-274.4 +849,1,325.9,-308.7 +850,1,308.7,-343.0 +851,1,291.5,-377.3 +852,1,274.4,-411.6 +853,1,257.2,-445.9 +854,1,240.1,-480.2 +855,1,205.8,-480.2 +856,1,171.5,-480.2 +857,1,137.2,-480.2 +858,1,102.9,-480.2 +859,1,68.6,-480.2 +860,1,34.3,-480.2 +861,1,0.0,-480.2 +862,1,-34.3,-480.2 +863,1,-68.6,-480.2 +864,1,-102.9,-480.2 +865,1,-137.2,-480.2 +866,1,-171.5,-480.2 +867,1,-205.8,-480.2 +868,1,-240.1,-480.2 +869,1,-257.2,-445.9 +870,1,-274.4,-411.6 +871,1,-291.5,-377.3 +872,1,-308.7,-343.0 +873,1,-325.9,-308.7 +874,1,-343.0,-274.4 +875,1,-360.1,-240.1 +876,1,-377.3,-205.8 +877,1,-394.5,-171.5 +878,1,-411.6,-137.2 +879,1,-428.8,-102.9 +880,1,-445.9,-68.6 +881,1,-463.0,-34.3 +882,1,-480.2,0.0 +883,1,-463.0,34.3 +884,1,-445.9,68.6 +885,1,-428.8,102.9 +886,1,-411.6,137.2 +887,1,-394.5,171.5 +888,1,-377.3,205.8 +889,1,-360.1,240.1 +890,1,-343.0,274.4 +891,1,-325.9,308.7 +892,1,-308.7,343.0 +893,1,-291.5,377.3 +894,1,-274.4,411.6 +895,1,-257.2,445.9 +896,1,-257.2,514.5 +897,1,-222.9,514.5 +898,1,-188.6,514.5 +899,1,-154.4,514.5 +900,1,-120.1,514.5 +901,1,-85.8,514.5 +902,1,-51.5,514.5 +903,1,-17.1,514.5 +904,1,17.1,514.5 +905,1,51.5,514.5 +906,1,85.8,514.5 +907,1,120.1,514.5 +908,1,154.4,514.5 +909,1,188.6,514.5 +910,1,222.9,514.5 +911,1,257.2,514.5 +912,1,274.4,480.2 +913,1,291.5,445.9 +914,1,308.7,411.6 +915,1,325.9,377.3 
+916,1,343.0,343.0 +917,1,360.1,308.7 +918,1,377.3,274.4 +919,1,394.5,240.1 +920,1,411.6,205.8 +921,1,428.8,171.5 +922,1,445.9,137.2 +923,1,463.0,102.9 +924,1,480.2,68.6 +925,1,497.4,34.3 +926,1,514.5,0.0 +927,1,497.4,-34.3 +928,1,480.2,-68.6 +929,1,463.0,-102.9 +930,1,445.9,-137.2 +931,1,428.8,-171.5 +932,1,411.6,-205.8 +933,1,394.5,-240.1 +934,1,377.3,-274.4 +935,1,360.1,-308.7 +936,1,343.0,-343.0 +937,1,325.9,-377.3 +938,1,308.7,-411.6 +939,1,291.5,-445.9 +940,1,274.4,-480.2 +941,1,257.2,-514.5 +942,1,222.9,-514.5 +943,1,188.6,-514.5 +944,1,154.4,-514.5 +945,1,120.1,-514.5 +946,1,85.8,-514.5 +947,1,51.5,-514.5 +948,1,17.1,-514.5 +949,1,-17.1,-514.5 +950,1,-51.5,-514.5 +951,1,-85.8,-514.5 +952,1,-120.1,-514.5 +953,1,-154.4,-514.5 +954,1,-188.6,-514.5 +955,1,-222.9,-514.5 +956,1,-257.2,-514.5 +957,1,-274.4,-480.2 +958,1,-291.5,-445.9 +959,1,-308.7,-411.6 +960,1,-325.9,-377.3 +961,1,-343.0,-343.0 +962,1,-360.1,-308.7 +963,1,-377.3,-274.4 +964,1,-394.5,-240.1 +965,1,-411.6,-205.8 +966,1,-428.8,-171.5 +967,1,-445.9,-137.2 +968,1,-463.0,-102.9 +969,1,-480.2,-68.6 +970,1,-497.4,-34.3 +971,1,-514.5,0.0 +972,1,-497.4,34.3 +973,1,-480.2,68.6 +974,1,-463.0,102.9 +975,1,-445.9,137.2 +976,1,-428.8,171.5 +977,1,-411.6,205.8 +978,1,-394.5,240.1 +979,1,-377.3,274.4 +980,1,-360.1,308.7 +981,1,-343.0,343.0 +982,1,-325.9,377.3 +983,1,-308.7,411.6 +984,1,-291.5,445.9 +985,1,-274.4,480.2 +986,1,-274.4,548.8 +987,1,-240.1,548.8 +988,1,-205.8,548.8 +989,1,-171.5,548.8 +990,1,-137.2,548.8 +991,1,-102.9,548.8 +992,1,-68.6,548.8 +993,1,-34.3,548.8 +994,1,0.0,548.8 +995,1,34.3,548.8 +996,1,68.6,548.8 +997,1,102.9,548.8 +998,1,137.2,548.8 +999,1,171.5,548.8 +1000,1,205.8,548.8 +1001,1,240.1,548.8 +1002,1,274.4,548.8 +1003,1,291.5,514.5 +1004,1,308.7,480.2 +1005,1,325.9,445.9 +1006,1,343.0,411.6 +1007,1,360.1,377.3 +1008,1,377.3,343.0 +1009,1,394.5,308.7 +1010,1,411.6,274.4 +1011,1,428.8,240.1 +1012,1,445.9,205.8 +1013,1,463.0,171.5 +1014,1,480.2,137.2 +1015,1,497.4,102.9 +1016,1,514.5,68.6 
+1017,1,531.7,34.3 +1018,1,548.8,0.0 +1019,1,531.7,-34.3 +1020,1,514.5,-68.6 +1021,1,497.4,-102.9 +1022,1,480.2,-137.2 +1023,1,463.0,-171.5 +1024,1,445.9,-205.8 +1025,1,428.8,-240.1 +1026,1,411.6,-274.4 +1027,1,394.5,-308.7 +1028,1,377.3,-343.0 +1029,1,360.1,-377.3 +1030,1,343.0,-411.6 +1031,1,325.9,-445.9 +1032,1,308.7,-480.2 +1033,1,291.5,-514.5 +1034,1,274.4,-548.8 +1035,1,240.1,-548.8 +1036,1,205.8,-548.8 +1037,1,171.5,-548.8 +1038,1,137.2,-548.8 +1039,1,102.9,-548.8 +1040,1,68.6,-548.8 +1041,1,34.3,-548.8 +1042,1,0.0,-548.8 +1043,1,-34.3,-548.8 +1044,1,-68.6,-548.8 +1045,1,-102.9,-548.8 +1046,1,-137.2,-548.8 +1047,1,-171.5,-548.8 +1048,1,-205.8,-548.8 +1049,1,-240.1,-548.8 +1050,1,-274.4,-548.8 +1051,1,-291.5,-514.5 +1052,1,-308.7,-480.2 +1053,1,-325.9,-445.9 +1054,1,-343.0,-411.6 +1055,1,-360.1,-377.3 +1056,1,-377.3,-343.0 +1057,1,-394.5,-308.7 +1058,1,-411.6,-274.4 +1059,1,-428.8,-240.1 +1060,1,-445.9,-205.8 +1061,1,-463.0,-171.5 +1062,1,-480.2,-137.2 +1063,1,-497.4,-102.9 +1064,1,-514.5,-68.6 +1065,1,-531.7,-34.3 +1066,1,-548.8,0.0 +1067,1,-531.7,34.3 +1068,1,-514.5,68.6 +1069,1,-497.4,102.9 +1070,1,-480.2,137.2 +1071,1,-463.0,171.5 +1072,1,-445.9,205.8 +1073,1,-428.8,240.1 +1074,1,-411.6,274.4 +1075,1,-394.5,308.7 +1076,1,-377.3,343.0 +1077,1,-360.1,377.3 +1078,1,-343.0,411.6 +1079,1,-325.9,445.9 +1080,1,-308.7,480.2 +1081,1,-291.5,514.5 +1082,1,-222.9,583.1 +1083,1,-188.6,583.1 +1084,1,-154.4,583.1 +1085,1,-120.1,583.1 +1086,1,-85.8,583.1 +1087,1,-51.5,583.1 +1088,1,-17.1,583.1 +1089,1,17.1,583.1 +1090,1,51.5,583.1 +1091,1,85.8,583.1 +1092,1,120.1,583.1 +1093,1,154.4,583.1 +1094,1,188.6,583.1 +1095,1,222.9,583.1 +1096,1,308.7,548.8 +1097,1,325.9,514.5 +1098,1,343.0,480.2 +1099,1,360.1,445.9 +1100,1,377.3,411.6 +1101,1,394.5,377.3 +1102,1,411.6,343.0 +1103,1,428.8,308.7 +1104,1,445.9,274.4 +1105,1,463.0,240.1 +1106,1,480.2,205.8 +1107,1,497.4,171.5 +1108,1,514.5,137.2 +1109,1,531.7,102.9 +1110,1,548.8,68.6 +1111,1,566.0,34.3 +1112,1,583.1,0.0 
+1113,1,566.0,-34.3 +1114,1,548.8,-68.6 +1115,1,531.7,-102.9 +1116,1,514.5,-137.2 +1117,1,497.4,-171.5 +1118,1,480.2,-205.8 +1119,1,463.0,-240.1 +1120,1,445.9,-274.4 +1121,1,428.8,-308.7 +1122,1,411.6,-343.0 +1123,1,394.5,-377.3 +1124,1,377.3,-411.6 +1125,1,360.1,-445.9 +1126,1,343.0,-480.2 +1127,1,325.9,-514.5 +1128,1,308.7,-548.8 +1129,1,222.9,-583.1 +1130,1,188.6,-583.1 +1131,1,154.4,-583.1 +1132,1,120.1,-583.1 +1133,1,85.8,-583.1 +1134,1,51.5,-583.1 +1135,1,17.1,-583.1 +1136,1,-17.1,-583.1 +1137,1,-51.5,-583.1 +1138,1,-85.8,-583.1 +1139,1,-120.1,-583.1 +1140,1,-154.4,-583.1 +1141,1,-188.6,-583.1 +1142,1,-222.9,-583.1 +1143,1,-308.7,-548.8 +1144,1,-325.9,-514.5 +1145,1,-343.0,-480.2 +1146,1,-360.1,-445.9 +1147,1,-377.3,-411.6 +1148,1,-394.5,-377.3 +1149,1,-411.6,-343.0 +1150,1,-428.8,-308.7 +1151,1,-445.9,-274.4 +1152,1,-463.0,-240.1 +1153,1,-480.2,-205.8 +1154,1,-497.4,-171.5 +1155,1,-514.5,-137.2 +1156,1,-531.7,-102.9 +1157,1,-548.8,-68.6 +1158,1,-566.0,-34.3 +1159,1,-583.1,0.0 +1160,1,-566.0,34.3 +1161,1,-548.8,68.6 +1162,1,-531.7,102.9 +1163,1,-514.5,137.2 +1164,1,-497.4,171.5 +1165,1,-480.2,205.8 +1166,1,-463.0,240.1 +1167,1,-445.9,274.4 +1168,1,-428.8,308.7 +1169,1,-411.6,343.0 +1170,1,-394.5,377.3 +1171,1,-377.3,411.6 +1172,1,-360.1,445.9 +1173,1,-343.0,480.2 +1174,1,-325.9,514.5 +1175,1,-308.7,548.8 +1176,1,-137.2,617.4 +1177,1,-102.9,617.4 +1178,1,-68.6,617.4 +1179,1,-34.3,617.4 +1180,1,0.0,617.4 +1181,1,34.3,617.4 +1182,1,68.6,617.4 +1183,1,102.9,617.4 +1184,1,137.2,617.4 +1185,1,360.1,514.5 +1186,1,377.3,480.2 +1187,1,394.5,445.9 +1188,1,411.6,411.6 +1189,1,428.8,377.3 +1190,1,445.9,343.0 +1191,1,463.0,308.7 +1192,1,480.2,274.4 +1193,1,497.4,240.1 +1194,1,514.5,205.8 +1195,1,531.7,171.5 +1196,1,548.8,137.2 +1197,1,566.0,102.9 +1198,1,583.1,68.6 +1199,1,600.2,34.3 +1200,1,617.4,0.0 +1201,1,600.2,-34.3 +1202,1,583.1,-68.6 +1203,1,566.0,-102.9 +1204,1,548.8,-137.2 +1205,1,531.7,-171.5 +1206,1,514.5,-205.8 +1207,1,497.4,-240.1 +1208,1,480.2,-274.4 
+1209,1,463.0,-308.7 +1210,1,445.9,-343.0 +1211,1,428.8,-377.3 +1212,1,411.6,-411.6 +1213,1,394.5,-445.9 +1214,1,377.3,-480.2 +1215,1,360.1,-514.5 +1216,1,137.2,-617.4 +1217,1,102.9,-617.4 +1218,1,68.6,-617.4 +1219,1,34.3,-617.4 +1220,1,0.0,-617.4 +1221,1,-34.3,-617.4 +1222,1,-68.6,-617.4 +1223,1,-102.9,-617.4 +1224,1,-137.2,-617.4 +1225,1,-360.1,-514.5 +1226,1,-377.3,-480.2 +1227,1,-394.5,-445.9 +1228,1,-411.6,-411.6 +1229,1,-428.8,-377.3 +1230,1,-445.9,-343.0 +1231,1,-463.0,-308.7 +1232,1,-480.2,-274.4 +1233,1,-497.4,-240.1 +1234,1,-514.5,-205.8 +1235,1,-531.7,-171.5 +1236,1,-548.8,-137.2 +1237,1,-566.0,-102.9 +1238,1,-583.1,-68.6 +1239,1,-600.2,-34.3 +1240,1,-617.4,0.0 +1241,1,-600.2,34.3 +1242,1,-583.1,68.6 +1243,1,-566.0,102.9 +1244,1,-548.8,137.2 +1245,1,-531.7,171.5 +1246,1,-514.5,205.8 +1247,1,-497.4,240.1 +1248,1,-480.2,274.4 +1249,1,-463.0,308.7 +1250,1,-445.9,343.0 +1251,1,-428.8,377.3 +1252,1,-411.6,411.6 +1253,1,-394.5,445.9 +1254,1,-377.3,480.2 +1255,1,-360.1,514.5 +1256,1,411.6,480.2 +1257,1,428.8,445.9 +1258,1,445.9,411.6 +1259,1,463.0,377.3 +1260,1,480.2,343.0 +1261,1,497.4,308.7 +1262,1,514.5,274.4 +1263,1,531.7,240.1 +1264,1,548.8,205.8 +1265,1,566.0,171.5 +1266,1,583.1,137.2 +1267,1,600.2,102.9 +1268,1,617.4,68.6 +1269,1,634.5,34.3 +1270,1,634.5,-34.3 +1271,1,617.4,-68.6 +1272,1,600.2,-102.9 +1273,1,583.1,-137.2 +1274,1,566.0,-171.5 +1275,1,548.8,-205.8 +1276,1,531.7,-240.1 +1277,1,514.5,-274.4 +1278,1,497.4,-308.7 +1279,1,480.2,-343.0 +1280,1,463.0,-377.3 +1281,1,445.9,-411.6 +1282,1,428.8,-445.9 +1283,1,411.6,-480.2 +1284,1,-411.6,-480.2 +1285,1,-428.8,-445.9 +1286,1,-445.9,-411.6 +1287,1,-463.0,-377.3 +1288,1,-480.2,-343.0 +1289,1,-497.4,-308.7 +1290,1,-514.5,-274.4 +1291,1,-531.7,-240.1 +1292,1,-548.8,-205.8 +1293,1,-566.0,-171.5 +1294,1,-583.1,-137.2 +1295,1,-600.2,-102.9 +1296,1,-617.4,-68.6 +1297,1,-634.5,-34.3 +1298,1,-634.5,34.3 +1299,1,-617.4,68.6 +1300,1,-600.2,102.9 +1301,1,-583.1,137.2 +1302,1,-566.0,171.5 +1303,1,-548.8,205.8 
+1304,1,-531.7,240.1 +1305,1,-514.5,274.4 +1306,1,-497.4,308.7 +1307,1,-480.2,343.0 +1308,1,-463.0,377.3 +1309,1,-445.9,411.6 +1310,1,-428.8,445.9 +1311,1,-411.6,480.2 +1312,1,480.2,411.6 +1313,1,497.4,377.3 +1314,1,514.5,343.0 +1315,1,531.7,308.7 +1316,1,548.8,274.4 +1317,1,566.0,240.1 +1318,1,583.1,205.8 +1319,1,600.2,171.5 +1320,1,617.4,137.2 +1321,1,617.4,-137.2 +1322,1,600.2,-171.5 +1323,1,583.1,-205.8 +1324,1,566.0,-240.1 +1325,1,548.8,-274.4 +1326,1,531.7,-308.7 +1327,1,514.5,-343.0 +1328,1,497.4,-377.3 +1329,1,480.2,-411.6 +1330,1,-480.2,-411.6 +1331,1,-497.4,-377.3 +1332,1,-514.5,-343.0 +1333,1,-531.7,-308.7 +1334,1,-548.8,-274.4 +1335,1,-566.0,-240.1 +1336,1,-583.1,-205.8 +1337,1,-600.2,-171.5 +1338,1,-617.4,-137.2 +1339,1,-617.4,137.2 +1340,1,-600.2,171.5 +1341,1,-583.1,205.8 +1342,1,-566.0,240.1 +1343,1,-548.8,274.4 +1344,1,-531.7,308.7 +1345,1,-514.5,343.0 +1346,1,-497.4,377.3 +1347,1,-480.2,411.6 diff --git a/CaloClusterGNN/data/crystal_map_raw.csv b/CaloClusterGNN/data/crystal_map_raw.csv new file mode 100644 index 0000000..410b2af --- /dev/null +++ b/CaloClusterGNN/data/crystal_map_raw.csv @@ -0,0 +1,2741 @@ +y,x,disk,phi,crate,board,sensor,BoardIdx,MBconn,ConnIdx,BoardChan,FEEchan,xcry,ycry,cryID,rouID,offID,Type +0,0,0,0,0,2,0,4,5,5,19,99,-137.2,-617.4,550,0,1100,CAL +0,1,0,0,0,3,0,6,5,2,16,136,-102.9,-617.4,549,0,1098,CAL +0,2,0,0,0,3,0,6,5,3,17,137,-68.6,-617.4,548,0,1096,CAL +0,3,0,0,0,3,0,6,5,4,18,138,-34.3,-617.4,547,0,1094,CAL +0,4,0,0,0,3,0,6,5,5,19,139,0,-617.4,546,0,1092,CAL +0,5,0,1,0,3,0,46,1,2,0,920,34.3,-617.4,545,0,1090,CAL +0,6,0,1,0,3,0,46,1,3,1,921,68.6,-617.4,544,0,1088,CAL +0,7,0,1,0,3,0,46,1,4,2,922,102.9,-617.4,543,0,1086,CAL +0,8,0,1,0,3,0,46,1,5,3,923,137.2,-617.4,542,0,1084,CAL +1,0,0,0,0,2,0,4,4,5,15,95,-222.9,-583.1,468,0,936,CAL +1,1,0,0,0,2,0,4,5,2,16,96,-188.6,-583.1,467,0,934,CAL +1,2,0,0,0,2,0,4,5,3,17,97,-154.4,-583.1,466,0,932,CAL +1,3,0,0,0,2,0,4,5,4,18,98,-120.1,-583.1,465,0,930,CAL 
+1,4,0,0,0,3,0,6,4,3,13,133,-85.8,-583.1,464,0,928,CAL +1,5,0,0,0,3,0,6,4,4,14,134,-51.5,-583.1,463,0,926,CAL +1,6,0,0,0,3,0,6,4,5,15,135,-17.1,-583.1,462,0,924,CAL +1,7,0,1,0,3,0,46,2,2,4,924,17.1,-583.1,461,0,922,CAL +1,8,0,1,0,3,0,46,2,3,5,925,51.5,-583.1,460,0,920,CAL +1,9,0,1,0,3,0,46,2,4,6,926,85.8,-583.1,459,0,918,CAL +1,10,0,1,0,3,0,46,2,5,7,927,120.1,-583.1,458,0,916,CAL +1,11,0,1,0,2,0,44,1,2,0,880,154.4,-583.1,457,0,914,CAL +1,12,0,1,0,2,0,44,1,3,1,881,188.6,-583.1,456,0,912,CAL +1,13,0,1,0,2,0,44,1,4,2,882,222.9,-583.1,455,0,910,CAL +2,0,0,0,0,1,0,2,5,4,18,58,-308.7,-548.8,469,0,938,CAL +2,1,0,0,0,1,0,2,5,5,19,59,-274.4,-548.8,376,0,752,CAL +2,2,0,0,0,2,0,4,3,5,11,91,-240.1,-548.8,375,0,750,CAL +2,3,0,0,0,2,0,4,4,2,12,92,-205.8,-548.8,374,0,748,CAL +2,4,0,0,0,2,0,4,4,3,13,93,-171.5,-548.8,373,0,746,CAL +2,5,0,0,0,2,0,4,4,4,14,94,-137.2,-548.8,372,0,744,CAL +2,6,0,0,0,3,0,6,3,4,10,130,-102.9,-548.8,371,0,742,CAL +2,7,0,0,0,3,0,6,3,5,11,131,-68.6,-548.8,370,0,740,CAL +2,8,0,0,0,3,0,6,4,2,12,132,-34.3,-548.8,369,0,738,CAL +2,9,0,1,0,3,0,46,3,2,8,928,0,-548.8,368,0,736,CAL +2,10,0,1,0,3,0,46,3,3,9,929,34.3,-548.8,367,0,734,CAL +2,11,0,1,0,3,0,46,3,4,10,930,68.6,-548.8,366,0,732,CAL +2,12,0,1,0,3,0,46,3,5,11,931,102.9,-548.8,365,0,730,CAL +2,13,0,1,0,2,0,44,1,5,3,883,137.2,-548.8,364,0,728,CAL +2,14,0,1,0,2,0,44,2,2,4,884,171.5,-548.8,363,0,726,CAL +2,15,0,1,0,2,0,44,2,3,5,885,205.8,-548.8,362,0,724,CAL +2,16,0,1,0,2,0,44,2,4,6,886,240.1,-548.8,361,0,722,CAL +2,17,0,1,0,1,0,42,1,2,0,840,274.4,-548.8,360,0,720,CAL +2,18,0,1,0,1,0,42,1,3,1,841,308.7,-548.8,454,0,908,CAL +3,0,0,0,0,0,0,0,5,5,19,19,-360.1,-514.5,551,0,1102,CAL +3,1,0,0,0,1,0,2,4,4,14,54,-325.9,-514.5,470,0,940,CAL +3,2,0,0,0,1,0,2,4,5,15,55,-291.5,-514.5,377,0,754,CAL +3,3,0,0,0,1,0,2,5,2,16,56,-257.2,-514.5,282,0,564,CAL +3,4,0,0,0,1,0,2,5,3,17,57,-222.9,-514.5,281,0,562,CAL +3,5,0,0,0,2,0,4,3,2,8,88,-188.6,-514.5,280,0,560,CAL +3,6,0,0,0,2,0,4,3,3,9,89,-154.4,-514.5,279,0,558,CAL 
+3,7,0,0,0,2,0,4,3,4,10,90,-120.1,-514.5,278,0,556,CAL +3,8,0,0,0,3,0,6,2,5,7,127,-85.8,-514.5,277,0,554,CAL +3,9,0,0,0,3,0,6,3,2,8,128,-51.5,-514.5,276,0,552,CAL +3,10,0,0,0,3,0,6,3,3,9,129,-17.1,-514.5,275,0,550,CAL +3,11,0,1,0,3,0,46,4,2,12,932,17.1,-514.5,274,0,548,CAL +3,12,0,1,0,3,1,46,4,3,13,933,51.5,-514.5,273,1,547,CAL +3,13,0,1,0,2,0,44,2,5,7,887,85.8,-514.5,272,0,544,CAL +3,14,0,1,0,2,0,44,3,2,8,888,120.1,-514.5,271,0,542,CAL +3,15,0,1,0,2,0,44,3,3,9,889,154.4,-514.5,270,0,540,CAL +3,16,0,1,0,2,0,44,3,4,10,890,188.6,-514.5,269,0,538,CAL +3,17,0,1,0,1,0,42,1,4,2,842,222.9,-514.5,268,0,536,CAL +3,18,0,1,0,1,0,42,1,5,3,843,257.2,-514.5,267,0,534,CAL +3,19,0,1,0,1,0,42,2,2,4,844,291.5,-514.5,359,0,718,CAL +3,20,0,1,0,1,0,42,2,3,5,845,325.9,-514.5,453,0,906,CAL +3,21,0,1,0,0,0,40,1,2,0,800,360.1,-514.5,541,0,1082,CAL +4,0,0,0,0,0,0,0,4,5,15,15,-411.6,-480.2,610,0,1220,CAPHRI +4,1,0,0,0,0,0,0,5,2,16,16,-377.3,-480.2,552,0,1104,CAL +4,2,0,0,0,0,0,0,5,3,17,17,-343,-480.2,471,0,942,CAL +4,3,0,0,0,0,0,0,5,4,18,18,-308.7,-480.2,378,0,756,CAL +4,4,0,0,0,1,0,2,3,5,11,51,-274.4,-480.2,283,0,566,CAL +4,5,0,0,0,1,0,2,4,2,12,52,-240.1,-480.2,194,0,388,CAL +4,6,0,0,0,1,0,2,4,3,13,53,-205.8,-480.2,193,0,386,CAL +4,7,0,0,0,2,0,4,2,3,5,85,-171.5,-480.2,192,0,384,CAL +4,8,0,0,0,2,0,4,2,4,6,86,-137.2,-480.2,191,0,382,CAL +4,9,0,0,0,2,0,4,2,5,7,87,-102.9,-480.2,190,0,380,CAL +4,10,0,0,0,3,0,6,2,2,4,124,-68.6,-480.2,189,0,378,CAL +4,11,0,0,0,3,0,6,2,3,5,125,-34.3,-480.2,188,0,376,CAL +4,12,0,0,0,3,0,6,2,4,6,126,0,-480.2,187,0,374,CAL +4,13,0,1,0,3,0,46,4,4,14,934,34.3,-480.2,186,0,372,CAL +4,14,0,1,0,3,0,46,4,5,15,935,68.6,-480.2,185,0,370,CAL +4,15,0,1,0,2,0,44,3,5,11,891,102.9,-480.2,184,0,368,CAL +4,16,0,1,0,2,0,44,4,2,12,892,137.2,-480.2,183,0,366,CAL +4,17,0,1,0,2,0,44,4,3,13,893,171.5,-480.2,182,0,364,CAL +4,18,0,1,0,1,0,42,2,4,6,846,205.8,-480.2,181,0,362,CAL +4,19,0,1,0,1,0,42,2,5,7,847,240.1,-480.2,180,0,360,CAL +4,20,0,1,0,1,0,42,3,2,8,848,274.4,-480.2,266,0,532,CAL 
+4,21,0,1,0,1,0,42,3,3,9,849,308.7,-480.2,358,0,716,CAL +4,22,0,1,0,0,0,40,1,3,1,801,343,-480.2,452,0,904,CAL +4,23,0,1,0,0,0,40,1,4,2,802,377.3,-480.2,540,0,1080,CAL +4,24,0,1,0,0,0,40,1,5,3,803,411.6,-480.2,609,0,1218,CAPHRI +5,0,0,0,1,3,0,14,5,5,19,299,-428.8,-445.9,611,0,1222,CAL +5,1,0,0,0,0,0,0,3,5,11,11,-394.5,-445.9,553,0,1106,CAL +5,2,0,0,0,0,0,0,4,2,12,12,-360.1,-445.9,472,0,944,CAL +5,3,0,0,0,0,0,0,4,3,13,13,-325.9,-445.9,379,0,758,CAL +5,4,0,0,0,0,0,0,4,4,14,14,-291.5,-445.9,284,0,568,CAL +5,5,0,0,0,1,0,2,3,2,8,48,-257.2,-445.9,195,0,390,CAL +5,6,0,0,0,1,0,2,3,3,9,49,-222.9,-445.9,112,0,224,CAL +5,7,0,0,0,1,0,2,3,4,10,50,-188.6,-445.9,111,0,222,CAL +5,8,0,0,0,2,0,4,1,4,2,82,-154.4,-445.9,110,0,220,CAL +5,9,0,0,0,2,0,4,1,5,3,83,-120.1,-445.9,109,0,218,CAL +5,10,0,0,0,2,0,4,2,2,4,84,-85.8,-445.9,108,0,216,CAL +5,11,0,0,0,3,0,6,1,4,2,122,-51.5,-445.9,107,0,214,CAL +5,12,0,0,0,3,0,6,1,5,3,123,-17.1,-445.9,106,0,212,CAL +5,13,0,1,0,3,0,46,5,2,16,936,17.1,-445.9,105,0,210,CAL +5,14,0,1,0,3,0,46,5,3,17,937,51.5,-445.9,104,0,208,CAL +5,15,0,1,0,2,0,44,4,4,14,894,85.8,-445.9,103,0,206,CAL +5,16,0,1,0,2,0,44,4,5,15,895,120.1,-445.9,102,0,204,CAL +5,17,0,1,0,2,0,44,5,2,16,896,154.4,-445.9,101,0,202,CAL +5,18,0,1,0,1,0,42,3,4,10,850,188.6,-445.9,100,0,200,CAL +5,19,0,1,0,1,0,42,3,5,11,851,222.9,-445.9,99,0,198,CAL +5,20,0,1,0,1,0,42,4,2,12,852,257.2,-445.9,179,0,358,CAL +5,21,0,1,0,1,0,42,4,3,13,853,291.5,-445.9,265,0,530,CAL +5,22,0,1,0,0,0,40,2,2,4,804,325.9,-445.9,357,0,714,CAL +5,23,0,1,0,0,0,40,2,3,5,805,360.1,-445.9,451,0,902,CAL +5,24,0,1,0,0,0,40,2,4,6,806,394.5,-445.9,539,0,1078,CAL +5,25,0,1,0,0,0,40,2,5,7,807,428.8,-445.9,608,0,1216,CAL +6,0,0,0,1,3,0,14,5,4,18,298,-480.2,-411.6,656,0,1312,CAL +6,1,0,0,1,3,0,14,5,3,17,297,-445.9,-411.6,612,0,1224,CAL +6,2,0,0,1,3,0,14,5,2,16,296,-411.6,-411.6,554,0,1108,CAL +6,3,0,0,0,0,0,0,2,5,7,7,-377.3,-411.6,473,0,946,CAL +6,4,0,0,0,0,0,0,3,2,8,8,-343,-411.6,380,0,760,CAL 
+6,5,0,0,0,0,0,0,3,3,9,9,-308.7,-411.6,285,0,570,CAL +6,6,0,0,0,0,0,0,3,4,10,10,-274.4,-411.6,196,0,392,CAL +6,7,0,0,0,1,0,2,2,3,5,45,-240.1,-411.6,113,0,226,CAL +6,8,0,0,0,1,0,2,2,4,6,46,-205.8,-411.6,50,0,100,CAL +6,9,0,0,0,1,0,2,2,5,7,47,-171.5,-411.6,49,0,98,CAL +6,10,0,0,0,2,0,4,1,2,0,80,-137.2,-411.6,48,0,96,CAL +6,11,0,0,0,2,0,4,1,3,1,81,-102.9,-411.6,47,0,94,CAL +6,12,0,0,0,3,0,6,1,2,0,120,-68.6,-411.6,46,0,92,CAL +6,13,0,0,0,3,0,6,1,3,1,121,-34.3,-411.6,45,0,90,CAL +6,14,0,1,0,3,0,46,5,4,18,938,0,-411.6,44,0,88,CAL +6,15,0,1,0,3,0,46,5,5,19,939,34.3,-411.6,43,0,86,CAL +6,16,0,1,0,2,0,44,5,3,17,897,68.6,-411.6,42,0,84,CAL +6,17,0,1,0,2,0,44,5,4,18,898,102.9,-411.6,41,0,82,CAL +6,18,0,1,0,2,0,44,5,5,19,899,137.2,-411.6,40,0,80,CAL +6,19,0,1,0,1,0,42,4,4,14,854,171.5,-411.6,39,0,78,CAL +6,20,0,1,0,1,0,42,4,5,15,855,205.8,-411.6,38,0,76,CAL +6,21,0,1,0,1,0,42,5,2,16,856,240.1,-411.6,98,0,196,CAL +6,22,0,1,0,0,0,40,3,2,8,808,274.4,-411.6,178,0,356,CAL +6,23,0,1,0,0,0,40,3,3,9,809,308.7,-411.6,264,0,528,CAL +6,24,0,1,0,0,0,40,3,4,10,810,343,-411.6,356,0,712,CAL +6,25,0,1,0,0,0,40,3,5,11,811,377.3,-411.6,450,0,900,CAL +6,26,0,1,1,3,0,54,1,4,2,1082,411.6,-411.6,538,0,1076,CAL +6,27,0,1,1,3,0,54,1,3,1,1081,445.9,-411.6,607,0,1214,CAL +6,28,0,1,1,3,0,54,1,2,0,1080,480.2,-411.6,655,0,1310,CAL +7,0,0,0,1,3,0,14,4,5,15,295,-497.4,-377.3,657,0,1314,CAL +7,1,0,0,1,3,0,14,4,4,14,294,-463,-377.3,613,0,1226,CAL +7,2,0,0,1,3,0,14,4,3,13,293,-428.8,-377.3,555,0,1110,CAL +7,3,0,0,1,3,0,14,4,2,12,292,-394.5,-377.3,474,0,948,CAL +7,4,0,0,1,3,0,14,3,5,11,291,-360.1,-377.3,381,0,762,CAL +7,5,0,0,0,0,0,0,2,2,4,4,-325.9,-377.3,286,0,572,CAL +7,6,0,0,0,0,0,0,2,3,5,5,-291.5,-377.3,197,0,394,CAL +7,7,0,0,0,0,0,0,2,4,6,6,-257.2,-377.3,114,0,228,CAL +7,8,0,0,0,1,0,2,1,3,1,41,-222.9,-377.3,51,0,102,CAL +7,9,0,0,0,1,0,2,1,4,2,42,-188.6,-377.3,13,0,26,CAL +7,10,0,0,0,1,0,2,1,5,3,43,-154.4,-377.3,12,0,24,CAL +7,11,0,0,0,1,0,2,2,2,4,44,-120.1,-377.3,11,0,22,CAL 
+7,12,0,1,0,1,0,42,5,3,17,857,120.1,-377.3,10,0,20,CAL +7,13,0,1,0,1,0,42,5,4,18,858,154.4,-377.3,9,0,18,CAL +7,14,0,1,0,1,0,42,5,5,19,859,188.6,-377.3,8,0,16,CAL +7,15,0,1,0,0,0,40,4,2,12,812,222.9,-377.3,37,0,74,CAL +7,16,0,1,0,0,0,40,4,3,13,813,257.2,-377.3,97,0,194,CAL +7,17,0,1,0,0,0,40,4,4,14,814,291.5,-377.3,177,0,354,CAL +7,18,0,1,0,0,0,40,4,5,15,815,325.9,-377.3,263,0,526,CAL +7,19,0,1,1,3,0,54,2,5,7,1087,360.1,-377.3,355,0,710,CAL +7,20,0,1,1,3,0,54,2,4,6,1086,394.5,-377.3,449,0,898,CAL +7,21,0,1,1,3,0,54,2,3,5,1085,428.8,-377.3,537,0,1074,CAL +7,22,0,1,1,3,0,54,2,2,4,1084,463,-377.3,606,0,1212,CAL +7,23,0,1,1,3,0,54,1,5,3,1083,497.4,-377.3,654,0,1308,CAL +8,0,0,0,1,2,0,12,5,5,19,259,-514.5,-343,658,0,1316,CAL +8,1,0,0,1,3,0,14,3,4,10,290,-480.2,-343,614,0,1228,CAL +8,2,0,0,1,3,0,14,3,3,9,289,-445.9,-343,556,0,1112,CAL +8,3,0,0,1,3,0,14,3,2,8,288,-411.6,-343,475,0,950,CAL +8,4,0,0,1,3,0,14,2,5,7,287,-377.3,-343,382,0,764,CAL +8,5,0,0,1,3,0,14,2,4,6,286,-343,-343,287,0,574,CAL +8,6,0,0,0,0,0,0,1,3,1,1,-308.7,-343,198,0,396,CAL +8,7,0,0,0,0,0,0,1,4,2,2,-274.4,-343,115,0,230,CAL +8,8,0,0,0,0,0,0,1,5,3,3,-240.1,-343,52,0,104,CAL +8,9,0,0,0,1,0,2,1,2,0,40,-205.8,-343,14,0,28,CAL +8,10,0,1,0,0,0,40,5,2,16,816,205.8,-343,7,0,14,CAL +8,11,0,1,0,0,0,40,5,3,17,817,240.1,-343,36,0,72,CAL +8,12,0,1,0,0,0,40,5,4,18,818,274.4,-343,96,0,192,CAL +8,13,0,1,1,3,0,54,4,2,12,1092,308.7,-343,176,0,352,CAL +8,14,0,1,1,3,0,54,3,5,11,1091,343,-343,262,0,524,CAL +8,15,0,1,1,3,0,54,3,4,10,1090,377.3,-343,354,0,708,CAL +8,16,0,1,1,3,0,54,3,3,9,1089,411.6,-343,448,0,896,CAL +8,17,0,1,1,3,0,54,3,2,8,1088,445.9,-343,536,0,1072,CAL +8,18,0,1,1,2,0,52,1,3,1,1041,480.2,-343,605,0,1210,CAL +8,19,0,1,1,2,0,52,1,2,0,1040,514.5,-343,653,0,1306,CAL +9,0,0,0,1,2,0,12,5,4,18,258,-531.7,-308.7,659,0,1318,CAL +9,1,0,0,1,2,0,12,5,3,17,257,-497.4,-308.7,615,0,1230,CAL +9,2,0,0,1,2,0,12,5,2,16,256,-463,-308.7,557,0,1114,CAL +9,3,0,0,1,2,0,12,4,5,15,255,-428.8,-308.7,476,0,952,CAL 
+9,4,0,0,1,3,0,14,2,3,5,285,-394.5,-308.7,383,0,766,CAL +9,5,0,0,1,3,0,14,2,2,4,284,-360.1,-308.7,288,0,576,CAL +9,6,0,0,1,3,0,14,1,5,3,283,-325.9,-308.7,199,0,398,CAL +9,7,0,0,1,3,0,14,1,4,2,282,-291.5,-308.7,116,0,232,CAL +9,8,0,0,0,0,0,0,1,2,0,0,-257.2,-308.7,53,0,106,CAL +9,9,0,1,0,0,0,40,5,5,19,819,257.2,-308.7,35,0,70,CAL +9,10,0,1,1,3,0,54,5,2,16,1096,291.5,-308.7,95,0,190,CAL +9,11,0,1,1,3,0,54,4,5,15,1095,325.9,-308.7,175,0,350,CAL +9,12,0,1,1,3,0,54,4,4,14,1094,360.1,-308.7,261,0,522,CAL +9,13,0,1,1,3,0,54,4,3,13,1093,394.5,-308.7,353,0,706,CAL +9,14,0,1,1,2,0,52,2,3,5,1045,428.8,-308.7,447,0,894,CAL +9,15,0,1,1,2,0,52,2,2,4,1044,463,-308.7,535,0,1070,CAL +9,16,0,1,1,2,0,52,1,5,3,1043,497.4,-308.7,604,0,1208,CAL +9,17,0,1,1,2,0,52,1,4,2,1042,531.7,-308.7,652,0,1304,CAL +10,0,0,0,1,2,0,12,4,4,14,254,-548.8,-274.4,660,0,1320,CAL +10,1,0,0,1,2,0,12,4,3,13,253,-514.5,-274.4,616,0,1232,CAL +10,2,0,0,1,2,0,12,4,2,12,252,-480.2,-274.4,558,0,1116,CAL +10,3,0,0,1,2,0,12,3,5,11,251,-445.9,-274.4,477,0,954,CAL +10,4,0,0,1,2,0,12,3,4,10,250,-411.6,-274.4,384,0,768,CAL +10,5,0,0,1,2,0,12,3,3,9,249,-377.3,-274.4,289,0,578,CAL +10,6,0,0,1,3,0,14,1,3,1,281,-343,-274.4,200,0,400,CAL +10,7,0,0,1,3,0,14,1,2,0,280,-308.7,-274.4,117,0,234,CAL +10,8,0,1,1,3,0,54,5,5,19,1099,308.7,-274.4,94,0,188,CAL +10,9,0,1,1,3,0,54,5,4,18,1098,343,-274.4,174,0,348,CAL +10,10,0,1,1,3,0,54,5,3,17,1097,377.3,-274.4,260,0,520,CAL +10,11,0,1,1,2,0,52,3,4,10,1050,411.6,-274.4,352,0,704,CAL +10,12,0,1,1,2,0,52,3,3,9,1049,445.9,-274.4,446,0,892,CAL +10,13,0,1,1,2,0,52,3,2,8,1048,480.2,-274.4,534,0,1068,CAL +10,14,0,1,1,2,0,52,2,5,7,1047,514.5,-274.4,603,0,1206,CAL +10,15,0,1,1,2,0,52,2,4,6,1046,548.8,-274.4,651,0,1302,CAL +11,0,0,0,1,1,0,10,5,4,18,218,-566,-240.1,661,0,1322,CAL +11,1,0,0,1,1,0,10,5,3,17,217,-531.7,-240.1,617,0,1234,CAL +11,2,0,0,1,1,0,10,5,2,16,216,-497.4,-240.1,559,0,1118,CAL +11,3,0,0,1,2,0,12,3,2,8,248,-463,-240.1,478,0,956,CAL 
+11,4,0,0,1,2,0,12,2,5,7,247,-428.8,-240.1,385,0,770,CAL +11,5,0,0,1,2,0,12,2,4,6,246,-394.5,-240.1,290,0,580,CAL +11,6,0,0,1,2,0,12,2,3,5,245,-360.1,-240.1,201,0,402,CAL +11,7,0,0,1,2,0,12,2,2,4,244,-325.9,-240.1,118,0,236,CAL +11,8,0,1,1,2,0,52,4,5,15,1055,325.9,-240.1,93,0,186,CAL +11,9,0,1,1,2,0,52,4,4,14,1054,360.1,-240.1,173,0,346,CAL +11,10,0,1,1,2,0,52,4,3,13,1053,394.5,-240.1,259,0,518,CAL +11,11,0,1,1,2,0,52,4,2,12,1052,428.8,-240.1,351,0,702,CAL +11,12,0,1,1,2,0,52,3,5,11,1051,463,-240.1,445,0,890,CAL +11,13,0,1,1,1,0,50,1,5,3,1003,497.4,-240.1,533,0,1066,CAL +11,14,0,1,1,1,0,50,1,4,2,1002,531.7,-240.1,602,0,1204,CAL +11,15,0,1,1,1,0,50,1,3,1,1001,566,-240.1,650,0,1300,CAL +12,0,0,0,1,1,0,10,4,5,15,215,-583.1,-205.8,662,0,1324,CAL +12,1,0,0,1,1,0,10,4,4,14,214,-548.8,-205.8,618,0,1236,CAL +12,2,0,0,1,1,0,10,4,3,13,213,-514.5,-205.8,560,0,1120,CAL +12,3,0,0,1,1,0,10,4,2,12,212,-480.2,-205.8,479,0,958,CAL +12,4,0,0,1,2,0,12,1,5,3,243,-445.9,-205.8,386,0,772,CAL +12,5,0,0,1,2,0,12,1,4,2,242,-411.6,-205.8,291,0,582,CAL +12,6,0,0,1,2,0,12,1,3,1,241,-377.3,-205.8,202,0,404,CAL +12,7,0,0,1,2,0,12,1,2,0,240,-343,-205.8,119,0,238,CAL +12,8,0,1,1,2,0,52,5,5,19,1059,343,-205.8,92,0,184,CAL +12,9,0,1,1,2,0,52,5,4,18,1058,377.3,-205.8,172,0,344,CAL +12,10,0,1,1,2,0,52,5,3,17,1057,411.6,-205.8,258,0,516,CAL +12,11,0,1,1,2,0,52,5,2,16,1056,445.9,-205.8,350,0,700,CAL +12,12,0,1,1,1,0,50,2,5,7,1007,480.2,-205.8,444,0,888,CAL +12,13,0,1,1,1,0,50,2,4,6,1006,514.5,-205.8,532,0,1064,CAL +12,14,0,1,1,1,0,50,2,3,5,1005,548.8,-205.8,601,0,1202,CAL +12,15,0,1,1,1,0,50,2,2,4,1004,583.1,-205.8,649,0,1298,CAL +13,0,0,0,1,1,0,10,3,5,11,211,-600.2,-171.5,663,0,1326,CAL +13,1,0,0,1,1,0,10,3,4,10,210,-566,-171.5,619,0,1238,CAL +13,2,0,0,1,1,0,10,3,3,9,209,-531.7,-171.5,561,0,1122,CAL +13,3,0,0,1,1,0,10,3,2,8,208,-497.4,-171.5,480,0,960,CAL +13,4,0,0,1,1,0,10,2,5,7,207,-463,-171.5,387,0,774,CAL +13,5,0,0,1,1,0,10,2,4,6,206,-428.8,-171.5,292,0,584,CAL 
+13,6,0,0,1,1,0,10,2,3,5,205,-394.5,-171.5,203,0,406,CAL +13,7,0,0,1,1,0,10,2,2,4,204,-360.1,-171.5,120,0,240,CAL +13,8,0,1,1,1,0,50,4,5,15,1015,360.1,-171.5,91,0,182,CAL +13,9,0,1,1,1,0,50,4,4,14,1014,394.5,-171.5,171,0,342,CAL +13,10,0,1,1,1,0,50,4,3,13,1013,428.8,-171.5,257,0,514,CAL +13,11,0,1,1,1,0,50,4,2,12,1012,463,-171.5,349,0,698,CAL +13,12,0,1,1,1,0,50,3,5,11,1011,497.4,-171.5,443,0,886,CAL +13,13,0,1,1,1,0,50,3,4,10,1010,531.7,-171.5,531,0,1062,CAL +13,14,0,1,1,1,0,50,3,3,9,1009,566,-171.5,600,0,1200,CAL +13,15,0,1,1,1,0,50,3,2,8,1008,600.2,-171.5,648,0,1296,CAL +14,0,0,0,1,0,0,8,5,3,17,177,-617.4,-137.2,664,0,1328,CAL +14,1,0,0,1,0,0,8,4,4,14,174,-583.1,-137.2,620,0,1240,CAL +14,2,0,0,1,0,0,8,3,5,11,171,-548.8,-137.2,562,0,1124,CAL +14,3,0,0,1,0,0,8,3,2,8,168,-514.5,-137.2,481,0,962,CAL +14,4,0,0,1,1,0,10,1,5,3,203,-480.2,-137.2,388,0,776,CAL +14,5,0,0,1,1,0,10,1,4,2,202,-445.9,-137.2,293,0,586,CAL +14,6,0,0,1,1,0,10,1,3,1,201,-411.6,-137.2,204,0,408,CAL +14,7,0,0,1,1,0,10,1,2,0,200,-377.3,-137.2,121,0,242,CAL +14,8,0,1,1,1,0,50,5,5,19,1019,377.3,-137.2,90,0,180,CAL +14,9,0,1,1,1,0,50,5,4,18,1018,411.6,-137.2,170,0,340,CAL +14,10,0,1,1,1,0,50,5,3,17,1017,445.9,-137.2,256,0,512,CAL +14,11,0,1,1,1,0,50,5,2,16,1016,480.2,-137.2,348,0,696,CAL +14,12,0,1,1,0,0,48,3,4,10,970,514.5,-137.2,442,0,884,CAL +14,13,0,1,1,0,0,48,2,5,7,967,548.8,-137.2,530,0,1060,CAL +14,14,0,1,1,0,0,48,2,2,4,964,583.1,-137.2,599,0,1198,CAL +14,15,0,1,1,0,0,48,1,3,1,961,617.4,-137.2,647,0,1294,CAL +15,0,0,0,1,0,0,8,5,2,16,176,-600.2,-102.9,621,0,1242,CAL +15,1,0,0,1,0,0,8,4,3,13,173,-566,-102.9,563,0,1126,CAL +15,2,0,0,1,0,0,8,3,4,10,170,-531.7,-102.9,482,0,964,CAL +15,3,0,0,1,0,0,8,2,5,7,167,-497.4,-102.9,389,0,778,CAL +15,4,0,0,1,0,0,8,2,3,5,165,-463,-102.9,294,0,588,CAL +15,5,0,0,1,0,0,8,1,5,3,163,-428.8,-102.9,205,0,410,CAL +15,6,0,0,1,0,0,8,1,3,1,161,-394.5,-102.9,122,0,244,CAL +15,7,0,1,1,0,0,48,5,3,17,977,394.5,-102.9,89,0,178,CAL 
+15,8,0,1,1,0,0,48,4,5,15,975,428.8,-102.9,169,0,338,CAL +15,9,0,1,1,0,0,48,4,3,13,973,463,-102.9,255,0,510,CAL +15,10,0,1,1,0,0,48,3,5,11,971,497.4,-102.9,347,0,694,CAL +15,11,0,1,1,0,0,48,3,2,8,968,531.7,-102.9,441,0,882,CAL +15,12,0,1,1,0,0,48,2,3,5,965,566,-102.9,529,0,1058,CAL +15,13,0,1,1,0,0,48,1,4,2,962,600.2,-102.9,598,0,1196,CAL +16,0,0,0,1,0,0,8,4,5,15,175,-617.4,-68.6,622,0,1244,CAL +16,1,0,0,1,0,0,8,4,2,12,172,-583.1,-68.6,564,0,1128,CAL +16,2,0,0,1,0,0,8,3,3,9,169,-548.8,-68.6,483,0,966,CAL +16,3,0,0,1,0,0,8,2,4,6,166,-514.5,-68.6,390,0,780,CAL +16,4,0,0,1,0,0,8,2,2,4,164,-480.2,-68.6,295,0,590,CAL +16,5,0,0,1,0,0,8,1,4,2,162,-445.9,-68.6,206,0,412,CAL +16,6,0,0,1,0,0,8,1,2,0,160,-411.6,-68.6,123,0,246,CAL +16,7,0,1,1,0,0,48,5,4,18,978,411.6,-68.6,88,0,176,CAL +16,8,0,1,1,0,0,48,5,2,16,976,445.9,-68.6,168,0,336,CAL +16,9,0,1,1,0,0,48,4,4,14,974,480.2,-68.6,254,0,508,CAL +16,10,0,1,1,0,0,48,4,2,12,972,514.5,-68.6,346,0,692,CAL +16,11,0,1,1,0,0,48,3,3,9,969,548.8,-68.6,440,0,880,CAL +16,12,0,1,1,0,0,48,2,4,6,966,583.1,-68.6,528,0,1056,CAL +16,13,0,1,1,0,0,48,1,5,3,963,617.4,-68.6,597,0,1194,CAL +17,0,0,0,2,2,0,20,5,5,19,419,-634.5,-34.3,623,0,1246,CAL +17,1,0,0,2,2,0,20,5,4,18,418,-600.2,-34.3,565,0,1130,CAL +17,2,0,0,2,2,0,20,5,2,16,416,-566,-34.3,484,0,968,CAL +17,3,0,0,2,2,0,20,4,4,14,414,-531.7,-34.3,391,0,782,CAL +17,4,0,0,2,2,0,20,3,5,11,411,-497.4,-34.3,296,0,592,CAL +17,5,0,0,2,2,0,20,3,2,8,408,-463,-34.3,207,0,414,CAL +17,6,0,0,2,2,0,20,2,3,5,405,-428.8,-34.3,124,0,248,CAL +17,7,0,0,2,2,0,20,1,4,2,402,-394.5,-34.3,54,0,108,CAL +17,8,0,1,2,2,0,60,5,5,19,1219,394.5,-34.3,34,0,68,CAL +17,9,0,1,2,2,0,60,5,2,16,1216,428.8,-34.3,87,0,174,CAL +17,10,0,1,2,2,0,60,4,3,13,1213,463,-34.3,167,0,334,CAL +17,11,0,1,2,2,0,60,3,4,10,1210,497.4,-34.3,253,0,506,CAL +17,12,0,1,2,2,0,60,2,5,7,1207,531.7,-34.3,345,0,690,CAL +17,13,0,1,2,2,0,60,2,2,4,1204,566,-34.3,439,0,878,CAL +17,14,0,1,2,2,0,60,1,4,2,1202,600.2,-34.3,527,0,1054,CAL 
+17,15,0,1,2,2,0,60,1,2,0,1200,634.5,-34.3,596,0,1192,CAL +18,0,0,0,2,2,0,20,5,3,17,417,-617.4,0,566,0,1132,CAL +18,1,0,0,2,2,0,20,4,5,15,415,-583.1,0,485,0,970,CAL +18,2,0,0,2,2,0,20,4,3,13,413,-548.8,0,392,0,784,CAL +18,3,0,0,2,2,0,20,3,4,10,410,-514.5,0,297,0,594,CAL +18,4,0,0,2,2,0,20,2,5,7,407,-480.2,0,208,0,416,CAL +18,5,0,0,2,2,0,20,2,2,4,404,-445.9,0,125,0,250,CAL +18,6,0,0,2,2,0,20,1,3,1,401,-411.6,0,55,0,110,CAL +18,7,0,1,2,2,0,60,5,3,17,1217,411.6,0,33,0,66,CAL +18,8,0,1,2,2,0,60,4,4,14,1214,445.9,0,86,0,172,CAL +18,9,0,1,2,2,0,60,3,5,11,1211,480.2,0,166,0,332,CAL +18,10,0,1,2,2,0,60,3,2,8,1208,514.5,0,252,0,504,CAL +18,11,0,1,2,2,0,60,2,3,5,1205,548.8,0,344,0,688,CAL +18,12,0,1,2,2,0,60,1,5,3,1203,583.1,0,438,0,876,CAL +18,13,0,1,2,2,0,60,1,3,1,1201,617.4,0,526,0,1052,CAL +19,0,0,0,2,1,0,18,5,5,19,379,-634.5,34.3,624,0,1248,CAL +19,1,0,0,2,1,0,18,4,5,15,375,-600.2,34.3,567,0,1134,CAL +19,2,0,0,2,1,0,18,4,4,14,374,-566,34.3,486,0,972,CAL +19,3,0,0,2,2,0,20,4,2,12,412,-531.7,34.3,393,0,786,CAL +19,4,0,0,2,2,0,20,3,3,9,409,-497.4,34.3,298,0,596,CAL +19,5,0,0,2,2,0,20,2,4,6,406,-463,34.3,209,0,418,CAL +19,6,0,0,2,2,0,20,1,5,3,403,-428.8,34.3,126,0,252,CAL +19,7,0,0,2,2,0,20,1,2,0,400,-394.5,34.3,56,0,112,CAL +19,8,0,1,2,2,0,60,5,4,18,1218,394.5,34.3,32,0,64,CAL +19,9,0,1,2,2,0,60,4,5,15,1215,428.8,34.3,85,0,170,CAL +19,10,0,1,2,2,0,60,4,2,12,1212,463,34.3,165,0,330,CAL +19,11,0,1,2,2,0,60,3,3,9,1209,497.4,34.3,251,0,502,CAL +19,12,0,1,2,2,0,60,2,4,6,1206,531.7,34.3,343,0,686,CAL +19,13,0,1,2,1,0,58,1,4,2,1162,566,34.3,437,0,874,CAL +19,14,0,1,2,1,0,58,1,3,1,1161,600.2,34.3,525,0,1050,CAL +19,15,0,1,2,1,0,58,1,2,0,1160,634.5,34.3,595,0,1190,CAL +20,0,0,0,2,1,0,18,5,4,18,378,-617.4,68.6,625,0,1250,CAL +20,1,0,0,2,1,0,18,4,3,13,373,-583.1,68.6,568,0,1136,CAL +20,2,0,0,2,1,0,18,4,2,12,372,-548.8,68.6,487,0,974,CAL +20,3,0,0,2,1,0,18,2,5,7,367,-514.5,68.6,394,0,788,CAL +20,4,0,0,2,1,0,18,2,4,6,366,-480.2,68.6,299,0,598,CAL 
+20,5,0,0,2,1,0,18,1,5,3,363,-445.9,68.6,210,0,420,CAL +20,6,0,0,2,1,0,18,1,4,2,362,-411.6,68.6,127,0,254,CAL +20,7,0,1,2,1,0,58,5,4,18,1178,411.6,68.6,84,0,168,CAL +20,8,0,1,2,1,0,58,5,2,16,1176,445.9,68.6,164,0,328,CAL +20,9,0,1,2,1,0,58,4,4,14,1174,480.2,68.6,250,0,500,CAL +20,10,0,1,2,1,0,58,4,2,12,1172,514.5,68.6,342,0,684,CAL +20,11,0,1,2,1,0,58,3,3,9,1169,548.8,68.6,436,0,872,CAL +20,12,0,1,2,1,0,58,2,4,6,1166,583.1,68.6,524,0,1048,CAL +20,13,0,1,2,1,0,58,1,5,3,1163,617.4,68.6,594,0,1188,CAL +21,0,0,0,2,1,0,18,5,3,17,377,-600.2,102.9,626,0,1252,CAL +21,1,0,0,2,1,0,18,3,3,9,369,-566,102.9,569,0,1138,CAL +21,2,0,0,2,1,0,18,3,2,8,368,-531.7,102.9,488,0,976,CAL +21,3,0,0,2,1,0,18,2,3,5,365,-497.4,102.9,395,0,790,CAL +21,4,0,0,2,1,0,18,2,2,4,364,-463,102.9,300,0,600,CAL +21,5,0,0,2,1,0,18,1,3,1,361,-428.8,102.9,211,0,422,CAL +21,6,0,0,2,1,0,18,1,2,0,360,-394.5,102.9,128,0,256,CAL +21,7,0,1,2,1,0,58,5,5,19,1179,394.5,102.9,83,0,166,CAL +21,8,0,1,2,1,0,58,5,3,17,1177,428.8,102.9,163,0,326,CAL +21,9,0,1,2,1,0,58,4,5,15,1175,463,102.9,249,0,498,CAL +21,10,0,1,2,1,0,58,4,3,13,1173,497.4,102.9,341,0,682,CAL +21,11,0,1,2,1,0,58,3,4,10,1170,531.7,102.9,435,0,870,CAL +21,12,0,1,2,1,0,58,2,5,7,1167,566,102.9,523,0,1046,CAL +21,13,0,1,2,1,0,58,2,2,4,1164,600.2,102.9,593,0,1186,CAL +22,0,0,0,2,1,0,18,5,2,16,376,-617.4,137.2,665,0,1330,CAL +22,1,0,0,2,1,0,18,3,5,11,371,-583.1,137.2,627,0,1254,CAL +22,2,0,0,2,1,0,18,3,4,10,370,-548.8,137.2,570,0,1140,CAL +22,3,0,0,2,0,0,16,4,2,12,332,-514.5,137.2,489,0,978,CAL +22,4,0,0,2,0,0,16,3,3,9,329,-480.2,137.2,396,0,792,CAL +22,5,0,0,2,0,0,16,2,4,6,326,-445.9,137.2,301,0,602,CAL +22,6,0,0,2,0,0,16,1,5,3,323,-411.6,137.2,212,0,424,CAL +22,7,0,0,2,0,0,16,1,3,1,321,-377.3,137.2,129,0,258,CAL +22,8,0,1,2,0,0,56,5,4,18,1138,377.3,137.2,82,0,164,CAL +22,9,0,1,2,0,0,56,5,2,16,1136,411.6,137.2,162,0,324,CAL +22,10,0,1,2,0,0,56,4,3,13,1133,445.9,137.2,248,0,496,CAL +22,11,0,1,2,0,0,56,3,4,10,1130,480.2,137.2,340,0,680,CAL 
+22,12,0,1,2,0,0,56,2,5,7,1127,514.5,137.2,434,0,868,CAL +22,13,0,1,2,1,0,58,3,5,11,1171,548.8,137.2,522,0,1044,CAL +22,14,0,1,2,1,0,58,3,2,8,1168,583.1,137.2,592,0,1184,CAL +22,15,0,1,2,1,0,58,2,3,5,1165,617.4,137.2,646,0,1292,CAL +23,0,0,0,2,0,0,16,5,5,19,339,-600.2,171.5,666,0,1332,CAL +23,1,0,0,2,0,0,16,5,2,16,336,-566,171.5,628,0,1256,CAL +23,2,0,0,2,0,0,16,4,4,14,334,-531.7,171.5,571,0,1142,CAL +23,3,0,0,2,0,0,16,3,5,11,331,-497.4,171.5,490,0,980,CAL +23,4,0,0,2,0,0,16,3,2,8,328,-463,171.5,397,0,794,CAL +23,5,0,0,2,0,0,16,2,3,5,325,-428.8,171.5,302,0,604,CAL +23,6,0,0,2,0,0,16,1,4,2,322,-394.5,171.5,213,0,426,CAL +23,7,0,0,2,0,0,16,1,2,0,320,-360.1,171.5,130,0,260,CAL +23,8,0,1,2,0,0,56,5,5,19,1139,360.1,171.5,81,0,162,CAL +23,9,0,1,2,0,0,56,5,3,17,1137,394.5,171.5,161,0,322,CAL +23,10,0,1,2,0,0,56,4,4,14,1134,428.8,171.5,247,0,494,CAL +23,11,0,1,2,0,0,56,3,5,11,1131,463,171.5,339,0,678,CAL +23,12,0,1,2,0,0,56,3,2,8,1128,497.4,171.5,433,0,866,CAL +23,13,0,1,2,0,0,56,2,3,5,1125,531.7,171.5,521,0,1042,CAL +23,14,0,1,2,0,0,56,1,5,3,1123,566,171.5,591,0,1182,CAL +23,15,0,1,2,0,0,56,1,2,0,1120,600.2,171.5,645,0,1290,CAL +24,0,0,0,2,0,0,16,5,4,18,338,-583.1,205.8,667,0,1334,CAL +24,1,0,0,2,0,0,16,4,5,15,335,-548.8,205.8,629,0,1258,CAL +24,2,0,0,2,0,0,16,4,3,13,333,-514.5,205.8,572,0,1144,CAL +24,3,0,0,2,0,0,16,3,4,10,330,-480.2,205.8,491,0,982,CAL +24,4,0,0,2,0,0,16,2,5,7,327,-445.9,205.8,398,0,796,CAL +24,5,0,0,2,0,0,16,2,2,4,324,-411.6,205.8,303,0,606,CAL +24,6,0,0,3,2,0,28,1,3,1,561,-377.3,205.8,214,0,428,CAL +24,7,0,0,3,2,0,28,1,2,0,560,-343,205.8,131,0,262,CAL +24,8,0,1,3,2,0,68,5,5,19,1379,343,205.8,80,0,160,CAL +24,9,0,1,3,2,0,68,5,4,18,1378,377.3,205.8,160,0,320,CAL +24,10,0,1,2,0,0,56,4,5,15,1135,411.6,205.8,246,0,492,CAL +24,11,0,1,2,0,0,56,4,2,12,1132,445.9,205.8,338,0,676,CAL +24,12,0,1,2,0,0,56,3,3,9,1129,480.2,205.8,432,0,864,CAL +24,13,0,1,2,0,0,56,2,4,6,1126,514.5,205.8,520,0,1040,CAL +24,14,0,1,2,0,0,56,2,2,4,1124,548.8,205.8,590,0,1180,CAL 
+24,15,0,1,2,0,0,56,1,3,1,1121,583.1,205.8,644,0,1288,CAL +25,0,0,0,2,0,0,16,5,3,17,337,-566,240.1,668,0,1336,CAL +25,1,0,0,3,2,0,28,5,4,18,578,-531.7,240.1,630,0,1260,CAL +25,2,0,0,3,2,0,28,5,3,17,577,-497.4,240.1,573,0,1146,CAL +25,3,0,0,3,2,0,28,5,2,16,576,-463,240.1,492,0,984,CAL +25,4,0,0,3,2,0,28,2,3,5,565,-428.8,240.1,399,0,798,CAL +25,5,0,0,3,2,0,28,2,2,4,564,-394.5,240.1,304,0,608,CAL +25,6,0,0,3,2,0,28,1,5,3,563,-360.1,240.1,215,0,430,CAL +25,7,0,0,3,2,0,28,1,4,2,562,-325.9,240.1,132,0,264,CAL +25,8,0,1,3,2,0,68,5,3,17,1377,325.9,240.1,79,0,158,CAL +25,9,0,1,3,2,0,68,5,2,16,1376,360.1,240.1,159,0,318,CAL +25,10,0,1,3,2,0,68,4,5,15,1375,394.5,240.1,245,0,490,CAL +25,11,0,1,3,2,0,68,4,4,14,1374,428.8,240.1,337,0,674,CAL +25,12,0,1,3,2,0,68,1,5,3,1363,463,240.1,431,0,862,CAL +25,13,0,1,3,2,0,68,1,4,2,1362,497.4,240.1,519,0,1038,CAL +25,14,0,1,3,2,0,68,1,3,1,1361,531.7,240.1,589,0,1178,CAL +25,15,0,1,2,0,0,56,1,4,2,1122,566,240.1,643,0,1286,CAL +26,0,0,0,3,2,0,28,5,5,19,579,-548.8,274.4,669,0,1338,CAL +26,1,0,0,3,2,0,28,4,4,14,574,-514.5,274.4,631,0,1262,CAL +26,2,0,0,3,2,0,28,4,3,13,573,-480.2,274.4,574,0,1148,CAL +26,3,0,0,3,2,0,28,4,2,12,572,-445.9,274.4,493,0,986,CAL +26,4,0,0,3,2,0,28,2,5,7,567,-411.6,274.4,400,0,800,CAL +26,5,0,0,3,2,0,28,2,4,6,566,-377.3,274.4,305,0,610,CAL +26,6,0,0,3,1,0,26,1,3,1,521,-343,274.4,216,0,432,CAL +26,7,0,0,3,1,0,26,1,2,0,520,-308.7,274.4,133,0,266,CAL +26,8,0,1,3,1,0,66,5,5,19,1339,308.7,274.4,78,0,156,CAL +26,9,0,1,3,1,0,66,5,4,18,1338,343,274.4,158,0,316,CAL +26,10,0,1,3,2,0,68,4,3,13,1373,377.3,274.4,244,0,488,CAL +26,11,0,1,3,2,0,68,4,2,12,1372,411.6,274.4,336,0,672,CAL +26,12,0,1,3,2,0,68,2,5,7,1367,445.9,274.4,430,0,860,CAL +26,13,0,1,3,2,0,68,2,4,6,1366,480.2,274.4,518,0,1036,CAL +26,14,0,1,3,2,0,68,2,3,5,1365,514.5,274.4,588,0,1176,CAL +26,15,0,1,3,2,0,68,1,2,0,1360,548.8,274.4,642,0,1284,CAL +27,0,0,0,3,2,0,28,4,5,15,575,-531.7,308.7,670,0,1340,CAL +27,1,0,0,3,2,0,28,3,4,10,570,-497.4,308.7,632,0,1264,CAL 
+27,2,0,0,3,2,0,28,3,3,9,569,-463,308.7,575,0,1150,CAL +27,3,0,0,3,2,0,28,3,2,8,568,-428.8,308.7,494,0,988,CAL +27,4,0,0,3,1,0,26,2,4,6,526,-394.5,308.7,401,0,802,CAL +27,5,0,0,3,1,0,26,2,3,5,525,-360.1,308.7,306,0,612,CAL +27,6,0,0,3,1,0,26,2,2,4,524,-325.9,308.7,217,0,434,CAL +27,7,0,0,3,1,0,26,1,5,3,523,-291.5,308.7,134,0,268,CAL +27,8,0,0,3,1,0,26,1,4,2,522,-257.2,308.7,57,0,114,CAL +27,9,0,1,3,1,0,66,5,3,17,1337,257.2,308.7,31,0,62,CAL +27,10,0,1,3,1,0,66,5,2,16,1336,291.5,308.7,77,0,154,CAL +27,11,0,1,3,1,0,66,4,5,15,1335,325.9,308.7,157,0,314,CAL +27,12,0,1,3,1,0,66,4,4,14,1334,360.1,308.7,243,0,486,CAL +27,13,0,1,3,1,0,66,4,3,13,1333,394.5,308.7,335,0,670,CAL +27,14,0,1,3,2,0,68,3,5,11,1371,428.8,308.7,429,0,858,CAL +27,15,0,1,3,2,0,68,3,4,10,1370,463,308.7,517,0,1034,CAL +27,16,0,1,3,2,0,68,3,3,9,1369,497.4,308.7,587,0,1174,CAL +27,17,0,1,3,2,0,68,2,2,4,1364,531.7,308.7,641,0,1282,CAL +28,0,0,0,3,2,0,28,3,5,11,571,-514.5,343,671,0,1342,CAL +28,1,0,0,3,1,0,26,3,5,11,531,-480.2,343,633,0,1266,CAL +28,2,0,0,3,1,0,26,3,4,10,530,-445.9,343,576,0,1152,CAL +28,3,0,0,3,1,0,26,3,3,9,529,-411.6,343,495,0,990,CAL +28,4,0,0,3,1,0,26,3,2,8,528,-377.3,343,402,0,804,CAL +28,5,0,0,3,1,0,26,2,5,7,527,-343,343,307,0,614,CAL +28,6,0,0,3,0,0,24,1,5,3,483,-308.7,343,218,0,436,CAL +28,7,0,0,3,0,0,24,1,4,2,482,-274.4,343,135,0,270,CAL +28,8,0,0,3,0,0,24,1,3,1,481,-240.1,343,58,0,116,CAL +28,9,0,0,3,0,0,24,1,2,0,480,-205.8,343,15,0,30,CAL +28,10,0,1,3,0,0,64,5,5,19,1299,205.8,343,6,0,12,CAL +28,11,0,1,3,0,0,64,5,4,18,1298,240.1,343,30,0,60,CAL +28,12,0,1,3,0,0,64,5,3,17,1297,274.4,343,76,0,152,CAL +28,13,0,1,3,0,0,64,5,2,16,1296,308.7,343,156,0,312,CAL +28,14,0,1,3,1,0,66,4,2,12,1332,343,343,242,0,484,CAL +28,15,0,1,3,1,0,66,3,5,11,1331,377.3,343,334,0,668,CAL +28,16,0,1,3,1,0,66,3,4,10,1330,411.6,343,428,0,856,CAL +28,17,0,1,3,1,0,66,3,3,9,1329,445.9,343,516,0,1032,CAL +28,18,0,1,3,1,0,66,3,2,8,1328,480.2,343,586,0,1172,CAL +28,19,0,1,3,2,0,68,3,2,8,1368,514.5,343,640,0,1280,CAL 
+29,0,0,0,3,1,0,26,4,5,15,535,-497.4,377.3,672,0,1344,CAL +29,1,0,0,3,1,0,26,4,4,14,534,-463,377.3,634,0,1268,CAL +29,2,0,0,3,1,0,26,4,3,13,533,-428.8,377.3,577,0,1154,CAL +29,3,0,0,3,1,0,26,4,2,12,532,-394.5,377.3,496,0,992,CAL +29,4,0,0,3,0,0,24,2,5,7,487,-360.1,377.3,403,0,806,CAL +29,5,0,0,3,0,0,24,2,4,6,486,-325.9,377.3,308,0,616,CAL +29,6,0,0,3,0,0,24,2,3,5,485,-291.5,377.3,219,0,438,CAL +29,7,0,0,3,0,0,24,2,2,4,484,-257.2,377.3,136,0,272,CAL +29,8,0,0,4,2,0,36,5,3,17,737,-222.9,377.3,59,0,118,CAL +29,9,0,0,4,2,0,36,5,4,18,738,-188.6,377.3,0,0,0,CAL +29,10,0,0,4,2,0,36,5,5,19,739,-154.4,377.3,1,0,2,CAL +29,11,0,0,4,1,0,34,5,5,19,699,-120.1,377.3,2,0,4,CAL +29,12,0,1,4,1,0,74,1,2,0,1480,120.1,377.3,3,0,6,CAL +29,13,0,1,4,2,0,76,1,2,0,1520,154.4,377.3,4,0,8,CAL +29,14,0,1,4,2,0,76,1,3,1,1521,188.6,377.3,5,0,10,CAL +29,15,0,1,4,2,0,76,1,4,2,1522,222.9,377.3,29,0,58,CAL +29,16,0,1,3,0,0,64,4,5,15,1295,257.2,377.3,75,0,150,CAL +29,17,0,1,3,0,0,64,4,4,14,1294,291.5,377.3,155,0,310,CAL +29,18,0,1,3,0,0,64,4,3,13,1293,325.9,377.3,241,0,482,CAL +29,19,0,1,3,0,0,64,4,2,12,1292,360.1,377.3,333,0,666,CAL +29,20,0,1,3,1,0,66,2,5,7,1327,394.5,377.3,427,0,854,CAL +29,21,0,1,3,1,0,66,2,4,6,1326,428.8,377.3,515,0,1030,CAL +29,22,0,1,3,1,0,66,2,3,5,1325,463,377.3,585,0,1170,CAL +29,23,0,1,3,1,0,66,2,2,4,1324,497.4,377.3,639,0,1278,CAL +30,0,0,0,3,1,0,26,5,4,18,538,-480.2,411.6,673,0,1346,CAL +30,1,0,0,3,1,0,26,5,3,17,537,-445.9,411.6,635,0,1270,CAL +30,2,0,0,3,1,0,26,5,2,16,536,-411.6,411.6,578,0,1156,CAL +30,3,0,0,3,0,0,24,3,5,11,491,-377.3,411.6,497,0,994,CAL +30,4,0,0,3,0,0,24,3,4,10,490,-343,411.6,404,0,808,CAL +30,5,0,0,3,0,0,24,3,3,9,489,-308.7,411.6,309,0,618,CAL +30,6,0,0,3,0,0,24,3,2,8,488,-274.4,411.6,220,0,440,CAL +30,7,0,0,4,2,0,36,4,4,14,734,-240.1,411.6,137,0,274,CAL +30,8,0,0,4,2,0,36,4,5,15,735,-205.8,411.6,16,0,32,CAL +30,9,0,0,4,2,0,36,5,2,16,736,-171.5,411.6,17,0,34,CAL +30,10,0,0,4,1,0,34,5,2,16,696,-137.2,411.6,18,0,36,CAL 
+30,11,0,0,4,1,0,34,5,3,17,697,-102.9,411.6,19,0,38,CAL +30,12,0,0,4,1,0,34,5,4,18,698,-68.6,411.6,20,0,40,CAL +30,13,0,0,4,0,0,32,5,4,18,658,-34.3,411.6,21,0,42,CAL +30,14,0,0,4,0,0,32,5,5,19,659,0,411.6,22,0,44,CAL +30,15,0,1,4,0,0,72,1,2,0,1440,34.3,411.6,23,0,46,CAL +30,16,0,1,4,1,0,74,1,3,1,1481,68.6,411.6,24,0,48,CAL +30,17,0,1,4,1,0,74,1,4,2,1482,102.9,411.6,25,0,50,CAL +30,18,0,1,4,1,0,74,1,5,3,1483,137.2,411.6,26,0,52,CAL +30,19,0,1,4,2,0,76,1,5,3,1523,171.5,411.6,27,0,54,CAL +30,20,0,1,4,2,0,76,2,2,4,1524,205.8,411.6,28,0,56,CAL +30,21,0,1,4,2,0,76,2,3,5,1525,240.1,411.6,74,0,148,CAL +30,22,0,1,3,0,0,64,3,5,11,1291,274.4,411.6,154,0,308,CAL +30,23,0,1,3,0,0,64,3,4,10,1290,308.7,411.6,240,0,480,CAL +30,24,0,1,3,0,0,64,3,3,9,1289,343,411.6,332,0,664,CAL +30,25,0,1,3,0,0,64,3,2,8,1288,377.3,411.6,426,0,852,CAL +30,26,0,1,3,1,0,66,1,5,3,1323,411.6,411.6,514,0,1028,CAL +30,27,0,1,3,1,0,66,1,4,2,1322,445.9,411.6,584,0,1168,CAL +30,28,0,1,3,1,0,66,1,3,1,1321,480.2,411.6,638,0,1276,CAL +31,0,0,0,3,1,0,26,5,5,19,539,-428.8,445.9,636,0,1272,CAL +31,1,0,0,3,0,0,24,4,5,15,495,-394.5,445.9,579,0,1158,CAL +31,2,0,0,3,0,0,24,4,4,14,494,-360.1,445.9,498,0,996,CAL +31,3,0,0,3,0,0,24,4,3,13,493,-325.9,445.9,405,0,810,CAL +31,4,0,0,3,0,0,24,4,2,12,492,-291.5,445.9,310,0,620,CAL +31,5,0,0,4,2,0,36,3,5,11,731,-257.2,445.9,221,0,442,CAL +31,6,0,0,4,2,0,36,4,2,12,732,-222.9,445.9,60,0,120,CAL +31,7,0,0,4,2,0,36,4,3,13,733,-188.6,445.9,61,0,122,CAL +31,8,0,0,4,1,0,34,4,3,13,693,-154.4,445.9,62,0,124,CAL +31,9,0,0,4,1,0,34,4,4,14,694,-120.1,445.9,63,0,126,CAL +31,10,0,0,4,1,0,34,4,5,15,695,-85.8,445.9,64,0,128,CAL +31,11,0,0,4,0,0,32,5,2,16,656,-51.5,445.9,65,0,130,CAL +31,12,0,0,4,0,0,32,5,3,17,657,-17.1,445.9,66,0,132,CAL +31,13,0,1,4,0,0,72,1,3,1,1441,17.1,445.9,67,0,134,CAL +31,14,0,1,4,0,0,72,1,4,2,1442,51.5,445.9,68,0,136,CAL +31,15,0,1,4,1,0,74,2,2,4,1484,85.8,445.9,69,0,138,CAL +31,16,0,1,4,1,0,74,2,3,5,1485,120.1,445.9,70,0,140,CAL 
+31,17,0,1,4,1,0,74,2,4,6,1486,154.4,445.9,71,0,142,CAL +31,18,0,1,4,2,0,76,2,4,6,1526,188.6,445.9,72,0,144,CAL +31,19,0,1,4,2,0,76,2,5,7,1527,222.9,445.9,73,0,146,CAL +31,20,0,1,4,2,0,76,3,2,8,1528,257.2,445.9,153,0,306,CAL +31,21,0,1,3,0,0,64,2,5,7,1287,291.5,445.9,239,0,478,CAL +31,22,0,1,3,0,0,64,2,4,6,1286,325.9,445.9,331,0,662,CAL +31,23,0,1,3,0,0,64,2,3,5,1285,360.1,445.9,425,0,850,CAL +31,24,0,1,3,0,0,64,2,2,4,1284,394.5,445.9,513,0,1026,CAL +31,25,0,1,3,1,0,66,1,2,0,1320,428.8,445.9,583,0,1166,CAL +32,0,0,0,3,0,0,24,5,4,18,498,-411.6,480.2,637,0,1274,CAPHRI +32,1,0,0,3,0,0,24,5,3,17,497,-377.3,480.2,580,0,1160,CAL +32,2,0,0,3,0,0,24,5,2,16,496,-343,480.2,499,0,998,CAL +32,3,0,0,4,2,0,36,2,5,7,727,-308.7,480.2,406,0,812,CAL +32,4,0,0,4,2,0,36,3,2,8,728,-274.4,480.2,311,0,622,CAL +32,5,0,0,4,2,0,36,3,3,9,729,-240.1,480.2,138,0,276,CAL +32,6,0,0,4,2,0,36,3,4,10,730,-205.8,480.2,139,0,278,CAL +32,7,0,0,4,1,0,34,3,4,10,690,-171.5,480.2,140,0,280,CAL +32,8,0,0,4,1,0,34,3,5,11,691,-137.2,480.2,141,0,282,CAL +32,9,0,0,4,1,0,34,4,2,12,692,-102.9,480.2,142,0,284,CAL +32,10,0,0,4,0,0,32,4,4,14,654,-68.6,480.2,143,0,286,CAL +32,11,0,0,4,0,0,32,4,5,15,655,-34.3,480.2,144,0,288,CAL +32,12,0,1,4,0,0,72,1,5,3,1443,0,480.2,145,0,290,CAL +32,13,0,1,4,0,0,72,2,2,4,1444,34.3,480.2,146,0,292,CAL +32,14,0,1,4,0,0,72,2,3,5,1445,68.6,480.2,147,0,294,CAL +32,15,0,1,4,1,0,74,2,5,7,1487,102.9,480.2,148,0,296,CAL +32,16,0,1,4,1,0,74,3,2,8,1488,137.2,480.2,149,0,298,CAL +32,17,0,1,4,1,0,74,3,3,9,1489,171.5,480.2,150,0,300,CAL +32,18,0,1,4,2,0,76,3,3,9,1529,205.8,480.2,151,0,302,CAL +32,19,0,1,4,2,0,76,3,4,10,1530,240.1,480.2,152,0,304,CAL +32,20,0,1,4,2,0,76,3,5,11,1531,274.4,480.2,238,0,476,CAL +32,21,0,1,4,2,0,76,4,2,12,1532,308.7,480.2,330,0,660,CAL +32,22,0,1,3,0,0,64,1,5,3,1283,343,480.2,424,0,848,CAL +32,23,0,1,3,0,0,64,1,4,2,1282,377.3,480.2,512,0,1024,CAL +32,24,0,1,3,0,0,64,1,3,1,1281,411.6,480.2,582,0,1164,CAPHRI +33,0,0,0,3,0,0,24,5,5,19,499,-360.1,514.5,581,0,1162,CAL 
+33,1,0,0,4,2,0,36,1,5,3,723,-325.9,514.5,500,0,1000,CAL +33,2,0,0,4,2,0,36,2,2,4,724,-291.5,514.5,407,0,814,CAL +33,3,0,0,4,2,0,36,2,3,5,725,-257.2,514.5,222,0,444,CAL +33,4,0,0,4,2,0,36,2,4,6,726,-222.9,514.5,223,0,446,CAL +33,5,0,0,4,1,0,34,2,5,7,687,-188.6,514.5,224,0,448,CAL +33,6,0,0,4,1,0,34,3,2,8,688,-154.4,514.5,225,0,450,CAL +33,7,0,0,4,1,0,34,3,3,9,689,-120.1,514.5,226,0,452,CAL +33,8,0,0,4,0,0,32,3,5,11,651,-85.8,514.5,227,0,454,CAL +33,9,0,0,4,0,0,32,4,2,12,652,-51.5,514.5,228,0,456,CAL +33,10,0,0,4,0,0,32,4,3,13,653,-17.1,514.5,229,0,458,CAL +33,11,0,1,4,0,0,72,2,4,6,1446,17.1,514.5,230,0,460,CAL +33,12,0,1,4,0,0,72,2,5,7,1447,51.5,514.5,231,0,462,CAL +33,13,0,1,4,0,0,72,3,2,8,1448,85.8,514.5,232,0,464,CAL +33,14,0,1,4,1,0,74,3,4,10,1490,120.1,514.5,233,0,466,CAL +33,15,0,1,4,1,0,74,3,5,11,1491,154.4,514.5,234,0,468,CAL +33,16,0,1,4,1,0,74,4,2,12,1492,188.6,514.5,235,0,470,CAL +33,17,0,1,4,2,0,76,4,3,13,1533,222.9,514.5,236,0,472,CAL +33,18,0,1,4,2,0,76,4,4,14,1534,257.2,514.5,237,0,474,CAL +33,19,0,1,4,2,0,76,4,5,15,1535,291.5,514.5,329,0,658,CAL +33,20,0,1,4,2,0,76,5,2,16,1536,325.9,514.5,423,0,846,CAL +33,21,0,1,3,0,0,64,1,2,0,1280,360.1,514.5,511,0,1022,CAL +34,0,0,0,4,2,0,36,1,2,0,720,-308.7,548.8,501,0,1002,CAL +34,1,0,0,4,2,0,36,1,3,1,721,-274.4,548.8,312,0,624,CAL +34,2,0,0,4,2,0,36,1,4,2,722,-240.1,548.8,313,0,626,CAL +34,3,0,0,4,1,0,34,1,5,3,683,-205.8,548.8,314,0,628,CAL +34,4,0,0,4,1,0,34,2,2,4,684,-171.5,548.8,315,0,630,CAL +34,5,0,0,4,1,0,34,2,3,5,685,-137.2,548.8,316,0,632,CAL +34,6,0,0,4,1,0,34,2,4,6,686,-102.9,548.8,317,0,634,CAL +34,7,0,0,4,0,0,32,3,2,8,648,-68.6,548.8,318,0,636,CAL +34,8,0,0,4,0,0,32,3,3,9,649,-34.3,548.8,319,0,638,CAL +34,9,0,0,4,0,0,32,3,4,10,650,0,548.8,320,0,640,CAL +34,10,0,1,4,0,0,72,3,3,9,1449,34.3,548.8,321,0,642,CAL +34,11,0,1,4,0,0,72,3,4,10,1450,68.6,548.8,322,0,644,CAL +34,12,0,1,4,0,0,72,3,5,11,1451,102.9,548.8,323,0,646,CAL +34,13,0,1,4,1,0,74,4,3,13,1493,137.2,548.8,324,0,648,CAL 
+34,14,0,1,4,1,0,74,4,4,14,1494,171.5,548.8,325,0,650,CAL +34,15,0,1,4,1,0,74,4,5,15,1495,205.8,548.8,326,0,652,CAL +34,16,0,1,4,2,0,76,5,3,17,1537,240.1,548.8,327,0,654,CAL +34,17,0,1,4,2,0,76,5,4,18,1538,274.4,548.8,328,0,656,CAL +34,18,0,1,4,2,0,76,5,5,19,1539,308.7,548.8,422,0,844,CAL +35,0,0,0,4,1,0,34,1,2,0,680,-222.9,583.1,408,0,816,CAL +35,1,0,0,4,1,0,34,1,3,1,681,-188.6,583.1,409,0,818,CAL +35,2,0,0,4,1,0,34,1,4,2,682,-154.4,583.1,410,0,820,CAL +35,3,0,0,4,0,0,32,2,2,4,644,-120.1,583.1,411,0,822,CAL +35,4,0,0,4,0,0,32,2,3,5,645,-85.8,583.1,412,0,824,CAL +35,5,0,0,4,0,0,32,2,4,6,646,-51.5,583.1,413,0,826,CAL +35,6,0,0,4,0,0,32,2,5,7,647,-17.1,583.1,414,0,828,CAL +35,7,0,1,4,0,0,72,4,2,12,1452,17.1,583.1,415,0,830,CAL +35,8,0,1,4,0,0,72,4,3,13,1453,51.5,583.1,416,0,832,CAL +35,9,0,1,4,0,0,72,4,4,14,1454,85.8,583.1,417,0,834,CAL +35,10,0,1,4,0,0,72,4,5,15,1455,120.1,583.1,418,0,836,CAL +35,11,0,1,4,1,0,74,5,2,16,1496,154.4,583.1,419,0,838,CAL +35,12,0,1,4,1,0,74,5,3,17,1497,188.6,583.1,420,0,840,CAL +35,13,0,1,4,1,0,74,5,4,18,1498,222.9,583.1,421,0,842,CAL +36,0,0,0,4,0,0,32,1,2,0,640,-137.2,617.4,502,0,1004,CAL +36,1,0,0,4,0,0,32,1,3,1,641,-102.9,617.4,503,0,1006,CAL +36,2,0,0,4,0,0,32,1,4,2,642,-68.6,617.4,504,0,1008,CAL +36,3,0,0,4,0,0,32,1,5,3,643,-34.3,617.4,505,0,1010,CAL +36,4,0,1,4,0,0,72,5,2,16,1456,0,617.4,506,0,1012,CAL +36,5,0,1,4,0,0,72,5,3,17,1457,34.3,617.4,507,0,1014,CAL +36,6,0,1,4,0,0,72,5,4,18,1458,68.6,617.4,508,0,1016,CAL +36,7,0,1,4,0,0,72,5,5,19,1459,102.9,617.4,509,0,1018,CAL +36,8,0,1,4,1,0,74,5,5,19,1499,137.2,617.4,510,0,1020,CAL +0,0,0,0,0,2,1,5,5,5,19,119,-137.2,-617.4,550,1,1101,CAL +0,1,0,0,0,3,1,7,5,2,16,156,-102.9,-617.4,549,1,1099,CAL +0,2,0,0,0,3,1,7,5,3,17,157,-68.6,-617.4,548,1,1097,CAL +0,3,0,0,0,3,1,7,5,4,18,158,-34.3,-617.4,547,1,1095,CAL +0,4,0,0,0,3,1,7,5,5,19,159,0,-617.4,546,1,1093,CAL +0,5,0,1,0,3,1,47,1,2,0,940,34.3,-617.4,545,1,1091,CAL +0,6,0,1,0,3,1,47,1,3,1,941,68.6,-617.4,544,1,1089,CAL 
+0,7,0,1,0,3,1,47,1,4,2,942,102.9,-617.4,543,1,1087,CAL +0,8,0,1,0,3,1,47,1,5,3,943,137.2,-617.4,542,1,1085,CAL +1,0,0,0,0,2,1,5,4,5,15,115,-222.9,-583.1,468,1,937,CAL +1,1,0,0,0,2,1,5,5,2,16,116,-188.6,-583.1,467,1,935,CAL +1,2,0,0,0,2,1,5,5,3,17,117,-154.4,-583.1,466,1,933,CAL +1,3,0,0,0,2,1,5,5,4,18,118,-120.1,-583.1,465,1,931,CAL +1,4,0,0,0,3,1,7,4,3,13,153,-85.8,-583.1,464,1,929,CAL +1,5,0,0,0,3,1,7,4,4,14,154,-51.5,-583.1,463,1,927,CAL +1,6,0,0,0,3,1,7,4,5,15,155,-17.1,-583.1,462,1,925,CAL +1,7,0,1,0,3,1,47,2,2,4,944,17.1,-583.1,461,1,923,CAL +1,8,0,1,0,3,1,47,2,3,5,945,51.5,-583.1,460,1,921,CAL +1,9,0,1,0,3,1,47,2,4,6,946,85.8,-583.1,459,1,919,CAL +1,10,0,1,0,3,1,47,2,5,7,947,120.1,-583.1,458,1,917,CAL +1,11,0,1,0,2,1,45,1,2,0,900,154.4,-583.1,457,1,915,CAL +1,12,0,1,0,2,1,45,1,3,1,901,188.6,-583.1,456,1,913,CAL +1,13,0,1,0,2,1,45,1,4,2,902,222.9,-583.1,455,1,911,CAL +2,0,0,0,0,1,1,3,5,4,18,78,-308.7,-548.8,469,1,939,CAL +2,1,0,0,0,1,1,3,5,5,19,79,-274.4,-548.8,376,1,753,CAL +2,2,0,0,0,2,1,5,3,5,11,111,-240.1,-548.8,375,1,751,CAL +2,3,0,0,0,2,1,5,4,2,12,112,-205.8,-548.8,374,1,749,CAL +2,4,0,0,0,2,1,5,4,3,13,113,-171.5,-548.8,373,1,747,CAL +2,5,0,0,0,2,1,5,4,4,14,114,-137.2,-548.8,372,1,745,CAL +2,6,0,0,0,3,1,7,3,4,10,150,-102.9,-548.8,371,1,743,CAL +2,7,0,0,0,3,1,7,3,5,11,151,-68.6,-548.8,370,1,741,CAL +2,8,0,0,0,3,1,7,4,2,12,152,-34.3,-548.8,369,1,739,CAL +2,9,0,1,0,3,1,47,3,2,8,948,0,-548.8,368,1,737,CAL +2,10,0,1,0,3,1,47,3,3,9,949,34.3,-548.8,367,1,735,CAL +2,11,0,1,0,3,1,47,3,4,10,950,68.6,-548.8,366,1,733,CAL +2,12,0,1,0,3,1,47,3,5,11,951,102.9,-548.8,365,1,731,CAL +2,13,0,1,0,2,1,45,1,5,3,903,137.2,-548.8,364,1,729,CAL +2,14,0,1,0,2,1,45,2,2,4,904,171.5,-548.8,363,1,727,CAL +2,15,0,1,0,2,1,45,2,3,5,905,205.8,-548.8,362,1,725,CAL +2,16,0,1,0,2,1,45,2,4,6,906,240.1,-548.8,361,1,723,CAL +2,17,0,1,0,1,1,43,1,2,0,860,274.4,-548.8,360,1,721,CAL +2,18,0,1,0,1,1,43,1,3,1,861,308.7,-548.8,454,1,909,CAL +3,0,0,0,0,0,1,1,5,5,19,39,-360.1,-514.5,551,1,1103,CAL 
+3,1,0,0,0,1,1,3,4,4,14,74,-325.9,-514.5,470,1,941,CAL +3,2,0,0,0,1,1,3,4,5,15,75,-291.5,-514.5,377,1,755,CAL +3,3,0,0,0,1,1,3,5,2,16,76,-257.2,-514.5,282,1,565,CAL +3,4,0,0,0,1,1,3,5,3,17,77,-222.9,-514.5,281,1,563,CAL +3,5,0,0,0,2,1,5,3,2,8,108,-188.6,-514.5,280,1,561,CAL +3,6,0,0,0,2,1,5,3,3,9,109,-154.4,-514.5,279,1,559,CAL +3,7,0,0,0,2,1,5,3,4,10,110,-120.1,-514.5,278,1,557,CAL +3,8,0,0,0,3,1,7,2,5,7,147,-85.8,-514.5,277,1,555,CAL +3,9,0,0,0,3,1,7,3,2,8,148,-51.5,-514.5,276,1,553,CAL +3,10,0,0,0,3,1,7,3,3,9,149,-17.1,-514.5,275,1,551,CAL +3,11,0,1,0,3,1,47,4,2,12,952,17.1,-514.5,274,1,549,CAL +3,12,0,1,0,3,1,47,4,3,13,953,51.5,-514.5,273,1,547,CAL +3,13,0,1,0,2,1,45,2,5,7,907,85.8,-514.5,272,1,545,CAL +3,14,0,1,0,2,1,45,3,2,8,908,120.1,-514.5,271,1,543,CAL +3,15,0,1,0,2,1,45,3,3,9,909,154.4,-514.5,270,1,541,CAL +3,16,0,1,0,2,1,45,3,4,10,910,188.6,-514.5,269,1,539,CAL +3,17,0,1,0,1,1,43,1,4,2,862,222.9,-514.5,268,1,537,CAL +3,18,0,1,0,1,1,43,1,5,3,863,257.2,-514.5,267,1,535,CAL +3,19,0,1,0,1,1,43,2,2,4,864,291.5,-514.5,359,1,719,CAL +3,20,0,1,0,1,1,43,2,3,5,865,325.9,-514.5,453,1,907,CAL +3,21,0,1,0,0,1,41,1,2,0,820,360.1,-514.5,541,1,1083,CAL +4,0,0,0,0,0,1,1,4,5,15,35,-411.6,-480.2,610,1,1221,CAPHRI +4,1,0,0,0,0,1,1,5,2,16,36,-377.3,-480.2,552,1,1105,CAL +4,2,0,0,0,0,1,1,5,3,17,37,-343,-480.2,471,1,943,CAL +4,3,0,0,0,0,1,1,5,4,18,38,-308.7,-480.2,378,1,757,CAL +4,4,0,0,0,1,1,3,3,5,11,71,-274.4,-480.2,283,1,567,CAL +4,5,0,0,0,1,1,3,4,2,12,72,-240.1,-480.2,194,1,389,CAL +4,6,0,0,0,1,1,3,4,3,13,73,-205.8,-480.2,193,1,387,CAL +4,7,0,0,0,2,1,5,2,3,5,105,-171.5,-480.2,192,1,385,CAL +4,8,0,0,0,2,1,5,2,4,6,106,-137.2,-480.2,191,1,383,CAL +4,9,0,0,0,2,1,5,2,5,7,107,-102.9,-480.2,190,1,381,CAL +4,10,0,0,0,3,1,7,2,2,4,144,-68.6,-480.2,189,1,379,CAL +4,11,0,0,0,3,1,7,2,3,5,145,-34.3,-480.2,188,1,377,CAL +4,12,0,0,0,3,1,7,2,4,6,146,0,-480.2,187,1,375,CAL +4,13,0,1,0,3,1,47,4,4,14,954,34.3,-480.2,186,1,373,CAL +4,14,0,1,0,3,1,47,4,5,15,955,68.6,-480.2,185,1,371,CAL 
+4,15,0,1,0,2,1,45,3,5,11,911,102.9,-480.2,184,1,369,CAL +4,16,0,1,0,2,1,45,4,2,12,912,137.2,-480.2,183,1,367,CAL +4,17,0,1,0,2,1,45,4,3,13,913,171.5,-480.2,182,1,365,CAL +4,18,0,1,0,1,1,43,2,4,6,866,205.8,-480.2,181,1,363,CAL +4,19,0,1,0,1,1,43,2,5,7,867,240.1,-480.2,180,1,361,CAL +4,20,0,1,0,1,1,43,3,2,8,868,274.4,-480.2,266,1,533,CAL +4,21,0,1,0,1,1,43,3,3,9,869,308.7,-480.2,358,1,717,CAL +4,22,0,1,0,0,1,41,1,3,1,821,343,-480.2,452,1,905,CAL +4,23,0,1,0,0,1,41,1,4,2,822,377.3,-480.2,540,1,1081,CAL +4,24,0,1,0,0,1,41,1,5,3,823,411.6,-480.2,609,1,1219,CAPHRI +5,0,0,0,1,3,1,15,5,5,19,319,-428.8,-445.9,611,1,1223,CAL +5,1,0,0,0,0,1,1,3,5,11,31,-394.5,-445.9,553,1,1107,CAL +5,2,0,0,0,0,1,1,4,2,12,32,-360.1,-445.9,472,1,945,CAL +5,3,0,0,0,0,1,1,4,3,13,33,-325.9,-445.9,379,1,759,CAL +5,4,0,0,0,0,1,1,4,4,14,34,-291.5,-445.9,284,1,569,CAL +5,5,0,0,0,1,1,3,3,2,8,68,-257.2,-445.9,195,1,391,CAL +5,6,0,0,0,1,1,3,3,3,9,69,-222.9,-445.9,112,1,225,CAL +5,7,0,0,0,1,1,3,3,4,10,70,-188.6,-445.9,111,1,223,CAL +5,8,0,0,0,2,1,5,1,4,2,102,-154.4,-445.9,110,1,221,CAL +5,9,0,0,0,2,1,5,1,5,3,103,-120.1,-445.9,109,1,219,CAL +5,10,0,0,0,2,1,5,2,2,4,104,-85.8,-445.9,108,1,217,CAL +5,11,0,0,0,3,1,7,1,4,2,142,-51.5,-445.9,107,1,215,CAL +5,12,0,0,0,3,1,7,1,5,3,143,-17.1,-445.9,106,1,213,CAL +5,13,0,1,0,3,1,47,5,2,16,956,17.1,-445.9,105,1,211,CAL +5,14,0,1,0,3,1,47,5,3,17,957,51.5,-445.9,104,1,209,CAL +5,15,0,1,0,2,1,45,4,4,14,914,85.8,-445.9,103,1,207,CAL +5,16,0,1,0,2,1,45,4,5,15,915,120.1,-445.9,102,1,205,CAL +5,17,0,1,0,2,1,45,5,2,16,916,154.4,-445.9,101,1,203,CAL +5,18,0,1,0,1,1,43,3,4,10,870,188.6,-445.9,100,1,201,CAL +5,19,0,1,0,1,1,43,3,5,11,871,222.9,-445.9,99,1,199,CAL +5,20,0,1,0,1,1,43,4,2,12,872,257.2,-445.9,179,1,359,CAL +5,21,0,1,0,1,1,43,4,3,13,873,291.5,-445.9,265,1,531,CAL +5,22,0,1,0,0,1,41,2,2,4,824,325.9,-445.9,357,1,715,CAL +5,23,0,1,0,0,1,41,2,3,5,825,360.1,-445.9,451,1,903,CAL +5,24,0,1,0,0,1,41,2,4,6,826,394.5,-445.9,539,1,1079,CAL 
+5,25,0,1,0,0,1,41,2,5,7,827,428.8,-445.9,608,1,1217,CAL +6,0,0,0,1,3,1,15,5,4,18,318,-480.2,-411.6,656,1,1313,CAL +6,1,0,0,1,3,1,15,5,3,17,317,-445.9,-411.6,612,1,1225,CAL +6,2,0,0,1,3,1,15,5,2,16,316,-411.6,-411.6,554,1,1109,CAL +6,3,0,0,0,0,1,1,2,5,7,27,-377.3,-411.6,473,1,947,CAL +6,4,0,0,0,0,1,1,3,2,8,28,-343,-411.6,380,1,761,CAL +6,5,0,0,0,0,1,1,3,3,9,29,-308.7,-411.6,285,1,571,CAL +6,6,0,0,0,0,1,1,3,4,10,30,-274.4,-411.6,196,1,393,CAL +6,7,0,0,0,1,1,3,2,3,5,65,-240.1,-411.6,113,1,227,CAL +6,8,0,0,0,1,1,3,2,4,6,66,-205.8,-411.6,50,1,101,CAL +6,9,0,0,0,1,1,3,2,5,7,67,-171.5,-411.6,49,1,99,CAL +6,10,0,0,0,2,1,5,1,2,0,100,-137.2,-411.6,48,1,97,CAL +6,11,0,0,0,2,1,5,1,3,1,101,-102.9,-411.6,47,1,95,CAL +6,12,0,0,0,3,1,7,1,2,0,140,-68.6,-411.6,46,1,93,CAL +6,13,0,0,0,3,1,7,1,3,1,141,-34.3,-411.6,45,1,91,CAL +6,14,0,1,0,3,1,47,5,4,18,958,0,-411.6,44,1,89,CAL +6,15,0,1,0,3,1,47,5,5,19,959,34.3,-411.6,43,1,87,CAL +6,16,0,1,0,2,1,45,5,3,17,917,68.6,-411.6,42,1,85,CAL +6,17,0,1,0,2,1,45,5,4,18,918,102.9,-411.6,41,1,83,CAL +6,18,0,1,0,2,1,45,5,5,19,919,137.2,-411.6,40,1,81,CAL +6,19,0,1,0,1,1,43,4,4,14,874,171.5,-411.6,39,1,79,CAL +6,20,0,1,0,1,1,43,4,5,15,875,205.8,-411.6,38,1,77,CAL +6,21,0,1,0,1,1,43,5,2,16,876,240.1,-411.6,98,1,197,CAL +6,22,0,1,0,0,1,41,3,2,8,828,274.4,-411.6,178,1,357,CAL +6,23,0,1,0,0,1,41,3,3,9,829,308.7,-411.6,264,1,529,CAL +6,24,0,1,0,0,1,41,3,4,10,830,343,-411.6,356,1,713,CAL +6,25,0,1,0,0,1,41,3,5,11,831,377.3,-411.6,450,1,901,CAL +6,26,0,1,1,3,1,55,1,4,2,1102,411.6,-411.6,538,1,1077,CAL +6,27,0,1,1,3,1,55,1,3,1,1101,445.9,-411.6,607,1,1215,CAL +6,28,0,1,1,3,1,55,1,2,0,1100,480.2,-411.6,655,1,1311,CAL +7,0,0,0,1,3,1,15,4,5,15,315,-497.4,-377.3,657,1,1315,CAL +7,1,0,0,1,3,1,15,4,4,14,314,-463,-377.3,613,1,1227,CAL +7,2,0,0,1,3,1,15,4,3,13,313,-428.8,-377.3,555,1,1111,CAL +7,3,0,0,1,3,1,15,4,2,12,312,-394.5,-377.3,474,1,949,CAL +7,4,0,0,1,3,1,15,3,5,11,311,-360.1,-377.3,381,1,763,CAL +7,5,0,0,0,0,1,1,2,2,4,24,-325.9,-377.3,286,1,573,CAL 
+7,6,0,0,0,0,1,1,2,3,5,25,-291.5,-377.3,197,1,395,CAL +7,7,0,0,0,0,1,1,2,4,6,26,-257.2,-377.3,114,1,229,CAL +7,8,0,0,0,1,1,3,1,3,1,61,-222.9,-377.3,51,1,103,CAL +7,9,0,0,0,1,1,3,1,4,2,62,-188.6,-377.3,13,1,27,CAL +7,10,0,0,0,1,1,3,1,5,3,63,-154.4,-377.3,12,1,25,CAL +7,11,0,0,0,1,1,3,2,2,4,64,-120.1,-377.3,11,1,23,CAL +7,12,0,1,0,1,1,43,5,3,17,877,120.1,-377.3,10,1,21,CAL +7,13,0,1,0,1,1,43,5,4,18,878,154.4,-377.3,9,1,19,CAL +7,14,0,1,0,1,1,43,5,5,19,879,188.6,-377.3,8,1,17,CAL +7,15,0,1,0,0,1,41,4,2,12,832,222.9,-377.3,37,1,75,CAL +7,16,0,1,0,0,1,41,4,3,13,833,257.2,-377.3,97,1,195,CAL +7,17,0,1,0,0,1,41,4,4,14,834,291.5,-377.3,177,1,355,CAL +7,18,0,1,0,0,1,41,4,5,15,835,325.9,-377.3,263,1,527,CAL +7,19,0,1,1,3,1,55,2,5,7,1107,360.1,-377.3,355,1,711,CAL +7,20,0,1,1,3,1,55,2,4,6,1106,394.5,-377.3,449,1,899,CAL +7,21,0,1,1,3,1,55,2,3,5,1105,428.8,-377.3,537,1,1075,CAL +7,22,0,1,1,3,1,55,2,2,4,1104,463,-377.3,606,1,1213,CAL +7,23,0,1,1,3,1,55,1,5,3,1103,497.4,-377.3,654,1,1309,CAL +8,0,0,0,1,2,1,13,5,5,19,279,-514.5,-343,658,1,1317,CAL +8,1,0,0,1,3,1,15,3,4,10,310,-480.2,-343,614,1,1229,CAL +8,2,0,0,1,3,1,15,3,3,9,309,-445.9,-343,556,1,1113,CAL +8,3,0,0,1,3,1,15,3,2,8,308,-411.6,-343,475,1,951,CAL +8,4,0,0,1,3,1,15,2,5,7,307,-377.3,-343,382,1,765,CAL +8,5,0,0,1,3,1,15,2,4,6,306,-343,-343,287,1,575,CAL +8,6,0,0,0,0,1,1,1,3,1,21,-308.7,-343,198,1,397,CAL +8,7,0,0,0,0,1,1,1,4,2,22,-274.4,-343,115,1,231,CAL +8,8,0,0,0,0,1,1,1,5,3,23,-240.1,-343,52,1,105,CAL +8,9,0,0,0,1,1,3,1,2,0,60,-205.8,-343,14,1,29,CAL +8,10,0,1,0,0,1,41,5,2,16,836,205.8,-343,7,1,15,CAL +8,11,0,1,0,0,1,41,5,3,17,837,240.1,-343,36,1,73,CAL +8,12,0,1,0,0,1,41,5,4,18,838,274.4,-343,96,1,193,CAL +8,13,0,1,1,3,1,55,4,2,12,1112,308.7,-343,176,1,353,CAL +8,14,0,1,1,3,1,55,3,5,11,1111,343,-343,262,1,525,CAL +8,15,0,1,1,3,1,55,3,4,10,1110,377.3,-343,354,1,709,CAL +8,16,0,1,1,3,1,55,3,3,9,1109,411.6,-343,448,1,897,CAL +8,17,0,1,1,3,1,55,3,2,8,1108,445.9,-343,536,1,1073,CAL 
+8,18,0,1,1,2,1,53,1,3,1,1061,480.2,-343,605,1,1211,CAL +8,19,0,1,1,2,1,53,1,2,0,1060,514.5,-343,653,1,1307,CAL +9,0,0,0,1,2,1,13,5,4,18,278,-531.7,-308.7,659,1,1319,CAL +9,1,0,0,1,2,1,13,5,3,17,277,-497.4,-308.7,615,1,1231,CAL +9,2,0,0,1,2,1,13,5,2,16,276,-463,-308.7,557,1,1115,CAL +9,3,0,0,1,2,1,13,4,5,15,275,-428.8,-308.7,476,1,953,CAL +9,4,0,0,1,3,1,15,2,3,5,305,-394.5,-308.7,383,1,767,CAL +9,5,0,0,1,3,1,15,2,2,4,304,-360.1,-308.7,288,1,577,CAL +9,6,0,0,1,3,1,15,1,5,3,303,-325.9,-308.7,199,1,399,CAL +9,7,0,0,1,3,1,15,1,4,2,302,-291.5,-308.7,116,1,233,CAL +9,8,0,0,0,0,1,1,1,2,0,20,-257.2,-308.7,53,1,107,CAL +9,9,0,1,0,0,1,41,5,5,19,839,257.2,-308.7,35,1,71,CAL +9,10,0,1,1,3,1,55,5,2,16,1116,291.5,-308.7,95,1,191,CAL +9,11,0,1,1,3,1,55,4,5,15,1115,325.9,-308.7,175,1,351,CAL +9,12,0,1,1,3,1,55,4,4,14,1114,360.1,-308.7,261,1,523,CAL +9,13,0,1,1,3,1,55,4,3,13,1113,394.5,-308.7,353,1,707,CAL +9,14,0,1,1,2,1,53,2,3,5,1065,428.8,-308.7,447,1,895,CAL +9,15,0,1,1,2,1,53,2,2,4,1064,463,-308.7,535,1,1071,CAL +9,16,0,1,1,2,1,53,1,5,3,1063,497.4,-308.7,604,1,1209,CAL +9,17,0,1,1,2,1,53,1,4,2,1062,531.7,-308.7,652,1,1305,CAL +10,0,0,0,1,2,1,13,4,4,14,274,-548.8,-274.4,660,1,1321,CAL +10,1,0,0,1,2,1,13,4,3,13,273,-514.5,-274.4,616,1,1233,CAL +10,2,0,0,1,2,1,13,4,2,12,272,-480.2,-274.4,558,1,1117,CAL +10,3,0,0,1,2,1,13,3,5,11,271,-445.9,-274.4,477,1,955,CAL +10,4,0,0,1,2,1,13,3,4,10,270,-411.6,-274.4,384,1,769,CAL +10,5,0,0,1,2,1,13,3,3,9,269,-377.3,-274.4,289,1,579,CAL +10,6,0,0,1,3,1,15,1,3,1,301,-343,-274.4,200,1,401,CAL +10,7,0,0,1,3,1,15,1,2,0,300,-308.7,-274.4,117,1,235,CAL +10,8,0,1,1,3,1,55,5,5,19,1119,308.7,-274.4,94,1,189,CAL +10,9,0,1,1,3,1,55,5,4,18,1118,343,-274.4,174,1,349,CAL +10,10,0,1,1,3,1,55,5,3,17,1117,377.3,-274.4,260,1,521,CAL +10,11,0,1,1,2,1,53,3,4,10,1070,411.6,-274.4,352,1,705,CAL +10,12,0,1,1,2,1,53,3,3,9,1069,445.9,-274.4,446,1,893,CAL +10,13,0,1,1,2,1,53,3,2,8,1068,480.2,-274.4,534,1,1069,CAL 
+10,14,0,1,1,2,1,53,2,5,7,1067,514.5,-274.4,603,1,1207,CAL +10,15,0,1,1,2,1,53,2,4,6,1066,548.8,-274.4,651,1,1303,CAL +11,0,0,0,1,1,1,11,5,4,18,238,-566,-240.1,661,1,1323,CAL +11,1,0,0,1,1,1,11,5,3,17,237,-531.7,-240.1,617,1,1235,CAL +11,2,0,0,1,1,1,11,5,2,16,236,-497.4,-240.1,559,1,1119,CAL +11,3,0,0,1,2,1,13,3,2,8,268,-463,-240.1,478,1,957,CAL +11,4,0,0,1,2,1,13,2,5,7,267,-428.8,-240.1,385,1,771,CAL +11,5,0,0,1,2,1,13,2,4,6,266,-394.5,-240.1,290,1,581,CAL +11,6,0,0,1,2,1,13,2,3,5,265,-360.1,-240.1,201,1,403,CAL +11,7,0,0,1,2,1,13,2,2,4,264,-325.9,-240.1,118,1,237,CAL +11,8,0,1,1,2,1,53,4,5,15,1075,325.9,-240.1,93,1,187,CAL +11,9,0,1,1,2,1,53,4,4,14,1074,360.1,-240.1,173,1,347,CAL +11,10,0,1,1,2,1,53,4,3,13,1073,394.5,-240.1,259,1,519,CAL +11,11,0,1,1,2,1,53,4,2,12,1072,428.8,-240.1,351,1,703,CAL +11,12,0,1,1,2,1,53,3,5,11,1071,463,-240.1,445,1,891,CAL +11,13,0,1,1,1,1,51,1,5,3,1023,497.4,-240.1,533,1,1067,CAL +11,14,0,1,1,1,1,51,1,4,2,1022,531.7,-240.1,602,1,1205,CAL +11,15,0,1,1,1,1,51,1,3,1,1021,566,-240.1,650,1,1301,CAL +12,0,0,0,1,1,1,11,4,5,15,235,-583.1,-205.8,662,1,1325,CAL +12,1,0,0,1,1,1,11,4,4,14,234,-548.8,-205.8,618,1,1237,CAL +12,2,0,0,1,1,1,11,4,3,13,233,-514.5,-205.8,560,1,1121,CAL +12,3,0,0,1,1,1,11,4,2,12,232,-480.2,-205.8,479,1,959,CAL +12,4,0,0,1,2,1,13,1,5,3,263,-445.9,-205.8,386,1,773,CAL +12,5,0,0,1,2,1,13,1,4,2,262,-411.6,-205.8,291,1,583,CAL +12,6,0,0,1,2,1,13,1,3,1,261,-377.3,-205.8,202,1,405,CAL +12,7,0,0,1,2,1,13,1,2,0,260,-343,-205.8,119,1,239,CAL +12,8,0,1,1,2,1,53,5,5,19,1079,343,-205.8,92,1,185,CAL +12,9,0,1,1,2,1,53,5,4,18,1078,377.3,-205.8,172,1,345,CAL +12,10,0,1,1,2,1,53,5,3,17,1077,411.6,-205.8,258,1,517,CAL +12,11,0,1,1,2,1,53,5,2,16,1076,445.9,-205.8,350,1,701,CAL +12,12,0,1,1,1,1,51,2,5,7,1027,480.2,-205.8,444,1,889,CAL +12,13,0,1,1,1,1,51,2,4,6,1026,514.5,-205.8,532,1,1065,CAL +12,14,0,1,1,1,1,51,2,3,5,1025,548.8,-205.8,601,1,1203,CAL +12,15,0,1,1,1,1,51,2,2,4,1024,583.1,-205.8,649,1,1299,CAL 
+13,0,0,0,1,1,1,11,3,5,11,231,-600.2,-171.5,663,1,1327,CAL +13,1,0,0,1,1,1,11,3,4,10,230,-566,-171.5,619,1,1239,CAL +13,2,0,0,1,1,1,11,3,3,9,229,-531.7,-171.5,561,1,1123,CAL +13,3,0,0,1,1,1,11,3,2,8,228,-497.4,-171.5,480,1,961,CAL +13,4,0,0,1,1,1,11,2,5,7,227,-463,-171.5,387,1,775,CAL +13,5,0,0,1,1,1,11,2,4,6,226,-428.8,-171.5,292,1,585,CAL +13,6,0,0,1,1,1,11,2,3,5,225,-394.5,-171.5,203,1,407,CAL +13,7,0,0,1,1,1,11,2,2,4,224,-360.1,-171.5,120,1,241,CAL +13,8,0,1,1,1,1,51,4,5,15,1035,360.1,-171.5,91,1,183,CAL +13,9,0,1,1,1,1,51,4,4,14,1034,394.5,-171.5,171,1,343,CAL +13,10,0,1,1,1,1,51,4,3,13,1033,428.8,-171.5,257,1,515,CAL +13,11,0,1,1,1,1,51,4,2,12,1032,463,-171.5,349,1,699,CAL +13,12,0,1,1,1,1,51,3,5,11,1031,497.4,-171.5,443,1,887,CAL +13,13,0,1,1,1,1,51,3,4,10,1030,531.7,-171.5,531,1,1063,CAL +13,14,0,1,1,1,1,51,3,3,9,1029,566,-171.5,600,1,1201,CAL +13,15,0,1,1,1,1,51,3,2,8,1028,600.2,-171.5,648,1,1297,CAL +14,0,0,0,1,0,1,9,5,3,17,197,-617.4,-137.2,664,1,1329,CAL +14,1,0,0,1,0,1,9,4,4,14,194,-583.1,-137.2,620,1,1241,CAL +14,2,0,0,1,0,1,9,3,5,11,191,-548.8,-137.2,562,1,1125,CAL +14,3,0,0,1,0,1,9,3,2,8,188,-514.5,-137.2,481,1,963,CAL +14,4,0,0,1,1,1,11,1,5,3,223,-480.2,-137.2,388,1,777,CAL +14,5,0,0,1,1,1,11,1,4,2,222,-445.9,-137.2,293,1,587,CAL +14,6,0,0,1,1,1,11,1,3,1,221,-411.6,-137.2,204,1,409,CAL +14,7,0,0,1,1,1,11,1,2,0,220,-377.3,-137.2,121,1,243,CAL +14,8,0,1,1,1,1,51,5,5,19,1039,377.3,-137.2,90,1,181,CAL +14,9,0,1,1,1,1,51,5,4,18,1038,411.6,-137.2,170,1,341,CAL +14,10,0,1,1,1,1,51,5,3,17,1037,445.9,-137.2,256,1,513,CAL +14,11,0,1,1,1,1,51,5,2,16,1036,480.2,-137.2,348,1,697,CAL +14,12,0,1,1,0,1,49,3,4,10,990,514.5,-137.2,442,1,885,CAL +14,13,0,1,1,0,1,49,2,5,7,987,548.8,-137.2,530,1,1061,CAL +14,14,0,1,1,0,1,49,2,2,4,984,583.1,-137.2,599,1,1199,CAL +14,15,0,1,1,0,1,49,1,3,1,981,617.4,-137.2,647,1,1295,CAL +15,0,0,0,1,0,1,9,5,2,16,196,-600.2,-102.9,621,1,1243,CAL +15,1,0,0,1,0,1,9,4,3,13,193,-566,-102.9,563,1,1127,CAL 
+15,2,0,0,1,0,1,9,3,4,10,190,-531.7,-102.9,482,1,965,CAL +15,3,0,0,1,0,1,9,2,5,7,187,-497.4,-102.9,389,1,779,CAL +15,4,0,0,1,0,1,9,2,3,5,185,-463,-102.9,294,1,589,CAL +15,5,0,0,1,0,1,9,1,5,3,183,-428.8,-102.9,205,1,411,CAL +15,6,0,0,1,0,1,9,1,3,1,181,-394.5,-102.9,122,1,245,CAL +15,7,0,1,1,0,1,49,5,3,17,997,394.5,-102.9,89,1,179,CAL +15,8,0,1,1,0,1,49,4,5,15,995,428.8,-102.9,169,1,339,CAL +15,9,0,1,1,0,1,49,4,3,13,993,463,-102.9,255,1,511,CAL +15,10,0,1,1,0,1,49,3,5,11,991,497.4,-102.9,347,1,695,CAL +15,11,0,1,1,0,1,49,3,2,8,988,531.7,-102.9,441,1,883,CAL +15,12,0,1,1,0,1,49,2,3,5,985,566,-102.9,529,1,1059,CAL +15,13,0,1,1,0,1,49,1,4,2,982,600.2,-102.9,598,1,1197,CAL +16,0,0,0,1,0,1,9,4,5,15,195,-617.4,-68.6,622,1,1245,CAL +16,1,0,0,1,0,1,9,4,2,12,192,-583.1,-68.6,564,1,1129,CAL +16,2,0,0,1,0,1,9,3,3,9,189,-548.8,-68.6,483,1,967,CAL +16,3,0,0,1,0,1,9,2,4,6,186,-514.5,-68.6,390,1,781,CAL +16,4,0,0,1,0,1,9,2,2,4,184,-480.2,-68.6,295,1,591,CAL +16,5,0,0,1,0,1,9,1,4,2,182,-445.9,-68.6,206,1,413,CAL +16,6,0,0,1,0,1,9,1,2,0,180,-411.6,-68.6,123,1,247,CAL +16,7,0,1,1,0,1,49,5,4,18,998,411.6,-68.6,88,1,177,CAL +16,8,0,1,1,0,1,49,5,2,16,996,445.9,-68.6,168,1,337,CAL +16,9,0,1,1,0,1,49,4,4,14,994,480.2,-68.6,254,1,509,CAL +16,10,0,1,1,0,1,49,4,2,12,992,514.5,-68.6,346,1,693,CAL +16,11,0,1,1,0,1,49,3,3,9,989,548.8,-68.6,440,1,881,CAL +16,12,0,1,1,0,1,49,2,4,6,986,583.1,-68.6,528,1,1057,CAL +16,13,0,1,1,0,1,49,1,5,3,983,617.4,-68.6,597,1,1195,CAL +17,0,0,0,2,2,1,21,5,5,19,439,-634.5,-34.3,623,1,1247,CAL +17,1,0,0,2,2,1,21,5,4,18,438,-600.2,-34.3,565,1,1131,CAL +17,2,0,0,2,2,1,21,5,2,16,436,-566,-34.3,484,1,969,CAL +17,3,0,0,2,2,1,21,4,4,14,434,-531.7,-34.3,391,1,783,CAL +17,4,0,0,2,2,1,21,3,5,11,431,-497.4,-34.3,296,1,593,CAL +17,5,0,0,2,2,1,21,3,2,8,428,-463,-34.3,207,1,415,CAL +17,6,0,0,2,2,1,21,2,3,5,425,-428.8,-34.3,124,1,249,CAL +17,7,0,0,2,2,1,21,1,4,2,422,-394.5,-34.3,54,1,109,CAL +17,8,0,1,2,2,1,61,5,5,19,1239,394.5,-34.3,34,1,69,CAL 
+17,9,0,1,2,2,1,61,5,2,16,1236,428.8,-34.3,87,1,175,CAL +17,10,0,1,2,2,1,61,4,3,13,1233,463,-34.3,167,1,335,CAL +17,11,0,1,2,2,1,61,3,4,10,1230,497.4,-34.3,253,1,507,CAL +17,12,0,1,2,2,1,61,2,5,7,1227,531.7,-34.3,345,1,691,CAL +17,13,0,1,2,2,1,61,2,2,4,1224,566,-34.3,439,1,879,CAL +17,14,0,1,2,2,1,61,1,4,2,1222,600.2,-34.3,527,1,1055,CAL +17,15,0,1,2,2,1,61,1,2,0,1220,634.5,-34.3,596,1,1193,CAL +18,0,0,0,2,2,1,21,5,3,17,437,-617.4,0,566,1,1133,CAL +18,1,0,0,2,2,1,21,4,5,15,435,-583.1,0,485,1,971,CAL +18,2,0,0,2,2,1,21,4,3,13,433,-548.8,0,392,1,785,CAL +18,3,0,0,2,2,1,21,3,4,10,430,-514.5,0,297,1,595,CAL +18,4,0,0,2,2,1,21,2,5,7,427,-480.2,0,208,1,417,CAL +18,5,0,0,2,2,1,21,2,2,4,424,-445.9,0,125,1,251,CAL +18,6,0,0,2,2,1,21,1,3,1,421,-411.6,0,55,1,111,CAL +18,7,0,1,2,2,1,61,5,3,17,1237,411.6,0,33,1,67,CAL +18,8,0,1,2,2,1,61,4,4,14,1234,445.9,0,86,1,173,CAL +18,9,0,1,2,2,1,61,3,5,11,1231,480.2,0,166,1,333,CAL +18,10,0,1,2,2,1,61,3,2,8,1228,514.5,0,252,1,505,CAL +18,11,0,1,2,2,1,61,2,3,5,1225,548.8,0,344,1,689,CAL +18,12,0,1,2,2,1,61,1,5,3,1223,583.1,0,438,1,877,CAL +18,13,0,1,2,2,1,61,1,3,1,1221,617.4,0,526,1,1053,CAL +19,0,0,0,2,1,1,19,5,5,19,399,-634.5,34.3,624,1,1249,CAL +19,1,0,0,2,1,1,19,4,5,15,395,-600.2,34.3,567,1,1135,CAL +19,2,0,0,2,1,1,19,4,4,14,394,-566,34.3,486,1,973,CAL +19,3,0,0,2,2,1,21,4,2,12,432,-531.7,34.3,393,1,787,CAL +19,4,0,0,2,2,1,21,3,3,9,429,-497.4,34.3,298,1,597,CAL +19,5,0,0,2,2,1,21,2,4,6,426,-463,34.3,209,1,419,CAL +19,6,0,0,2,2,1,21,1,5,3,423,-428.8,34.3,126,1,253,CAL +19,7,0,0,2,2,1,21,1,2,0,420,-394.5,34.3,56,1,113,CAL +19,8,0,1,2,2,1,61,5,4,18,1238,394.5,34.3,32,1,65,CAL +19,9,0,1,2,2,1,61,4,5,15,1235,428.8,34.3,85,1,171,CAL +19,10,0,1,2,2,1,61,4,2,12,1232,463,34.3,165,1,331,CAL +19,11,0,1,2,2,1,61,3,3,9,1229,497.4,34.3,251,1,503,CAL +19,12,0,1,2,2,1,61,2,4,6,1226,531.7,34.3,343,1,687,CAL +19,13,0,1,2,1,1,59,1,4,2,1182,566,34.3,437,1,875,CAL +19,14,0,1,2,1,1,59,1,3,1,1181,600.2,34.3,525,1,1051,CAL 
+19,15,0,1,2,1,1,59,1,2,0,1180,634.5,34.3,595,1,1191,CAL +20,0,0,0,2,1,1,19,5,4,18,398,-617.4,68.6,625,1,1251,CAL +20,1,0,0,2,1,1,19,4,3,13,393,-583.1,68.6,568,1,1137,CAL +20,2,0,0,2,1,1,19,4,2,12,392,-548.8,68.6,487,1,975,CAL +20,3,0,0,2,1,1,19,2,5,7,387,-514.5,68.6,394,1,789,CAL +20,4,0,0,2,1,1,19,2,4,6,386,-480.2,68.6,299,1,599,CAL +20,5,0,0,2,1,1,19,1,5,3,383,-445.9,68.6,210,1,421,CAL +20,6,0,0,2,1,1,19,1,4,2,382,-411.6,68.6,127,1,255,CAL +20,7,0,1,2,1,1,59,5,4,18,1198,411.6,68.6,84,1,169,CAL +20,8,0,1,2,1,1,59,5,2,16,1196,445.9,68.6,164,1,329,CAL +20,9,0,1,2,1,1,59,4,4,14,1194,480.2,68.6,250,1,501,CAL +20,10,0,1,2,1,1,59,4,2,12,1192,514.5,68.6,342,1,685,CAL +20,11,0,1,2,1,1,59,3,3,9,1189,548.8,68.6,436,1,873,CAL +20,12,0,1,2,1,1,59,2,4,6,1186,583.1,68.6,524,1,1049,CAL +20,13,0,1,2,1,1,59,1,5,3,1183,617.4,68.6,594,1,1189,CAL +21,0,0,0,2,1,1,19,5,3,17,397,-600.2,102.9,626,1,1253,CAL +21,1,0,0,2,1,1,19,3,3,9,389,-566,102.9,569,1,1139,CAL +21,2,0,0,2,1,1,19,3,2,8,388,-531.7,102.9,488,1,977,CAL +21,3,0,0,2,1,1,19,2,3,5,385,-497.4,102.9,395,1,791,CAL +21,4,0,0,2,1,1,19,2,2,4,384,-463,102.9,300,1,601,CAL +21,5,0,0,2,1,1,19,1,3,1,381,-428.8,102.9,211,1,423,CAL +21,6,0,0,2,1,1,19,1,2,0,380,-394.5,102.9,128,1,257,CAL +21,7,0,1,2,1,1,59,5,5,19,1199,394.5,102.9,83,1,167,CAL +21,8,0,1,2,1,1,59,5,3,17,1197,428.8,102.9,163,1,327,CAL +21,9,0,1,2,1,1,59,4,5,15,1195,463,102.9,249,1,499,CAL +21,10,0,1,2,1,1,59,4,3,13,1193,497.4,102.9,341,1,683,CAL +21,11,0,1,2,1,1,59,3,4,10,1190,531.7,102.9,435,1,871,CAL +21,12,0,1,2,1,1,59,2,5,7,1187,566,102.9,523,1,1047,CAL +21,13,0,1,2,1,1,59,2,2,4,1184,600.2,102.9,593,1,1187,CAL +22,0,0,0,2,1,1,19,5,2,16,396,-617.4,137.2,665,1,1331,CAL +22,1,0,0,2,1,1,19,3,5,11,391,-583.1,137.2,627,1,1255,CAL +22,2,0,0,2,1,1,19,3,4,10,390,-548.8,137.2,570,1,1141,CAL +22,3,0,0,2,0,1,17,4,2,12,352,-514.5,137.2,489,1,979,CAL +22,4,0,0,2,0,1,17,3,3,9,349,-480.2,137.2,396,1,793,CAL +22,5,0,0,2,0,1,17,2,4,6,346,-445.9,137.2,301,1,603,CAL 
+22,6,0,0,2,0,1,17,1,5,3,343,-411.6,137.2,212,1,425,CAL +22,7,0,0,2,0,1,17,1,3,1,341,-377.3,137.2,129,1,259,CAL +22,8,0,1,2,0,1,57,5,4,18,1158,377.3,137.2,82,1,165,CAL +22,9,0,1,2,0,1,57,5,2,16,1156,411.6,137.2,162,1,325,CAL +22,10,0,1,2,0,1,57,4,3,13,1153,445.9,137.2,248,1,497,CAL +22,11,0,1,2,0,1,57,3,4,10,1150,480.2,137.2,340,1,681,CAL +22,12,0,1,2,0,1,57,2,5,7,1147,514.5,137.2,434,1,869,CAL +22,13,0,1,2,1,1,59,3,5,11,1191,548.8,137.2,522,1,1045,CAL +22,14,0,1,2,1,1,59,3,2,8,1188,583.1,137.2,592,1,1185,CAL +22,15,0,1,2,1,1,59,2,3,5,1185,617.4,137.2,646,1,1293,CAL +23,0,0,0,2,0,1,17,5,5,19,359,-600.2,171.5,666,1,1333,CAL +23,1,0,0,2,0,1,17,5,2,16,356,-566,171.5,628,1,1257,CAL +23,2,0,0,2,0,1,17,4,4,14,354,-531.7,171.5,571,1,1143,CAL +23,3,0,0,2,0,1,17,3,5,11,351,-497.4,171.5,490,1,981,CAL +23,4,0,0,2,0,1,17,3,2,8,348,-463,171.5,397,1,795,CAL +23,5,0,0,2,0,1,17,2,3,5,345,-428.8,171.5,302,1,605,CAL +23,6,0,0,2,0,1,17,1,4,2,342,-394.5,171.5,213,1,427,CAL +23,7,0,0,2,0,1,17,1,2,0,340,-360.1,171.5,130,1,261,CAL +23,8,0,1,2,0,1,57,5,5,19,1159,360.1,171.5,81,1,163,CAL +23,9,0,1,2,0,1,57,5,3,17,1157,394.5,171.5,161,1,323,CAL +23,10,0,1,2,0,1,57,4,4,14,1154,428.8,171.5,247,1,495,CAL +23,11,0,1,2,0,1,57,3,5,11,1151,463,171.5,339,1,679,CAL +23,12,0,1,2,0,1,57,3,2,8,1148,497.4,171.5,433,1,867,CAL +23,13,0,1,2,0,1,57,2,3,5,1145,531.7,171.5,521,1,1043,CAL +23,14,0,1,2,0,1,57,1,5,3,1143,566,171.5,591,1,1183,CAL +23,15,0,1,2,0,1,57,1,2,0,1140,600.2,171.5,645,1,1291,CAL +24,0,0,0,2,0,1,17,5,4,18,358,-583.1,205.8,667,1,1335,CAL +24,1,0,0,2,0,1,17,4,5,15,355,-548.8,205.8,629,1,1259,CAL +24,2,0,0,2,0,1,17,4,3,13,353,-514.5,205.8,572,1,1145,CAL +24,3,0,0,2,0,1,17,3,4,10,350,-480.2,205.8,491,1,983,CAL +24,4,0,0,2,0,1,17,2,5,7,347,-445.9,205.8,398,1,797,CAL +24,5,0,0,2,0,1,17,2,2,4,344,-411.6,205.8,303,1,607,CAL +24,6,0,0,3,2,1,29,1,3,1,581,-377.3,205.8,214,1,429,CAL +24,7,0,0,3,2,1,29,1,2,0,580,-343,205.8,131,1,263,CAL +24,8,0,1,3,2,1,69,5,5,19,1399,343,205.8,80,1,161,CAL 
+24,9,0,1,3,2,1,69,5,4,18,1398,377.3,205.8,160,1,321,CAL +24,10,0,1,2,0,1,57,4,5,15,1155,411.6,205.8,246,1,493,CAL +24,11,0,1,2,0,1,57,4,2,12,1152,445.9,205.8,338,1,677,CAL +24,12,0,1,2,0,1,57,3,3,9,1149,480.2,205.8,432,1,865,CAL +24,13,0,1,2,0,1,57,2,4,6,1146,514.5,205.8,520,1,1041,CAL +24,14,0,1,2,0,1,57,2,2,4,1144,548.8,205.8,590,1,1181,CAL +24,15,0,1,2,0,1,57,1,3,1,1141,583.1,205.8,644,1,1289,CAL +25,0,0,0,2,0,1,17,5,3,17,357,-566,240.1,668,1,1337,CAL +25,1,0,0,3,2,1,29,5,4,18,598,-531.7,240.1,630,1,1261,CAL +25,2,0,0,3,2,1,29,5,3,17,597,-497.4,240.1,573,1,1147,CAL +25,3,0,0,3,2,1,29,5,2,16,596,-463,240.1,492,1,985,CAL +25,4,0,0,3,2,1,29,2,3,5,585,-428.8,240.1,399,1,799,CAL +25,5,0,0,3,2,1,29,2,2,4,584,-394.5,240.1,304,1,609,CAL +25,6,0,0,3,2,1,29,1,5,3,583,-360.1,240.1,215,1,431,CAL +25,7,0,0,3,2,1,29,1,4,2,582,-325.9,240.1,132,1,265,CAL +25,8,0,1,3,2,1,69,5,3,17,1397,325.9,240.1,79,1,159,CAL +25,9,0,1,3,2,1,69,5,2,16,1396,360.1,240.1,159,1,319,CAL +25,10,0,1,3,2,1,69,4,5,15,1395,394.5,240.1,245,1,491,CAL +25,11,0,1,3,2,1,69,4,4,14,1394,428.8,240.1,337,1,675,CAL +25,12,0,1,3,2,1,69,1,5,3,1383,463,240.1,431,1,863,CAL +25,13,0,1,3,2,1,69,1,4,2,1382,497.4,240.1,519,1,1039,CAL +25,14,0,1,3,2,1,69,1,3,1,1381,531.7,240.1,589,1,1179,CAL +25,15,0,1,2,0,1,57,1,4,2,1142,566,240.1,643,1,1287,CAL +26,0,0,0,3,2,1,29,5,5,19,599,-548.8,274.4,669,1,1339,CAL +26,1,0,0,3,2,1,29,4,4,14,594,-514.5,274.4,631,1,1263,CAL +26,2,0,0,3,2,1,29,4,3,13,593,-480.2,274.4,574,1,1149,CAL +26,3,0,0,3,2,1,29,4,2,12,592,-445.9,274.4,493,1,987,CAL +26,4,0,0,3,2,1,29,2,5,7,587,-411.6,274.4,400,1,801,CAL +26,5,0,0,3,2,1,29,2,4,6,586,-377.3,274.4,305,1,611,CAL +26,6,0,0,3,1,1,27,1,3,1,541,-343,274.4,216,1,433,CAL +26,7,0,0,3,1,1,27,1,2,0,540,-308.7,274.4,133,1,267,CAL +26,8,0,1,3,1,1,67,5,5,19,1359,308.7,274.4,78,1,157,CAL +26,9,0,1,3,1,1,67,5,4,18,1358,343,274.4,158,1,317,CAL +26,10,0,1,3,2,1,69,4,3,13,1393,377.3,274.4,244,1,489,CAL +26,11,0,1,3,2,1,69,4,2,12,1392,411.6,274.4,336,1,673,CAL 
+26,12,0,1,3,2,1,69,2,5,7,1387,445.9,274.4,430,1,861,CAL +26,13,0,1,3,2,1,69,2,4,6,1386,480.2,274.4,518,1,1037,CAL +26,14,0,1,3,2,1,69,2,3,5,1385,514.5,274.4,588,1,1177,CAL +26,15,0,1,3,2,1,69,1,2,0,1380,548.8,274.4,642,1,1285,CAL +27,0,0,0,3,2,1,29,4,5,15,595,-531.7,308.7,670,1,1341,CAL +27,1,0,0,3,2,1,29,3,4,10,590,-497.4,308.7,632,1,1265,CAL +27,2,0,0,3,2,1,29,3,3,9,589,-463,308.7,575,1,1151,CAL +27,3,0,0,3,2,1,29,3,2,8,588,-428.8,308.7,494,1,989,CAL +27,4,0,0,3,1,1,27,2,4,6,546,-394.5,308.7,401,1,803,CAL +27,5,0,0,3,1,1,27,2,3,5,545,-360.1,308.7,306,1,613,CAL +27,6,0,0,3,1,1,27,2,2,4,544,-325.9,308.7,217,1,435,CAL +27,7,0,0,3,1,1,27,1,5,3,543,-291.5,308.7,134,1,269,CAL +27,8,0,0,3,1,1,27,1,4,2,542,-257.2,308.7,57,1,115,CAL +27,9,0,1,3,1,1,67,5,3,17,1357,257.2,308.7,31,1,63,CAL +27,10,0,1,3,1,1,67,5,2,16,1356,291.5,308.7,77,1,155,CAL +27,11,0,1,3,1,1,67,4,5,15,1355,325.9,308.7,157,1,315,CAL +27,12,0,1,3,1,1,67,4,4,14,1354,360.1,308.7,243,1,487,CAL +27,13,0,1,3,1,1,67,4,3,13,1353,394.5,308.7,335,1,671,CAL +27,14,0,1,3,2,1,69,3,5,11,1391,428.8,308.7,429,1,859,CAL +27,15,0,1,3,2,1,69,3,4,10,1390,463,308.7,517,1,1035,CAL +27,16,0,1,3,2,1,69,3,3,9,1389,497.4,308.7,587,1,1175,CAL +27,17,0,1,3,2,1,69,2,2,4,1384,531.7,308.7,641,1,1283,CAL +28,0,0,0,3,2,1,29,3,5,11,591,-514.5,343,671,1,1343,CAL +28,1,0,0,3,1,1,27,3,5,11,551,-480.2,343,633,1,1267,CAL +28,2,0,0,3,1,1,27,3,4,10,550,-445.9,343,576,1,1153,CAL +28,3,0,0,3,1,1,27,3,3,9,549,-411.6,343,495,1,991,CAL +28,4,0,0,3,1,1,27,3,2,8,548,-377.3,343,402,1,805,CAL +28,5,0,0,3,1,1,27,2,5,7,547,-343,343,307,1,615,CAL +28,6,0,0,3,0,1,25,1,5,3,503,-308.7,343,218,1,437,CAL +28,7,0,0,3,0,1,25,1,4,2,502,-274.4,343,135,1,271,CAL +28,8,0,0,3,0,1,25,1,3,1,501,-240.1,343,58,1,117,CAL +28,9,0,0,3,0,1,25,1,2,0,500,-205.8,343,15,1,31,CAL +28,10,0,1,3,0,1,65,5,5,19,1319,205.8,343,6,1,13,CAL +28,11,0,1,3,0,1,65,5,4,18,1318,240.1,343,30,1,61,CAL +28,12,0,1,3,0,1,65,5,3,17,1317,274.4,343,76,1,153,CAL 
+28,13,0,1,3,0,1,65,5,2,16,1316,308.7,343,156,1,313,CAL +28,14,0,1,3,1,1,67,4,2,12,1352,343,343,242,1,485,CAL +28,15,0,1,3,1,1,67,3,5,11,1351,377.3,343,334,1,669,CAL +28,16,0,1,3,1,1,67,3,4,10,1350,411.6,343,428,1,857,CAL +28,17,0,1,3,1,1,67,3,3,9,1349,445.9,343,516,1,1033,CAL +28,18,0,1,3,1,1,67,3,2,8,1348,480.2,343,586,1,1173,CAL +28,19,0,1,3,2,1,69,3,2,8,1388,514.5,343,640,1,1281,CAL +29,0,0,0,3,1,1,27,4,5,15,555,-497.4,377.3,672,1,1345,CAL +29,1,0,0,3,1,1,27,4,4,14,554,-463,377.3,634,1,1269,CAL +29,2,0,0,3,1,1,27,4,3,13,553,-428.8,377.3,577,1,1155,CAL +29,3,0,0,3,1,1,27,4,2,12,552,-394.5,377.3,496,1,993,CAL +29,4,0,0,3,0,1,25,2,5,7,507,-360.1,377.3,403,1,807,CAL +29,5,0,0,3,0,1,25,2,4,6,506,-325.9,377.3,308,1,617,CAL +29,6,0,0,3,0,1,25,2,3,5,505,-291.5,377.3,219,1,439,CAL +29,7,0,0,3,0,1,25,2,2,4,504,-257.2,377.3,136,1,273,CAL +29,8,0,0,4,2,1,37,5,3,17,757,-222.9,377.3,59,1,119,CAL +29,9,0,0,4,2,1,37,5,4,18,758,-188.6,377.3,0,1,1,CAL +29,10,0,0,4,2,1,37,5,5,19,759,-154.4,377.3,1,1,3,CAL +29,11,0,0,4,1,1,35,5,5,19,719,-120.1,377.3,2,1,5,CAL +29,12,0,1,4,1,1,75,1,2,0,1500,120.1,377.3,3,1,7,CAL +29,13,0,1,4,2,1,77,1,2,0,1540,154.4,377.3,4,1,9,CAL +29,14,0,1,4,2,1,77,1,3,1,1541,188.6,377.3,5,1,11,CAL +29,15,0,1,4,2,1,77,1,4,2,1542,222.9,377.3,29,1,59,CAL +29,16,0,1,3,0,1,65,4,5,15,1315,257.2,377.3,75,1,151,CAL +29,17,0,1,3,0,1,65,4,4,14,1314,291.5,377.3,155,1,311,CAL +29,18,0,1,3,0,1,65,4,3,13,1313,325.9,377.3,241,1,483,CAL +29,19,0,1,3,0,1,65,4,2,12,1312,360.1,377.3,333,1,667,CAL +29,20,0,1,3,1,1,67,2,5,7,1347,394.5,377.3,427,1,855,CAL +29,21,0,1,3,1,1,67,2,4,6,1346,428.8,377.3,515,1,1031,CAL +29,22,0,1,3,1,1,67,2,3,5,1345,463,377.3,585,1,1171,CAL +29,23,0,1,3,1,1,67,2,2,4,1344,497.4,377.3,639,1,1279,CAL +30,0,0,0,3,1,1,27,5,4,18,558,-480.2,411.6,673,1,1347,CAL +30,1,0,0,3,1,1,27,5,3,17,557,-445.9,411.6,635,1,1271,CAL +30,2,0,0,3,1,1,27,5,2,16,556,-411.6,411.6,578,1,1157,CAL +30,3,0,0,3,0,1,25,3,5,11,511,-377.3,411.6,497,1,995,CAL 
+30,4,0,0,3,0,1,25,3,4,10,510,-343,411.6,404,1,809,CAL +30,5,0,0,3,0,1,25,3,3,9,509,-308.7,411.6,309,1,619,CAL +30,6,0,0,3,0,1,25,3,2,8,508,-274.4,411.6,220,1,441,CAL +30,7,0,0,4,2,1,37,4,4,14,754,-240.1,411.6,137,1,275,CAL +30,8,0,0,4,2,1,37,4,5,15,755,-205.8,411.6,16,1,33,CAL +30,9,0,0,4,2,1,37,5,2,16,756,-171.5,411.6,17,1,35,CAL +30,10,0,0,4,1,1,35,5,2,16,716,-137.2,411.6,18,1,37,CAL +30,11,0,0,4,1,1,35,5,3,17,717,-102.9,411.6,19,1,39,CAL +30,12,0,0,4,1,1,35,5,4,18,718,-68.6,411.6,20,1,41,CAL +30,13,0,0,4,0,1,33,5,4,18,678,-34.3,411.6,21,1,43,CAL +30,14,0,0,4,0,1,33,5,5,19,679,0,411.6,22,1,45,CAL +30,15,0,1,4,0,1,73,1,2,0,1460,34.3,411.6,23,1,47,CAL +30,16,0,1,4,1,1,75,1,3,1,1501,68.6,411.6,24,1,49,CAL +30,17,0,1,4,1,1,75,1,4,2,1502,102.9,411.6,25,1,51,CAL +30,18,0,1,4,1,1,75,1,5,3,1503,137.2,411.6,26,1,53,CAL +30,19,0,1,4,2,1,77,1,5,3,1543,171.5,411.6,27,1,55,CAL +30,20,0,1,4,2,1,77,2,2,4,1544,205.8,411.6,28,1,57,CAL +30,21,0,1,4,2,1,77,2,3,5,1545,240.1,411.6,74,1,149,CAL +30,22,0,1,3,0,1,65,3,5,11,1311,274.4,411.6,154,1,309,CAL +30,23,0,1,3,0,1,65,3,4,10,1310,308.7,411.6,240,1,481,CAL +30,24,0,1,3,0,1,65,3,3,9,1309,343,411.6,332,1,665,CAL +30,25,0,1,3,0,1,65,3,2,8,1308,377.3,411.6,426,1,853,CAL +30,26,0,1,3,1,1,67,1,5,3,1343,411.6,411.6,514,1,1029,CAL +30,27,0,1,3,1,1,67,1,4,2,1342,445.9,411.6,584,1,1169,CAL +30,28,0,1,3,1,1,67,1,3,1,1341,480.2,411.6,638,1,1277,CAL +31,0,0,0,3,1,1,27,5,5,19,559,-428.8,445.9,636,1,1273,CAL +31,1,0,0,3,0,1,25,4,5,15,515,-394.5,445.9,579,1,1159,CAL +31,2,0,0,3,0,1,25,4,4,14,514,-360.1,445.9,498,1,997,CAL +31,3,0,0,3,0,1,25,4,3,13,513,-325.9,445.9,405,1,811,CAL +31,4,0,0,3,0,1,25,4,2,12,512,-291.5,445.9,310,1,621,CAL +31,5,0,0,4,2,1,37,3,5,11,751,-257.2,445.9,221,1,443,CAL +31,6,0,0,4,2,1,37,4,2,12,752,-222.9,445.9,60,1,121,CAL +31,7,0,0,4,2,1,37,4,3,13,753,-188.6,445.9,61,1,123,CAL +31,8,0,0,4,1,1,35,4,3,13,713,-154.4,445.9,62,1,125,CAL +31,9,0,0,4,1,1,35,4,4,14,714,-120.1,445.9,63,1,127,CAL 
+31,10,0,0,4,1,1,35,4,5,15,715,-85.8,445.9,64,1,129,CAL +31,11,0,0,4,0,1,33,5,2,16,676,-51.5,445.9,65,1,131,CAL +31,12,0,0,4,0,1,33,5,3,17,677,-17.1,445.9,66,1,133,CAL +31,13,0,1,4,0,1,73,1,3,1,1461,17.1,445.9,67,1,135,CAL +31,14,0,1,4,0,1,73,1,4,2,1462,51.5,445.9,68,1,137,CAL +31,15,0,1,4,1,1,75,2,2,4,1504,85.8,445.9,69,1,139,CAL +31,16,0,1,4,1,1,75,2,3,5,1505,120.1,445.9,70,1,141,CAL +31,17,0,1,4,1,1,75,2,4,6,1506,154.4,445.9,71,1,143,CAL +31,18,0,1,4,2,1,77,2,4,6,1546,188.6,445.9,72,1,145,CAL +31,19,0,1,4,2,1,77,2,5,7,1547,222.9,445.9,73,1,147,CAL +31,20,0,1,4,2,1,77,3,2,8,1548,257.2,445.9,153,1,307,CAL +31,21,0,1,3,0,1,65,2,5,7,1307,291.5,445.9,239,1,479,CAL +31,22,0,1,3,0,1,65,2,4,6,1306,325.9,445.9,331,1,663,CAL +31,23,0,1,3,0,1,65,2,3,5,1305,360.1,445.9,425,1,851,CAL +31,24,0,1,3,0,1,65,2,2,4,1304,394.5,445.9,513,1,1027,CAL +31,25,0,1,3,1,1,67,1,2,0,1340,428.8,445.9,583,1,1167,CAL +32,0,0,0,3,0,1,25,5,4,18,518,-411.6,480.2,637,1,1275,CAPHRI +32,1,0,0,3,0,1,25,5,3,17,517,-377.3,480.2,580,1,1161,CAL +32,2,0,0,3,0,1,25,5,2,16,516,-343,480.2,499,1,999,CAL +32,3,0,0,4,2,1,37,2,5,7,747,-308.7,480.2,406,1,813,CAL +32,4,0,0,4,2,1,37,3,2,8,748,-274.4,480.2,311,1,623,CAL +32,5,0,0,4,2,1,37,3,3,9,749,-240.1,480.2,138,1,277,CAL +32,6,0,0,4,2,1,37,3,4,10,750,-205.8,480.2,139,1,279,CAL +32,7,0,0,4,1,1,35,3,4,10,710,-171.5,480.2,140,1,281,CAL +32,8,0,0,4,1,1,35,3,5,11,711,-137.2,480.2,141,1,283,CAL +32,9,0,0,4,1,1,35,4,2,12,712,-102.9,480.2,142,1,285,CAL +32,10,0,0,4,0,1,33,4,4,14,674,-68.6,480.2,143,1,287,CAL +32,11,0,0,4,0,1,33,4,5,15,675,-34.3,480.2,144,1,289,CAL +32,12,0,1,4,0,1,73,1,5,3,1463,0,480.2,145,1,291,CAL +32,13,0,1,4,0,1,73,2,2,4,1464,34.3,480.2,146,1,293,CAL +32,14,0,1,4,0,1,73,2,3,5,1465,68.6,480.2,147,1,295,CAL +32,15,0,1,4,1,1,75,2,5,7,1507,102.9,480.2,148,1,297,CAL +32,16,0,1,4,1,1,75,3,2,8,1508,137.2,480.2,149,1,299,CAL +32,17,0,1,4,1,1,75,3,3,9,1509,171.5,480.2,150,1,301,CAL +32,18,0,1,4,2,1,77,3,3,9,1549,205.8,480.2,151,1,303,CAL 
+32,19,0,1,4,2,1,77,3,4,10,1550,240.1,480.2,152,1,305,CAL +32,20,0,1,4,2,1,77,3,5,11,1551,274.4,480.2,238,1,477,CAL +32,21,0,1,4,2,1,77,4,2,12,1552,308.7,480.2,330,1,661,CAL +32,22,0,1,3,0,1,65,1,5,3,1303,343,480.2,424,1,849,CAL +32,23,0,1,3,0,1,65,1,4,2,1302,377.3,480.2,512,1,1025,CAL +32,24,0,1,3,0,1,65,1,3,1,1301,411.6,480.2,582,1,1165,CAPHRI +33,0,0,0,3,0,1,25,5,5,19,519,-360.1,514.5,581,1,1163,CAL +33,1,0,0,4,2,1,37,1,5,3,743,-325.9,514.5,500,1,1001,CAL +33,2,0,0,4,2,1,37,2,2,4,744,-291.5,514.5,407,1,815,CAL +33,3,0,0,4,2,1,37,2,3,5,745,-257.2,514.5,222,1,445,CAL +33,4,0,0,4,2,1,37,2,4,6,746,-222.9,514.5,223,1,447,CAL +33,5,0,0,4,1,1,35,2,5,7,707,-188.6,514.5,224,1,449,CAL +33,6,0,0,4,1,1,35,3,2,8,708,-154.4,514.5,225,1,451,CAL +33,7,0,0,4,1,1,35,3,3,9,709,-120.1,514.5,226,1,453,CAL +33,8,0,0,4,0,1,33,3,5,11,671,-85.8,514.5,227,1,455,CAL +33,9,0,0,4,0,1,33,4,2,12,672,-51.5,514.5,228,1,457,CAL +33,10,0,0,4,0,1,33,4,3,13,673,-17.1,514.5,229,1,459,CAL +33,11,0,1,4,0,1,73,2,4,6,1466,17.1,514.5,230,1,461,CAL +33,12,0,1,4,0,1,73,2,5,7,1467,51.5,514.5,231,1,463,CAL +33,13,0,1,4,0,1,73,3,2,8,1468,85.8,514.5,232,1,465,CAL +33,14,0,1,4,1,1,75,3,4,10,1510,120.1,514.5,233,1,467,CAL +33,15,0,1,4,1,1,75,3,5,11,1511,154.4,514.5,234,1,469,CAL +33,16,0,1,4,1,1,75,4,2,12,1512,188.6,514.5,235,1,471,CAL +33,17,0,1,4,2,1,77,4,3,13,1553,222.9,514.5,236,1,473,CAL +33,18,0,1,4,2,1,77,4,4,14,1554,257.2,514.5,237,1,475,CAL +33,19,0,1,4,2,1,77,4,5,15,1555,291.5,514.5,329,1,659,CAL +33,20,0,1,4,2,1,77,5,2,16,1556,325.9,514.5,423,1,847,CAL +33,21,0,1,3,0,1,65,1,2,0,1300,360.1,514.5,511,1,1023,CAL +34,0,0,0,4,2,1,37,1,2,0,740,-308.7,548.8,501,1,1003,CAL +34,1,0,0,4,2,1,37,1,3,1,741,-274.4,548.8,312,1,625,CAL +34,2,0,0,4,2,1,37,1,4,2,742,-240.1,548.8,313,1,627,CAL +34,3,0,0,4,1,1,35,1,5,3,703,-205.8,548.8,314,1,629,CAL +34,4,0,0,4,1,1,35,2,2,4,704,-171.5,548.8,315,1,631,CAL +34,5,0,0,4,1,1,35,2,3,5,705,-137.2,548.8,316,1,633,CAL +34,6,0,0,4,1,1,35,2,4,6,706,-102.9,548.8,317,1,635,CAL 
+34,7,0,0,4,0,1,33,3,2,8,668,-68.6,548.8,318,1,637,CAL +34,8,0,0,4,0,1,33,3,3,9,669,-34.3,548.8,319,1,639,CAL +34,9,0,0,4,0,1,33,3,4,10,670,0,548.8,320,1,641,CAL +34,10,0,1,4,0,1,73,3,3,9,1469,34.3,548.8,321,1,643,CAL +34,11,0,1,4,0,1,73,3,4,10,1470,68.6,548.8,322,1,645,CAL +34,12,0,1,4,0,1,73,3,5,11,1471,102.9,548.8,323,1,647,CAL +34,13,0,1,4,1,1,75,4,3,13,1513,137.2,548.8,324,1,649,CAL +34,14,0,1,4,1,1,75,4,4,14,1514,171.5,548.8,325,1,651,CAL +34,15,0,1,4,1,1,75,4,5,15,1515,205.8,548.8,326,1,653,CAL +34,16,0,1,4,2,1,77,5,3,17,1557,240.1,548.8,327,1,655,CAL +34,17,0,1,4,2,1,77,5,4,18,1558,274.4,548.8,328,1,657,CAL +34,18,0,1,4,2,1,77,5,5,19,1559,308.7,548.8,422,1,845,CAL +35,0,0,0,4,1,1,35,1,2,0,700,-222.9,583.1,408,1,817,CAL +35,1,0,0,4,1,1,35,1,3,1,701,-188.6,583.1,409,1,819,CAL +35,2,0,0,4,1,1,35,1,4,2,702,-154.4,583.1,410,1,821,CAL +35,3,0,0,4,0,1,33,2,2,4,664,-120.1,583.1,411,1,823,CAL +35,4,0,0,4,0,1,33,2,3,5,665,-85.8,583.1,412,1,825,CAL +35,5,0,0,4,0,1,33,2,4,6,666,-51.5,583.1,413,1,827,CAL +35,6,0,0,4,0,1,33,2,5,7,667,-17.1,583.1,414,1,829,CAL +35,7,0,1,4,0,1,73,4,2,12,1472,17.1,583.1,415,1,831,CAL +35,8,0,1,4,0,1,73,4,3,13,1473,51.5,583.1,416,1,833,CAL +35,9,0,1,4,0,1,73,4,4,14,1474,85.8,583.1,417,1,835,CAL +35,10,0,1,4,0,1,73,4,5,15,1475,120.1,583.1,418,1,837,CAL +35,11,0,1,4,1,1,75,5,2,16,1516,154.4,583.1,419,1,839,CAL +35,12,0,1,4,1,1,75,5,3,17,1517,188.6,583.1,420,1,841,CAL +35,13,0,1,4,1,1,75,5,4,18,1518,222.9,583.1,421,1,843,CAL +36,0,0,0,4,0,1,33,1,2,0,660,-137.2,617.4,502,1,1005,CAL +36,1,0,0,4,0,1,33,1,3,1,661,-102.9,617.4,503,1,1007,CAL +36,2,0,0,4,0,1,33,1,4,2,662,-68.6,617.4,504,1,1009,CAL +36,3,0,0,4,0,1,33,1,5,3,663,-34.3,617.4,505,1,1011,CAL +36,4,0,1,4,0,1,73,5,2,16,1476,0,617.4,506,1,1013,CAL +36,5,0,1,4,0,1,73,5,3,17,1477,34.3,617.4,507,1,1015,CAL +36,6,0,1,4,0,1,73,5,4,18,1478,68.6,617.4,508,1,1017,CAL +36,7,0,1,4,0,1,73,5,5,19,1479,102.9,617.4,509,1,1019,CAL +36,8,0,1,4,1,1,75,5,5,19,1519,137.2,617.4,510,1,1021,CAL 
+-99,-99,0,0,1,1,0,10,5,5,19,219,-700,50,3000,0,2696,PIN-DIODE +-99,-99,0,0,1,0,1,9,5,5,19,199,-700,50,3001,1,2697,PIN-DIODE +-99,-99,0,0,1,0,0,8,5,5,19,179,-700,-50,3011,0,2698,PIN-DIODE +-99,-99,0,0,1,1,1,11,5,5,19,239,-700,-50,3010,1,2699,PIN-DIODE +-99,-99,0,1,1,0,0,48,1,2,0,960,700,-50,3100,0,2700,PIN-DIODE +-99,-99,0,1,1,1,1,51,1,2,0,1020,700,-50,3101,1,2701,PIN-DIODE +-99,-99,0,1,1,1,0,50,1,2,0,1000,700,50,3111,0,2702,PIN-DIODE +-99,-99,0,1,1,0,1,49,1,2,0,980,700,50,3110,1,2703,PIN-DIODE +-99,-99,0,0,1,0,0,8,-99,-99,18,178,-999,-999,9999,0,9999,EMPTY +-99,-99,0,1,1,0,0,48,-99,-99,19,979,-999,-999,9999,0,9999,EMPTY +-99,-99,0,0,1,0,0,9,-99,-99,18,198,-999,-999,9999,0,9999,EMPTY +-99,-99,0,1,1,0,0,49,-99,-99,19,999,-999,-999,9999,0,9999,EMPTY +0,0,1,0,0,2,0,84,5,5,19,1699,-137.2,-617.4,1224,0,2448,CAL +0,1,1,0,0,3,0,86,5,2,16,1736,-102.9,-617.4,1223,0,2446,CAL +0,2,1,0,0,3,0,86,5,3,17,1737,-68.6,-617.4,1222,0,2444,CAL +0,3,1,0,0,3,0,86,5,4,18,1738,-34.3,-617.4,1221,0,2442,CAL +0,4,1,0,0,3,0,86,5,5,19,1739,0,-617.4,1220,0,2440,CAL +0,5,1,1,0,3,0,126,1,2,0,2520,34.3,-617.4,1219,0,2438,CAL +0,6,1,1,0,3,0,126,1,3,1,2521,68.6,-617.4,1218,0,2436,CAL +0,7,1,1,0,3,0,126,1,4,2,2522,102.9,-617.4,1217,0,2434,CAL +0,8,1,1,0,3,0,126,1,5,3,2523,137.2,-617.4,1216,0,2432,CAL +1,0,1,0,0,2,0,84,4,5,15,1695,-222.9,-583.1,1142,0,2284,CAL +1,1,1,0,0,2,0,84,5,2,16,1696,-188.6,-583.1,1141,0,2282,CAL +1,2,1,0,0,2,0,84,5,3,17,1697,-154.4,-583.1,1140,0,2280,CAL +1,3,1,0,0,2,0,84,5,4,18,1698,-120.1,-583.1,1139,0,2278,CAL +1,4,1,0,0,3,0,86,4,3,13,1733,-85.8,-583.1,1138,0,2276,CAL +1,5,1,0,0,3,0,86,4,4,14,1734,-51.5,-583.1,1137,0,2274,CAL +1,6,1,0,0,3,0,86,4,5,15,1735,-17.1,-583.1,1136,0,2272,CAL +1,7,1,1,0,3,0,126,2,2,4,2524,17.1,-583.1,1135,0,2270,CAL +1,8,1,1,0,3,0,126,2,3,5,2525,51.5,-583.1,1134,0,2268,CAL +1,9,1,1,0,3,0,126,2,4,6,2526,85.8,-583.1,1133,0,2266,CAL +1,10,1,1,0,3,0,126,2,5,7,2527,120.1,-583.1,1132,0,2264,CAL +1,11,1,1,0,2,0,124,1,2,0,2480,154.4,-583.1,1131,0,2262,CAL 
+1,12,1,1,0,2,0,124,1,3,1,2481,188.6,-583.1,1130,0,2260,CAL +1,13,1,1,0,2,0,124,1,4,2,2482,222.9,-583.1,1129,0,2258,CAL +2,0,1,0,0,1,0,82,5,4,18,1658,-308.7,-548.8,1143,0,2286,CAL +2,1,1,0,0,1,0,82,5,5,19,1659,-274.4,-548.8,1050,0,2100,CAL +2,2,1,0,0,2,0,84,3,5,11,1691,-240.1,-548.8,1049,0,2098,CAL +2,3,1,0,0,2,0,84,4,2,12,1692,-205.8,-548.8,1048,0,2096,CAL +2,4,1,0,0,2,0,84,4,3,13,1693,-171.5,-548.8,1047,0,2094,CAL +2,5,1,0,0,2,0,84,4,4,14,1694,-137.2,-548.8,1046,0,2092,CAL +2,6,1,0,0,3,0,86,3,4,10,1730,-102.9,-548.8,1045,0,2090,CAL +2,7,1,0,0,3,0,86,3,5,11,1731,-68.6,-548.8,1044,0,2088,CAL +2,8,1,0,0,3,0,86,4,2,12,1732,-34.3,-548.8,1043,0,2086,CAL +2,9,1,1,0,3,0,126,3,2,8,2528,0,-548.8,1042,0,2084,CAL +2,10,1,1,0,3,0,126,3,3,9,2529,34.3,-548.8,1041,0,2082,CAL +2,11,1,1,0,3,0,126,3,4,10,2530,68.6,-548.8,1040,0,2080,CAL +2,12,1,1,0,3,0,126,3,5,11,2531,102.9,-548.8,1039,0,2078,CAL +2,13,1,1,0,2,0,124,1,5,3,2483,137.2,-548.8,1038,0,2076,CAL +2,14,1,1,0,2,0,124,2,2,4,2484,171.5,-548.8,1037,0,2074,CAL +2,15,1,1,0,2,0,124,2,3,5,2485,205.8,-548.8,1036,0,2072,CAL +2,16,1,1,0,2,0,124,2,4,6,2486,240.1,-548.8,1035,0,2070,CAL +2,17,1,1,0,1,0,122,1,2,0,2440,274.4,-548.8,1034,0,2068,CAL +2,18,1,1,0,1,0,122,1,3,1,2441,308.7,-548.8,1128,0,2256,CAL +3,0,1,0,0,0,0,80,5,5,19,1619,-360.1,-514.5,1225,0,2450,CAL +3,1,1,0,0,1,0,82,4,4,14,1654,-325.9,-514.5,1144,0,2288,CAL +3,2,1,0,0,1,0,82,4,5,15,1655,-291.5,-514.5,1051,0,2102,CAL +3,3,1,0,0,1,0,82,5,2,16,1656,-257.2,-514.5,956,0,1912,CAL +3,4,1,0,0,1,0,82,5,3,17,1657,-222.9,-514.5,955,0,1910,CAL +3,5,1,0,0,2,0,84,3,2,8,1688,-188.6,-514.5,954,0,1908,CAL +3,6,1,0,0,2,0,84,3,3,9,1689,-154.4,-514.5,953,0,1906,CAL +3,7,1,0,0,2,0,84,3,4,10,1690,-120.1,-514.5,952,0,1904,CAL +3,8,1,0,0,3,0,86,2,5,7,1727,-85.8,-514.5,951,0,1902,CAL +3,9,1,0,0,3,0,86,3,2,8,1728,-51.5,-514.5,950,0,1900,CAL +3,10,1,0,0,3,0,86,3,3,9,1729,-17.1,-514.5,949,0,1898,CAL +3,11,1,1,0,3,0,126,4,2,12,2532,17.1,-514.5,948,0,1896,CAL 
+3,12,1,1,0,3,0,126,4,3,13,2533,51.5,-514.5,947,0,1894,CAL +3,13,1,1,0,2,0,124,2,5,7,2487,85.8,-514.5,946,0,1892,CAL +3,14,1,1,0,2,0,124,3,2,8,2488,120.1,-514.5,945,0,1890,CAL +3,15,1,1,0,2,0,124,3,3,9,2489,154.4,-514.5,944,0,1888,CAL +3,16,1,1,0,2,0,124,3,4,10,2490,188.6,-514.5,943,0,1886,CAL +3,17,1,1,0,1,0,122,1,4,2,2442,222.9,-514.5,942,0,1884,CAL +3,18,1,1,0,1,0,122,1,5,3,2443,257.2,-514.5,941,0,1882,CAL +3,19,1,1,0,1,0,122,2,2,4,2444,291.5,-514.5,1033,0,2066,CAL +3,20,1,1,0,1,0,122,2,3,5,2445,325.9,-514.5,1127,0,2254,CAL +3,21,1,1,0,0,0,120,1,2,0,2400,360.1,-514.5,1215,0,2430,CAL +4,0,1,0,0,0,0,80,4,5,15,1615,-411.6,-480.2,1284,0,2568,CAL +4,1,1,0,0,0,0,80,5,2,16,1616,-377.3,-480.2,1226,0,2452,CAL +4,2,1,0,0,0,0,80,5,3,17,1617,-343,-480.2,1145,0,2290,CAL +4,3,1,0,0,0,0,80,5,4,18,1618,-308.7,-480.2,1052,0,2104,CAL +4,4,1,0,0,1,0,82,3,5,11,1651,-274.4,-480.2,957,0,1914,CAL +4,5,1,0,0,1,0,82,4,2,12,1652,-240.1,-480.2,868,0,1736,CAL +4,6,1,0,0,1,0,82,4,3,13,1653,-205.8,-480.2,867,0,1734,CAL +4,7,1,0,0,2,0,84,2,3,5,1685,-171.5,-480.2,866,0,1732,CAL +4,8,1,0,0,2,0,84,2,4,6,1686,-137.2,-480.2,865,0,1730,CAL +4,9,1,0,0,2,0,84,2,5,7,1687,-102.9,-480.2,864,0,1728,CAL +4,10,1,0,0,3,0,86,2,2,4,1724,-68.6,-480.2,863,0,1726,CAL +4,11,1,0,0,3,0,86,2,3,5,1725,-34.3,-480.2,862,0,1724,CAL +4,12,1,0,0,3,0,86,2,4,6,1726,0,-480.2,861,0,1722,CAL +4,13,1,1,0,3,0,126,4,4,14,2534,34.3,-480.2,860,0,1720,CAL +4,14,1,1,0,3,0,126,4,5,15,2535,68.6,-480.2,859,0,1718,CAL +4,15,1,1,0,2,0,124,3,5,11,2491,102.9,-480.2,858,0,1716,CAL +4,16,1,1,0,2,0,124,4,2,12,2492,137.2,-480.2,857,0,1714,CAL +4,17,1,1,0,2,0,124,4,3,13,2493,171.5,-480.2,856,0,1712,CAL +4,18,1,1,0,1,0,122,2,4,6,2446,205.8,-480.2,855,0,1710,CAL +4,19,1,1,0,1,0,122,2,5,7,2447,240.1,-480.2,854,0,1708,CAL +4,20,1,1,0,1,0,122,3,2,8,2448,274.4,-480.2,940,0,1880,CAL +4,21,1,1,0,1,0,122,3,3,9,2449,308.7,-480.2,1032,0,2064,CAL +4,22,1,1,0,0,0,120,1,3,1,2401,343,-480.2,1126,0,2252,CAL 
+4,23,1,1,0,0,0,120,1,4,2,2402,377.3,-480.2,1214,0,2428,CAL +4,24,1,1,0,0,0,120,1,5,3,2403,411.6,-480.2,1283,0,2566,CAL +5,0,1,0,1,3,0,94,5,5,19,1899,-428.8,-445.9,1285,0,2570,CAL +5,1,1,0,0,0,0,80,3,5,11,1611,-394.5,-445.9,1227,0,2454,CAL +5,2,1,0,0,0,0,80,4,2,12,1612,-360.1,-445.9,1146,0,2292,CAL +5,3,1,0,0,0,0,80,4,3,13,1613,-325.9,-445.9,1053,0,2106,CAL +5,4,1,0,0,0,0,80,4,4,14,1614,-291.5,-445.9,958,0,1916,CAL +5,5,1,0,0,1,0,82,3,2,8,1648,-257.2,-445.9,869,0,1738,CAL +5,6,1,0,0,1,0,82,3,3,9,1649,-222.9,-445.9,786,0,1572,CAL +5,7,1,0,0,1,0,82,3,4,10,1650,-188.6,-445.9,785,0,1570,CAL +5,8,1,0,0,2,0,84,1,4,2,1682,-154.4,-445.9,784,0,1568,CAL +5,9,1,0,0,2,0,84,1,5,3,1683,-120.1,-445.9,783,0,1566,CAL +5,10,1,0,0,2,0,84,2,2,4,1684,-85.8,-445.9,782,0,1564,CAL +5,11,1,0,0,3,0,86,1,4,2,1722,-51.5,-445.9,781,0,1562,CAL +5,12,1,0,0,3,0,86,1,5,3,1723,-17.1,-445.9,780,0,1560,CAL +5,13,1,1,0,3,0,126,5,2,16,2536,17.1,-445.9,779,0,1558,CAL +5,14,1,1,0,3,0,126,5,3,17,2537,51.5,-445.9,778,0,1556,CAL +5,15,1,1,0,2,0,124,4,4,14,2494,85.8,-445.9,777,0,1554,CAL +5,16,1,1,0,2,0,124,4,5,15,2495,120.1,-445.9,776,0,1552,CAL +5,17,1,1,0,2,0,124,5,2,16,2496,154.4,-445.9,775,0,1550,CAL +5,18,1,1,0,1,0,122,3,4,10,2450,188.6,-445.9,774,0,1548,CAL +5,19,1,1,0,1,0,122,3,5,11,2451,222.9,-445.9,773,0,1546,CAL +5,20,1,1,0,1,0,122,4,2,12,2452,257.2,-445.9,853,0,1706,CAL +5,21,1,1,0,1,0,122,4,3,13,2453,291.5,-445.9,939,0,1878,CAL +5,22,1,1,0,0,0,120,2,2,4,2404,325.9,-445.9,1031,0,2062,CAL +5,23,1,1,0,0,0,120,2,3,5,2405,360.1,-445.9,1125,0,2250,CAL +5,24,1,1,0,0,0,120,2,4,6,2406,394.5,-445.9,1213,0,2426,CAL +5,25,1,1,0,0,0,120,2,5,7,2407,428.8,-445.9,1282,0,2564,CAL +6,0,1,0,1,3,0,94,5,4,18,1898,-480.2,-411.6,1330,0,2660,CAL +6,1,1,0,1,3,0,94,5,3,17,1897,-445.9,-411.6,1286,0,2572,CAL +6,2,1,0,1,3,0,94,5,2,16,1896,-411.6,-411.6,1228,0,2456,CAL +6,3,1,0,0,0,0,80,2,5,7,1607,-377.3,-411.6,1147,0,2294,CAL +6,4,1,0,0,0,0,80,3,2,8,1608,-343,-411.6,1054,0,2108,CAL 
+6,5,1,0,0,0,0,80,3,3,9,1609,-308.7,-411.6,959,0,1918,CAL +6,6,1,0,0,0,0,80,3,4,10,1610,-274.4,-411.6,870,0,1740,CAL +6,7,1,0,0,1,0,82,2,3,5,1645,-240.1,-411.6,787,0,1574,CAL +6,8,1,0,0,1,0,82,2,4,6,1646,-205.8,-411.6,724,0,1448,CAL +6,9,1,0,0,1,0,82,2,5,7,1647,-171.5,-411.6,723,0,1446,CAL +6,10,1,0,0,2,0,84,1,2,0,1680,-137.2,-411.6,722,0,1444,CAL +6,11,1,0,0,2,0,84,1,3,1,1681,-102.9,-411.6,721,0,1442,CAL +6,12,1,0,0,3,0,86,1,2,0,1720,-68.6,-411.6,720,0,1440,CAL +6,13,1,0,0,3,0,86,1,3,1,1721,-34.3,-411.6,719,0,1438,CAL +6,14,1,1,0,3,0,126,5,4,18,2538,0,-411.6,718,0,1436,CAL +6,15,1,1,0,3,0,126,5,5,19,2539,34.3,-411.6,717,0,1434,CAL +6,16,1,1,0,2,0,124,5,3,17,2497,68.6,-411.6,716,0,1432,CAL +6,17,1,1,0,2,0,124,5,4,18,2498,102.9,-411.6,715,0,1430,CAL +6,18,1,1,0,2,0,124,5,5,19,2499,137.2,-411.6,714,0,1428,CAL +6,19,1,1,0,1,0,122,4,4,14,2454,171.5,-411.6,713,0,1426,CAL +6,20,1,1,0,1,0,122,4,5,15,2455,205.8,-411.6,712,0,1424,CAL +6,21,1,1,0,1,0,122,5,2,16,2456,240.1,-411.6,772,0,1544,CAL +6,22,1,1,0,0,0,120,3,2,8,2408,274.4,-411.6,852,0,1704,CAL +6,23,1,1,0,0,0,120,3,3,9,2409,308.7,-411.6,938,0,1876,CAL +6,24,1,1,0,0,0,120,3,4,10,2410,343,-411.6,1030,0,2060,CAL +6,25,1,1,0,0,0,120,3,5,11,2411,377.3,-411.6,1124,0,2248,CAL +6,26,1,1,1,3,0,134,1,4,2,2682,411.6,-411.6,1212,0,2424,CAL +6,27,1,1,1,3,0,134,1,3,1,2681,445.9,-411.6,1281,0,2562,CAL +6,28,1,1,1,3,0,134,1,2,0,2680,480.2,-411.6,1329,0,2658,CAL +7,0,1,0,1,3,0,94,4,5,15,1895,-497.4,-377.3,1331,0,2662,CAL +7,1,1,0,1,3,0,94,4,4,14,1894,-463,-377.3,1287,0,2574,CAL +7,2,1,0,1,3,0,94,4,3,13,1893,-428.8,-377.3,1229,0,2458,CAL +7,3,1,0,1,3,0,94,4,2,12,1892,-394.5,-377.3,1148,0,2296,CAL +7,4,1,0,1,3,0,94,3,5,11,1891,-360.1,-377.3,1055,0,2110,CAL +7,5,1,0,0,0,0,80,2,2,4,1604,-325.9,-377.3,960,0,1920,CAL +7,6,1,0,0,0,0,80,2,3,5,1605,-291.5,-377.3,871,0,1742,CAL +7,7,1,0,0,0,0,80,2,4,6,1606,-257.2,-377.3,788,0,1576,CAL +7,8,1,0,0,1,0,82,1,3,1,1641,-222.9,-377.3,725,0,1450,CAL 
+7,9,1,0,0,1,0,82,1,4,2,1642,-188.6,-377.3,687,0,1374,CAL +7,10,1,0,0,1,0,82,1,5,3,1643,-154.4,-377.3,686,0,1372,CAL +7,11,1,0,0,1,0,82,2,2,4,1644,-120.1,-377.3,685,0,1370,CAL +7,12,1,1,0,1,0,122,5,3,17,2457,120.1,-377.3,684,0,1368,CAL +7,13,1,1,0,1,0,122,5,4,18,2458,154.4,-377.3,683,0,1366,CAL +7,14,1,1,0,1,0,122,5,5,19,2459,188.6,-377.3,682,0,1364,CAL +7,15,1,1,0,0,0,120,4,2,12,2412,222.9,-377.3,711,0,1422,CAL +7,16,1,1,0,0,0,120,4,3,13,2413,257.2,-377.3,771,0,1542,CAL +7,17,1,1,0,0,0,120,4,4,14,2414,291.5,-377.3,851,0,1702,CAL +7,18,1,1,0,0,0,120,4,5,15,2415,325.9,-377.3,937,0,1874,CAL +7,19,1,1,1,3,0,134,2,5,7,2687,360.1,-377.3,1029,0,2058,CAL +7,20,1,1,1,3,0,134,2,4,6,2686,394.5,-377.3,1123,0,2246,CAL +7,21,1,1,1,3,0,134,2,3,5,2685,428.8,-377.3,1211,0,2422,CAL +7,22,1,1,1,3,0,134,2,2,4,2684,463,-377.3,1280,0,2560,CAL +7,23,1,1,1,3,0,134,1,5,3,2683,497.4,-377.3,1328,0,2656,CAL +8,0,1,0,1,2,0,92,5,5,19,1859,-514.5,-343,1332,0,2664,CAL +8,1,1,0,1,3,0,94,3,4,10,1890,-480.2,-343,1288,0,2576,CAL +8,2,1,0,1,3,0,94,3,3,9,1889,-445.9,-343,1230,0,2460,CAL +8,3,1,0,1,3,0,94,3,2,8,1888,-411.6,-343,1149,0,2298,CAL +8,4,1,0,1,3,0,94,2,5,7,1887,-377.3,-343,1056,0,2112,CAL +8,5,1,0,1,3,0,94,2,4,6,1886,-343,-343,961,0,1922,CAL +8,6,1,0,0,0,0,80,1,3,1,1601,-308.7,-343,872,0,1744,CAL +8,7,1,0,0,0,0,80,1,4,2,1602,-274.4,-343,789,0,1578,CAL +8,8,1,0,0,0,0,80,1,5,3,1603,-240.1,-343,726,0,1452,CAL +8,9,1,0,0,1,0,82,1,2,0,1640,-205.8,-343,688,0,1376,CAL +8,10,1,1,0,0,0,120,5,2,16,2416,205.8,-343,681,0,1362,CAL +8,11,1,1,0,0,0,120,5,3,17,2417,240.1,-343,710,0,1420,CAL +8,12,1,1,0,0,0,120,5,4,18,2418,274.4,-343,770,0,1540,CAL +8,13,1,1,1,3,0,134,4,2,12,2692,308.7,-343,850,0,1700,CAL +8,14,1,1,1,3,0,134,3,5,11,2691,343,-343,936,0,1872,CAL +8,15,1,1,1,3,0,134,3,4,10,2690,377.3,-343,1028,0,2056,CAL +8,16,1,1,1,3,0,134,3,3,9,2689,411.6,-343,1122,0,2244,CAL +8,17,1,1,1,3,0,134,3,2,8,2688,445.9,-343,1210,0,2420,CAL +8,18,1,1,1,2,0,132,1,3,1,2641,480.2,-343,1279,0,2558,CAL 
+8,19,1,1,1,2,0,132,1,2,0,2640,514.5,-343,1327,0,2654,CAL +9,0,1,0,1,2,0,92,5,4,18,1858,-531.7,-308.7,1333,0,2666,CAL +9,1,1,0,1,2,0,92,5,3,17,1857,-497.4,-308.7,1289,0,2578,CAL +9,2,1,0,1,2,0,92,5,2,16,1856,-463,-308.7,1231,0,2462,CAL +9,3,1,0,1,2,0,92,4,5,15,1855,-428.8,-308.7,1150,0,2300,CAL +9,4,1,0,1,3,0,94,2,3,5,1885,-394.5,-308.7,1057,0,2114,CAL +9,5,1,0,1,3,0,94,2,2,4,1884,-360.1,-308.7,962,0,1924,CAL +9,6,1,0,1,3,0,94,1,5,3,1883,-325.9,-308.7,873,0,1746,CAL +9,7,1,0,1,3,0,94,1,4,2,1882,-291.5,-308.7,790,0,1580,CAL +9,8,1,0,0,0,0,80,1,2,0,1600,-257.2,-308.7,727,0,1454,CAL +9,9,1,1,0,0,0,120,5,5,19,2419,257.2,-308.7,709,0,1418,CAL +9,10,1,1,1,3,0,134,5,2,16,2696,291.5,-308.7,769,0,1538,CAL +9,11,1,1,1,3,0,134,4,5,15,2695,325.9,-308.7,849,0,1698,CAL +9,12,1,1,1,3,0,134,4,4,14,2694,360.1,-308.7,935,0,1870,CAL +9,13,1,1,1,3,0,134,4,3,13,2693,394.5,-308.7,1027,0,2054,CAL +9,14,1,1,1,2,0,132,2,3,5,2645,428.8,-308.7,1121,0,2242,CAL +9,15,1,1,1,2,0,132,2,2,4,2644,463,-308.7,1209,0,2418,CAL +9,16,1,1,1,2,0,132,1,5,3,2643,497.4,-308.7,1278,0,2556,CAL +9,17,1,1,1,2,0,132,1,4,2,2642,531.7,-308.7,1326,0,2652,CAL +10,0,1,0,1,2,0,92,4,4,14,1854,-548.8,-274.4,1334,0,2668,CAL +10,1,1,0,1,2,0,92,4,3,13,1853,-514.5,-274.4,1290,0,2580,CAL +10,2,1,0,1,2,0,92,4,2,12,1852,-480.2,-274.4,1232,0,2464,CAL +10,3,1,0,1,2,0,92,3,5,11,1851,-445.9,-274.4,1151,0,2302,CAL +10,4,1,0,1,2,0,92,3,4,10,1850,-411.6,-274.4,1058,0,2116,CAL +10,5,1,0,1,2,0,92,3,3,9,1849,-377.3,-274.4,963,0,1926,CAL +10,6,1,0,1,3,0,94,1,3,1,1881,-343,-274.4,874,0,1748,CAL +10,7,1,0,1,3,0,94,1,2,0,1880,-308.7,-274.4,791,0,1582,CAL +10,8,1,1,1,3,0,134,5,5,19,2699,308.7,-274.4,768,0,1536,CAL +10,9,1,1,1,3,0,134,5,4,18,2698,343,-274.4,848,0,1696,CAL +10,10,1,1,1,3,0,134,5,3,17,2697,377.3,-274.4,934,0,1868,CAL +10,11,1,1,1,2,0,132,3,4,10,2650,411.6,-274.4,1026,0,2052,CAL +10,12,1,1,1,2,0,132,3,3,9,2649,445.9,-274.4,1120,0,2240,CAL +10,13,1,1,1,2,0,132,3,2,8,2648,480.2,-274.4,1208,0,2416,CAL 
+10,14,1,1,1,2,0,132,2,5,7,2647,514.5,-274.4,1277,0,2554,CAL +10,15,1,1,1,2,0,132,2,4,6,2646,548.8,-274.4,1325,0,2650,CAL +11,0,1,0,1,1,0,90,5,4,18,1818,-566,-240.1,1335,0,2670,CAL +11,1,1,0,1,1,0,90,5,3,17,1817,-531.7,-240.1,1291,0,2582,CAL +11,2,1,0,1,1,0,90,5,2,16,1816,-497.4,-240.1,1233,0,2466,CAL +11,3,1,0,1,2,0,92,3,2,8,1848,-463,-240.1,1152,0,2304,CAL +11,4,1,0,1,2,0,92,2,5,7,1847,-428.8,-240.1,1059,0,2118,CAL +11,5,1,0,1,2,0,92,2,4,6,1846,-394.5,-240.1,964,0,1928,CAL +11,6,1,0,1,2,0,92,2,3,5,1845,-360.1,-240.1,875,0,1750,CAL +11,7,1,0,1,2,0,92,2,2,4,1844,-325.9,-240.1,792,0,1584,CAL +11,8,1,1,1,2,0,132,4,5,12,2655,325.9,-240.1,767,0,1534,CAL +11,9,1,1,1,2,0,132,4,4,13,2654,360.1,-240.1,847,0,1694,CAL +11,10,1,1,1,2,0,132,4,3,14,2653,394.5,-240.1,933,0,1866,CAL +11,11,1,1,1,2,0,132,4,2,15,2652,428.8,-240.1,1025,0,2050,CAL +11,12,1,1,1,2,0,132,3,5,11,2651,463,-240.1,1119,0,2238,CAL +11,13,1,1,1,1,0,130,1,5,3,2603,497.4,-240.1,1207,0,2414,CAL +11,14,1,1,1,1,0,130,1,4,2,2602,531.7,-240.1,1276,0,2552,CAL +11,15,1,1,1,1,0,130,1,3,1,2601,566,-240.1,1324,0,2648,CAL +12,0,1,0,1,1,0,90,4,5,15,1815,-583.1,-205.8,1336,0,2672,CAL +12,1,1,0,1,1,0,90,4,4,14,1814,-548.8,-205.8,1292,0,2584,CAL +12,2,1,0,1,1,0,90,4,3,13,1813,-514.5,-205.8,1234,0,2468,CAL +12,3,1,0,1,1,0,90,4,2,12,1812,-480.2,-205.8,1153,0,2306,CAL +12,4,1,0,1,2,0,92,1,5,3,1843,-445.9,-205.8,1060,0,2120,CAL +12,5,1,0,1,2,0,92,1,4,2,1842,-411.6,-205.8,965,0,1930,CAL +12,6,1,0,1,2,0,92,1,3,1,1841,-377.3,-205.8,876,0,1752,CAL +12,7,1,0,1,2,0,92,1,2,0,1840,-343,-205.8,793,0,1586,CAL +12,8,1,1,1,2,0,132,5,5,19,2659,343,-205.8,766,0,1532,CAL +12,9,1,1,1,2,0,132,5,4,18,2658,377.3,-205.8,846,0,1692,CAL +12,10,1,1,1,2,0,132,5,3,17,2657,411.6,-205.8,932,0,1864,CAL +12,11,1,1,1,2,0,132,5,2,16,2656,445.9,-205.8,1024,0,2048,CAL +12,12,1,1,1,1,0,130,2,5,7,2607,480.2,-205.8,1118,0,2236,CAL +12,13,1,1,1,1,0,130,2,4,6,2606,514.5,-205.8,1206,0,2412,CAL +12,14,1,1,1,1,0,130,2,3,5,2605,548.8,-205.8,1275,0,2550,CAL 
+12,15,1,1,1,1,0,130,2,2,4,2604,583.1,-205.8,1323,0,2646,CAL +13,0,1,0,1,1,0,90,3,5,11,1811,-600.2,-171.5,1337,0,2674,CAL +13,1,1,0,1,1,0,90,3,4,10,1810,-566,-171.5,1293,0,2586,CAL +13,2,1,0,1,1,0,90,3,3,9,1809,-531.7,-171.5,1235,0,2470,CAL +13,3,1,0,1,1,0,90,3,2,8,1808,-497.4,-171.5,1154,0,2308,CAL +13,4,1,0,1,1,0,90,2,5,7,1807,-463,-171.5,1061,0,2122,CAL +13,5,1,0,1,1,0,90,2,4,6,1806,-428.8,-171.5,966,0,1932,CAL +13,6,1,0,1,1,0,90,2,3,5,1805,-394.5,-171.5,877,0,1754,CAL +13,7,1,0,1,1,0,90,2,2,4,1804,-360.1,-171.5,794,0,1588,CAL +13,8,1,1,1,1,0,130,4,5,15,2615,360.1,-171.5,765,0,1530,CAL +13,9,1,1,1,1,0,130,4,4,14,2614,394.5,-171.5,845,0,1690,CAL +13,10,1,1,1,1,0,130,4,3,13,2613,428.8,-171.5,931,0,1862,CAL +13,11,1,1,1,1,0,130,4,2,12,2612,463,-171.5,1023,0,2046,CAL +13,12,1,1,1,1,0,130,3,5,11,2611,497.4,-171.5,1117,0,2234,CAL +13,13,1,1,1,1,0,130,3,4,10,2610,531.7,-171.5,1205,0,2410,CAL +13,14,1,1,1,1,0,130,3,3,9,2609,566,-171.5,1274,0,2548,CAL +13,15,1,1,1,1,0,130,3,2,8,2608,600.2,-171.5,1322,0,2644,CAL +14,0,1,0,1,0,0,88,5,3,17,1777,-617.4,-137.2,1338,0,2676,CAL +14,1,1,0,1,0,0,88,4,4,14,1774,-583.1,-137.2,1294,0,2588,CAL +14,2,1,0,1,0,0,88,3,5,11,1771,-548.8,-137.2,1236,0,2472,CAL +14,3,1,0,1,0,0,88,3,2,8,1768,-514.5,-137.2,1155,0,2310,CAL +14,4,1,0,1,1,0,90,1,5,3,1803,-480.2,-137.2,1062,0,2124,CAL +14,5,1,0,1,1,0,90,1,4,2,1802,-445.9,-137.2,967,0,1934,CAL +14,6,1,0,1,1,1,90,1,3,1,1801,-411.6,-137.2,878,1,1757,CAL +14,7,1,0,1,1,0,90,1,2,0,1800,-377.3,-137.2,795,0,1590,CAL +14,8,1,1,1,1,0,130,5,5,19,2619,377.3,-137.2,764,0,1528,CAL +14,9,1,1,1,1,0,130,5,4,18,2618,411.6,-137.2,844,0,1688,CAL +14,10,1,1,1,1,0,130,5,3,17,2617,445.9,-137.2,930,0,1860,CAL +14,11,1,1,1,1,0,130,5,2,16,2616,480.2,-137.2,1022,0,2044,CAL +16,11,1,1,1,0,0,128,3,4,10,2570,548.8,-68.6,1114,0,2228,CAL +14,13,1,1,1,0,0,128,2,5,7,2567,548.8,-137.2,1204,0,2408,CAL +14,14,1,1,1,0,0,128,2,2,4,2564,583.1,-137.2,1273,0,2546,CAL +14,15,1,1,1,0,0,128,1,3,1,2561,617.4,-137.2,1321,0,2642,CAL 
+15,0,1,0,1,0,0,88,5,2,16,1776,-600.2,-102.9,1295,0,2590,CAL +15,1,1,0,1,0,0,88,4,3,13,1773,-566,-102.9,1237,0,2474,CAL +15,2,1,0,1,0,0,88,3,4,10,1770,-531.7,-102.9,1156,0,2312,CAL +15,3,1,0,1,0,0,88,2,5,7,1767,-497.4,-102.9,1063,0,2126,CAL +15,4,1,0,1,0,0,88,2,3,5,1765,-463,-102.9,968,0,1936,CAL +15,5,1,0,1,0,0,88,1,5,3,1763,-428.8,-102.9,879,0,1758,CAL +15,6,1,0,1,0,0,88,1,3,1,1761,-394.5,-102.9,796,0,1592,CAL +15,7,1,1,1,0,0,128,5,3,17,2577,394.5,-102.9,763,0,1526,CAL +15,8,1,1,1,0,0,128,4,5,15,2575,428.8,-102.9,843,0,1686,CAL +15,9,1,1,1,0,0,128,4,3,13,2573,463,-102.9,929,0,1858,CAL +15,10,1,1,1,0,0,128,3,5,11,2571,497.4,-102.9,1021,0,2042,CAL +14,12,1,1,1,0,0,128,3,2,8,2568,514.5,-137.2,1116,0,2232,CAL +15,12,1,1,1,0,0,128,2,3,5,2565,566,-102.9,1203,0,2406,CAL +15,13,1,1,1,0,0,128,1,4,2,2562,600.2,-102.9,1272,0,2544,CAL +16,0,1,0,1,0,0,88,4,5,15,1775,-617.4,-68.6,1296,0,2592,CAL +16,1,1,0,1,0,0,88,4,2,12,1772,-583.1,-68.6,1238,0,2476,CAL +16,2,1,0,1,0,0,88,3,3,9,1769,-548.8,-68.6,1157,0,2314,CAL +16,3,1,0,1,0,0,88,2,4,6,1766,-514.5,-68.6,1064,0,2128,CAL +16,4,1,0,1,0,0,88,2,2,4,1764,-480.2,-68.6,969,0,1938,CAL +16,5,1,0,1,0,0,88,1,4,2,1762,-445.9,-68.6,880,0,1760,CAL +16,6,1,0,1,0,0,88,1,2,0,1760,-411.6,-68.6,797,0,1594,CAL +16,7,1,1,1,0,0,128,5,4,18,2578,411.6,-68.6,762,0,1524,CAL +16,8,1,1,1,0,0,128,5,2,16,2576,445.9,-68.6,842,0,1684,CAL +16,9,1,1,1,0,0,128,4,4,14,2574,480.2,-68.6,928,0,1856,CAL +16,10,1,1,1,0,0,128,4,2,12,2572,514.5,-68.6,1020,0,2040,CAL +15,11,1,1,1,0,0,128,3,3,9,2569,531.7,-102.9,1115,0,2230,CAL +16,12,1,1,1,0,0,128,2,4,6,2566,583.1,-68.6,1202,0,2404,CAL +16,13,1,1,1,0,0,128,1,5,3,2563,617.4,-68.6,1271,0,2542,CAL +17,0,1,0,2,2,0,100,5,5,19,2019,-634.5,-34.3,1297,0,2594,CAL +17,1,1,0,2,2,0,100,5,4,18,2018,-600.2,-34.3,1239,0,2478,CAL +17,2,1,0,2,2,0,100,5,2,16,2016,-566,-34.3,1158,0,2316,CAL +17,3,1,0,2,2,0,100,4,4,14,2014,-531.7,-34.3,1065,0,2130,CAL +17,4,1,0,2,2,0,100,3,5,11,2011,-497.4,-34.3,970,0,1940,CAL 
+17,5,1,0,2,2,0,100,3,2,8,2008,-463,-34.3,881,0,1762,CAL +17,6,1,0,2,2,0,100,2,3,5,2005,-428.8,-34.3,798,0,1596,CAL +17,7,1,0,2,2,0,100,1,4,2,2002,-394.5,-34.3,728,0,1456,CAL +17,8,1,1,2,2,0,140,5,5,19,2819,394.5,-34.3,708,0,1416,CAL +17,9,1,1,2,2,0,140,5,2,16,2816,428.8,-34.3,761,0,1522,CAL +17,10,1,1,2,2,0,140,4,3,13,2813,463,-34.3,841,0,1682,CAL +17,11,1,1,2,2,0,140,3,4,10,2810,497.4,-34.3,927,0,1854,CAL +17,12,1,1,2,2,0,140,2,5,7,2807,531.7,-34.3,1019,0,2038,CAL +17,13,1,1,2,2,0,140,2,2,4,2804,566,-34.3,1113,0,2226,CAL +17,14,1,1,2,2,0,140,1,4,2,2802,600.2,-34.3,1201,0,2402,CAL +17,15,1,1,2,2,0,140,1,2,0,2800,634.5,-34.3,1270,0,2540,CAL +18,0,1,0,2,2,0,100,5,3,17,2017,-617.4,0,1240,0,2480,CAL +18,1,1,0,2,2,0,100,4,5,15,2015,-583.1,0,1159,0,2318,CAL +18,2,1,0,2,2,0,100,4,3,13,2013,-548.8,0,1066,0,2132,CAL +18,3,1,0,2,2,0,100,3,4,10,2010,-514.5,0,971,0,1942,CAL +18,4,1,0,2,2,0,100,2,5,7,2007,-480.2,0,882,0,1764,CAL +18,5,1,0,2,2,0,100,2,2,4,2004,-445.9,0,799,0,1598,CAL +18,6,1,0,2,2,0,100,1,3,1,2001,-411.6,0,729,0,1458,CAL +18,7,1,1,2,2,0,140,5,3,17,2817,411.6,0,707,0,1414,CAL +18,8,1,1,2,2,0,140,4,4,14,2814,445.9,0,760,0,1520,CAL +18,9,1,1,2,2,0,140,3,5,11,2811,480.2,0,840,0,1680,CAL +18,10,1,1,2,2,0,140,3,2,8,2808,514.5,0,926,0,1852,CAL +18,11,1,1,2,2,0,140,2,3,5,2805,548.8,0,1018,0,2036,CAL +18,12,1,1,2,2,0,140,1,5,3,2803,583.1,0,1112,0,2224,CAL +18,13,1,1,2,2,0,140,1,3,1,2801,617.4,0,1200,0,2400,CAL +19,0,1,0,2,1,0,98,5,5,19,1979,-634.5,34.3,1298,0,2596,CAL +19,1,1,0,2,1,0,98,4,5,15,1975,-600.2,34.3,1241,0,2482,CAL +19,2,1,0,2,1,0,98,4,4,14,1974,-566,34.3,1160,0,2320,CAL +19,3,1,0,2,2,0,100,4,2,12,2012,-531.7,34.3,1067,0,2134,CAL +19,4,1,0,2,2,0,100,3,3,9,2009,-497.4,34.3,972,0,1944,CAL +19,5,1,0,2,2,0,100,2,4,6,2006,-463,34.3,883,0,1766,CAL +19,6,1,0,2,2,0,100,1,5,3,2003,-428.8,34.3,800,0,1600,CAL +19,7,1,0,2,2,0,100,1,2,0,2000,-394.5,34.3,730,0,1460,CAL +19,8,1,1,2,2,0,140,5,4,18,2818,394.5,34.3,706,0,1412,CAL 
+19,9,1,1,2,2,0,140,4,5,15,2815,428.8,34.3,759,0,1518,CAL +19,10,1,1,2,2,0,140,4,2,12,2812,463,34.3,839,0,1678,CAL +19,11,1,1,2,2,0,140,3,3,9,2809,497.4,34.3,925,0,1850,CAL +19,12,1,1,2,2,0,140,2,4,6,2806,531.7,34.3,1017,0,2034,CAL +19,13,1,1,2,1,0,138,1,4,2,2762,566,34.3,1111,0,2222,CAL +19,14,1,1,2,1,0,138,1,3,1,2761,600.2,34.3,1199,0,2398,CAL +19,15,1,1,2,1,0,138,1,2,0,2760,634.5,34.3,1269,0,2538,CAL +20,0,1,0,2,1,0,98,5,4,18,1978,-617.4,68.6,1299,0,2598,CAL +20,1,1,0,2,1,0,98,4,3,13,1973,-583.1,68.6,1242,0,2484,CAL +20,2,1,0,2,1,0,98,4,2,12,1972,-548.8,68.6,1161,0,2322,CAL +20,3,1,0,2,1,0,98,2,5,7,1967,-514.5,68.6,1068,0,2136,CAL +20,4,1,0,2,1,0,98,2,4,6,1966,-480.2,68.6,973,0,1946,CAL +20,5,1,0,2,1,0,98,1,5,3,1963,-445.9,68.6,884,0,1768,CAL +20,6,1,0,2,1,0,98,1,4,2,1962,-411.6,68.6,801,0,1602,CAL +20,7,1,1,2,1,0,138,5,4,18,2778,411.6,68.6,758,0,1516,CAL +20,8,1,1,2,1,0,138,5,2,16,2776,445.9,68.6,838,0,1676,CAL +20,9,1,1,2,1,0,138,4,4,14,2774,480.2,68.6,924,0,1848,CAL +20,10,1,1,2,1,0,138,4,2,12,2772,514.5,68.6,1016,0,2032,CAL +20,11,1,1,2,1,0,138,3,3,9,2769,548.8,68.6,1110,0,2220,CAL +20,12,1,1,2,1,0,138,2,4,6,2766,583.1,68.6,1198,0,2396,CAL +20,13,1,1,2,1,0,138,1,5,3,2763,617.4,68.6,1268,0,2536,CAL +21,0,1,0,2,1,0,98,5,3,17,1977,-600.2,102.9,1300,0,2600,CAL +21,1,1,0,2,1,0,98,3,3,9,1969,-566,102.9,1243,0,2486,CAL +21,2,1,0,2,1,0,98,3,2,8,1968,-531.7,102.9,1162,0,2324,CAL +21,3,1,0,2,1,0,98,2,3,5,1965,-497.4,102.9,1069,0,2138,CAL +21,4,1,0,2,1,0,98,2,2,4,1964,-463,102.9,974,0,1948,CAL +21,5,1,0,2,1,0,98,1,3,1,1961,-428.8,102.9,885,0,1770,CAL +21,6,1,0,2,1,0,98,1,2,0,1960,-394.5,102.9,802,0,1604,CAL +21,7,1,1,2,1,0,138,5,5,19,2779,394.5,102.9,757,0,1514,CAL +21,8,1,1,2,1,0,138,5,3,17,2777,428.8,102.9,837,0,1674,CAL +21,9,1,1,2,1,0,138,4,5,15,2775,463,102.9,923,0,1846,CAL +21,10,1,1,2,1,0,138,4,3,13,2773,497.4,102.9,1015,0,2030,CAL +21,11,1,1,2,1,0,138,3,4,10,2770,531.7,102.9,1109,0,2218,CAL +21,12,1,1,2,1,0,138,2,5,7,2767,566,102.9,1197,0,2394,CAL 
+21,13,1,1,2,1,0,138,2,2,4,2764,600.2,102.9,1267,0,2534,CAL +22,0,1,0,2,1,0,98,5,2,16,1976,-617.4,137.2,1339,0,2678,CAL +22,1,1,0,2,1,0,98,3,5,11,1971,-583.1,137.2,1301,0,2602,CAL +22,2,1,0,2,1,0,98,3,4,10,1970,-548.8,137.2,1244,0,2488,CAL +22,3,1,0,2,0,0,96,4,2,12,1932,-514.5,137.2,1163,0,2326,CAL +22,4,1,0,2,0,0,96,3,3,9,1929,-480.2,137.2,1070,0,2140,CAL +22,5,1,0,2,0,0,96,2,4,6,1926,-445.9,137.2,975,0,1950,CAL +22,6,1,0,2,0,0,96,1,5,3,1923,-411.6,137.2,886,0,1772,CAL +22,7,1,0,2,0,0,96,1,3,1,1921,-377.3,137.2,803,0,1606,CAL +22,8,1,1,2,0,0,136,5,4,18,2738,377.3,137.2,756,0,1512,CAL +22,9,1,1,2,0,0,136,5,2,16,2736,411.6,137.2,836,0,1672,CAL +22,10,1,1,2,0,0,136,4,3,13,2733,445.9,137.2,922,0,1844,CAL +22,11,1,1,2,0,0,136,3,4,10,2730,480.2,137.2,1014,0,2028,CAL +22,12,1,1,2,0,0,136,2,5,7,2727,514.5,137.2,1108,0,2216,CAL +22,13,1,1,2,1,0,138,3,5,11,2771,548.8,137.2,1196,0,2392,CAL +22,14,1,1,2,1,0,138,3,2,8,2768,583.1,137.2,1266,0,2532,CAL +22,15,1,1,2,1,0,138,2,3,5,2765,617.4,137.2,1320,0,2640,CAL +23,0,1,0,2,0,0,96,5,5,19,1939,-600.2,171.5,1340,0,2680,CAL +23,1,1,0,2,0,0,96,5,2,16,1936,-566,171.5,1302,0,2604,CAL +23,2,1,0,2,0,0,96,4,4,14,1934,-531.7,171.5,1245,0,2490,CAL +23,3,1,0,2,0,0,96,3,5,11,1931,-497.4,171.5,1164,0,2328,CAL +23,4,1,0,2,0,0,96,3,2,8,1928,-463,171.5,1071,0,2142,CAL +23,5,1,0,2,0,0,96,2,3,5,1925,-428.8,171.5,976,0,1952,CAL +23,6,1,0,2,0,0,96,1,4,2,1922,-394.5,171.5,887,0,1774,CAL +23,7,1,0,2,0,0,96,1,2,0,1920,-360.1,171.5,804,0,1608,CAL +23,8,1,1,2,0,0,136,5,5,19,2739,360.1,171.5,755,0,1510,CAL +23,9,1,1,2,0,0,136,5,3,17,2737,394.5,171.5,835,0,1670,CAL +23,10,1,1,2,0,0,136,4,4,14,2734,428.8,171.5,921,0,1842,CAL +23,11,1,1,2,0,0,136,3,5,11,2731,463,171.5,1013,0,2026,CAL +23,12,1,1,2,0,0,136,3,2,8,2728,497.4,171.5,1107,0,2214,CAL +23,13,1,1,2,0,0,136,2,3,5,2725,531.7,171.5,1195,0,2390,CAL +23,14,1,1,2,0,0,136,1,5,3,2723,566,171.5,1265,0,2530,CAL +23,15,1,1,2,0,0,136,1,2,0,2720,600.2,171.5,1319,0,2638,CAL 
+24,0,1,0,2,0,0,96,5,4,18,1938,-583.1,205.8,1341,0,2682,CAL +24,1,1,0,2,0,0,96,4,5,15,1935,-548.8,205.8,1303,0,2606,CAL +24,2,1,0,2,0,0,96,4,3,13,1933,-514.5,205.8,1246,0,2492,CAL +24,3,1,0,2,0,0,96,3,4,10,1930,-480.2,205.8,1165,0,2330,CAL +24,4,1,0,2,0,0,96,2,5,7,1927,-445.9,205.8,1072,0,2144,CAL +24,5,1,0,2,0,0,96,2,2,4,1924,-411.6,205.8,977,0,1954,CAL +24,6,1,0,3,2,0,108,1,3,1,2161,-377.3,205.8,888,0,1776,CAL +24,7,1,0,3,2,0,108,1,2,0,2160,-343,205.8,805,0,1610,CAL +24,8,1,1,3,2,0,148,5,5,19,2979,343,205.8,754,0,1508,CAL +24,9,1,1,3,2,0,148,5,4,18,2978,377.3,205.8,834,0,1668,CAL +24,10,1,1,2,0,0,136,4,5,15,2735,411.6,205.8,920,0,1840,CAL +24,11,1,1,2,0,0,136,4,2,12,2732,445.9,205.8,1012,0,2024,CAL +24,12,1,1,2,0,0,136,3,3,9,2729,480.2,205.8,1106,0,2212,CAL +24,13,1,1,2,0,0,136,2,4,6,2726,514.5,205.8,1194,0,2388,CAL +24,14,1,1,2,0,0,136,2,2,4,2724,548.8,205.8,1264,0,2528,CAL +24,15,1,1,2,0,0,136,1,3,1,2721,583.1,205.8,1318,0,2636,CAL +25,0,1,0,2,0,0,96,5,3,17,1937,-566,240.1,1342,0,2684,CAL +25,1,1,0,3,2,0,108,5,4,18,2178,-531.7,240.1,1304,0,2608,CAL +25,2,1,0,3,2,0,108,5,3,17,2177,-497.4,240.1,1247,0,2494,CAL +25,3,1,0,3,2,1,108,5,2,16,2176,-463,240.1,1166,1,2333,CAL +25,4,1,0,3,2,0,108,2,3,5,2165,-428.8,240.1,1073,0,2146,CAL +25,5,1,0,3,2,0,108,2,2,4,2164,-394.5,240.1,978,0,1956,CAL +25,6,1,0,3,2,0,108,1,5,3,2163,-360.1,240.1,889,0,1778,CAL +25,7,1,0,3,2,0,108,1,4,2,2162,-325.9,240.1,806,0,1612,CAL +25,8,1,1,3,2,0,148,5,3,17,2977,325.9,240.1,753,0,1506,CAL +25,9,1,1,3,2,0,148,5,2,16,2976,360.1,240.1,833,0,1666,CAL +25,10,1,1,3,2,0,148,4,5,15,2975,394.5,240.1,919,0,1838,CAL +25,11,1,1,3,2,0,148,4,4,14,2974,428.8,240.1,1011,0,2022,CAL +25,12,1,1,3,2,0,148,1,5,3,2963,463,240.1,1105,0,2210,CAL +25,13,1,1,3,2,0,148,1,4,2,2962,497.4,240.1,1193,0,2386,CAL +25,14,1,1,3,2,0,148,1,3,1,2961,531.7,240.1,1263,0,2526,CAL +25,15,1,1,2,0,0,136,1,4,2,2722,566,240.1,1317,0,2634,CAL +26,0,1,0,3,2,0,108,5,5,19,2179,-548.8,274.4,1343,0,2686,CAL 
+26,1,1,0,3,2,0,108,4,4,14,2174,-514.5,274.4,1305,0,2610,CAL +26,2,1,0,3,2,0,108,4,3,13,2173,-480.2,274.4,1248,0,2496,CAL +26,3,1,0,3,2,0,108,4,2,12,2172,-445.9,274.4,1167,0,2334,CAL +26,4,1,0,3,2,0,108,2,5,7,2167,-411.6,274.4,1074,0,2148,CAL +26,5,1,0,3,2,0,108,2,4,6,2166,-377.3,274.4,979,0,1958,CAL +26,6,1,0,3,1,0,106,1,3,1,2121,-343,274.4,890,0,1780,CAL +26,7,1,0,3,1,0,106,1,2,0,2120,-308.7,274.4,807,0,1614,CAL +26,8,1,1,3,1,0,146,5,5,19,2939,308.7,274.4,752,0,1504,CAL +26,9,1,1,3,1,0,146,5,4,18,2938,343,274.4,832,0,1664,CAL +26,10,1,1,3,2,0,148,4,3,13,2973,377.3,274.4,918,0,1836,CAL +26,11,1,1,3,2,0,148,4,2,12,2972,411.6,274.4,1010,0,2020,CAL +26,12,1,1,3,2,0,148,2,5,7,2967,445.9,274.4,1104,0,2208,CAL +26,13,1,1,3,2,0,148,2,4,6,2966,480.2,274.4,1192,0,2384,CAL +26,14,1,1,3,2,0,148,2,3,5,2965,514.5,274.4,1262,0,2524,CAL +26,15,1,1,3,2,0,148,1,2,0,2960,548.8,274.4,1316,0,2632,CAL +27,0,1,0,3,2,0,108,4,5,15,2175,-531.7,308.7,1344,0,2688,CAL +27,1,1,0,3,2,0,108,3,4,10,2170,-497.4,308.7,1306,0,2612,CAL +27,2,1,0,3,2,0,108,3,3,9,2169,-463,308.7,1249,0,2498,CAL +27,3,1,0,3,2,0,108,3,2,8,2168,-428.8,308.7,1168,0,2336,CAL +27,4,1,0,3,1,0,106,2,4,6,2126,-394.5,308.7,1075,0,2150,CAL +27,5,1,0,3,1,0,106,2,3,5,2125,-360.1,308.7,980,0,1960,CAL +27,6,1,0,3,1,0,106,2,2,4,2124,-325.9,308.7,891,0,1782,CAL +27,7,1,0,3,1,0,106,1,5,3,2123,-291.5,308.7,808,0,1616,CAL +27,8,1,0,3,1,0,106,1,4,2,2122,-257.2,308.7,731,0,1462,CAL +27,9,1,1,3,1,0,146,5,3,17,2937,257.2,308.7,705,0,1410,CAL +27,10,1,1,3,1,0,146,5,2,16,2936,291.5,308.7,751,0,1502,CAL +27,11,1,1,3,1,0,146,4,5,15,2935,325.9,308.7,831,0,1662,CAL +27,12,1,1,3,1,0,146,4,4,14,2934,360.1,308.7,917,0,1834,CAL +27,13,1,1,3,1,0,146,4,3,13,2933,394.5,308.7,1009,0,2018,CAL +27,14,1,1,3,2,0,148,3,5,11,2971,428.8,308.7,1103,0,2206,CAL +27,15,1,1,3,2,0,148,3,4,10,2970,463,308.7,1191,0,2382,CAL +27,16,1,1,3,2,0,148,3,3,9,2969,497.4,308.7,1261,0,2522,CAL +27,17,1,1,3,2,0,148,2,2,4,2964,531.7,308.7,1315,0,2630,CAL 
+28,0,1,0,3,2,0,108,3,5,11,2171,-514.5,343,1345,0,2690,CAL +28,1,1,0,3,1,0,106,3,5,11,2131,-480.2,343,1307,0,2614,CAL +28,2,1,0,3,1,0,106,3,4,10,2130,-445.9,343,1250,0,2500,CAL +28,3,1,0,3,1,0,106,3,3,9,2129,-411.6,343,1169,0,2338,CAL +28,4,1,0,3,1,0,106,3,2,8,2128,-377.3,343,1076,0,2152,CAL +28,5,1,0,3,1,0,106,2,5,7,2127,-343,343,981,0,1962,CAL +28,6,1,0,3,0,0,104,1,5,3,2083,-308.7,343,892,0,1784,CAL +28,7,1,0,3,0,0,104,1,4,2,2082,-274.4,343,809,0,1618,CAL +28,8,1,0,3,0,0,104,1,3,1,2081,-240.1,343,732,0,1464,CAL +28,9,1,0,3,0,0,104,1,2,0,2080,-205.8,343,689,0,1378,CAL +28,10,1,1,3,0,0,144,5,5,19,2899,205.8,343,680,0,1360,CAL +28,11,1,1,3,0,0,144,5,4,18,2898,240.1,343,704,0,1408,CAL +28,12,1,1,3,0,0,144,5,3,17,2897,274.4,343,750,0,1500,CAL +28,13,1,1,3,0,0,144,5,2,16,2896,308.7,343,830,0,1660,CAL +28,14,1,1,3,1,0,146,4,2,12,2932,343,343,916,0,1832,CAL +28,15,1,1,3,1,0,146,3,5,11,2931,377.3,343,1008,0,2016,CAL +28,16,1,1,3,1,0,146,3,4,10,2930,411.6,343,1102,0,2204,CAL +28,17,1,1,3,1,0,146,3,3,9,2929,445.9,343,1190,0,2380,CAL +28,18,1,1,3,1,0,146,3,2,8,2928,480.2,343,1260,0,2520,CAL +28,19,1,1,3,2,0,148,3,2,8,2968,514.5,343,1314,0,2628,CAL +29,0,1,0,3,1,0,106,4,5,15,2135,-497.4,377.3,1346,0,2692,CAL +29,1,1,0,3,1,0,106,4,4,14,2134,-463,377.3,1308,0,2616,CAL +29,2,1,0,3,1,0,106,4,3,13,2133,-428.8,377.3,1251,0,2502,CAL +29,3,1,0,3,1,0,106,4,2,12,2132,-394.5,377.3,1170,0,2340,CAL +29,4,1,0,3,0,0,104,2,5,7,2087,-360.1,377.3,1077,0,2154,CAL +29,5,1,0,3,0,0,104,2,4,6,2086,-325.9,377.3,982,0,1964,CAL +29,6,1,0,3,0,0,104,2,3,5,2085,-291.5,377.3,893,0,1786,CAL +29,7,1,0,3,0,0,104,2,2,4,2084,-257.2,377.3,810,0,1620,CAL +29,8,1,0,4,2,0,116,5,3,17,2337,-222.9,377.3,733,0,1466,CAL +29,9,1,0,4,2,0,116,5,4,18,2338,-188.6,377.3,674,0,1348,CAL +29,10,1,0,4,2,0,116,5,5,19,2339,-154.4,377.3,675,0,1350,CAL +29,11,1,0,4,1,0,114,5,5,19,2299,-120.1,377.3,676,0,1352,CAL +29,12,1,1,4,1,0,154,1,2,0,3080,120.1,377.3,677,0,1354,CAL +29,13,1,1,4,2,0,156,1,2,0,3120,154.4,377.3,678,0,1356,CAL 
+29,14,1,1,4,2,0,156,1,3,1,3121,188.6,377.3,679,0,1358,CAL +29,15,1,1,4,2,0,156,1,4,2,3122,222.9,377.3,703,0,1406,CAL +29,16,1,1,3,0,0,144,4,5,15,2895,257.2,377.3,749,0,1498,CAL +29,17,1,1,3,0,0,144,4,4,14,2894,291.5,377.3,829,0,1658,CAL +29,18,1,1,3,0,0,144,4,3,13,2893,325.9,377.3,915,0,1830,CAL +29,19,1,1,3,0,0,144,4,2,12,2892,360.1,377.3,1007,0,2014,CAL +29,20,1,1,3,1,0,146,2,5,7,2927,394.5,377.3,1101,0,2202,CAL +29,21,1,1,3,1,0,146,2,4,6,2926,428.8,377.3,1189,0,2378,CAL +29,22,1,1,3,1,0,146,2,3,5,2925,463,377.3,1259,0,2518,CAL +29,23,1,1,3,1,0,146,2,2,4,2924,497.4,377.3,1313,0,2626,CAL +30,0,1,0,3,1,0,106,5,4,18,2138,-480.2,411.6,1347,0,2694,CAL +30,1,1,0,3,1,0,106,5,3,17,2137,-445.9,411.6,1309,0,2618,CAL +30,2,1,0,3,1,0,106,5,2,16,2136,-411.6,411.6,1252,0,2504,CAL +30,3,1,0,3,0,0,104,3,5,11,2091,-377.3,411.6,1171,0,2342,CAL +30,4,1,0,3,0,0,104,3,4,10,2090,-343,411.6,1078,0,2156,CAL +30,5,1,0,3,0,0,104,3,3,9,2089,-308.7,411.6,983,0,1966,CAL +30,6,1,0,3,0,0,104,3,2,8,2088,-274.4,411.6,894,0,1788,CAL +30,7,1,0,4,2,0,116,4,4,14,2334,-240.1,411.6,811,0,1622,CAL +30,8,1,0,4,2,0,116,4,5,15,2335,-205.8,411.6,690,0,1380,CAL +30,9,1,0,4,2,0,116,5,2,16,2336,-171.5,411.6,691,0,1382,CAL +30,10,1,0,4,1,0,114,5,2,16,2296,-137.2,411.6,692,0,1384,CAL +30,11,1,0,4,1,0,114,5,3,17,2297,-102.9,411.6,693,0,1386,CAL +30,12,1,0,4,1,0,114,5,4,18,2298,-68.6,411.6,694,0,1388,CAL +30,13,1,0,4,0,0,112,5,4,19,2258,-34.3,411.6,695,0,1390,CAL +30,14,1,0,4,0,0,112,5,5,18,2259,0,411.6,696,0,1392,CAL +30,15,1,1,4,0,0,152,1,2,0,3040,34.3,411.6,697,0,1394,CAL +30,16,1,1,4,1,0,154,1,3,1,3081,68.6,411.6,698,0,1396,CAL +30,17,1,1,4,1,0,154,1,4,2,3082,102.9,411.6,699,0,1398,CAL +30,18,1,1,4,1,0,154,1,5,3,3083,137.2,411.6,700,0,1400,CAL +30,19,1,1,4,2,0,156,1,5,3,3123,171.5,411.6,701,0,1402,CAL +30,20,1,1,4,2,0,156,2,2,4,3124,205.8,411.6,702,0,1404,CAL +30,21,1,1,4,2,0,156,2,3,5,3125,240.1,411.6,748,0,1496,CAL +30,22,1,1,3,0,0,144,3,5,11,2891,274.4,411.6,828,0,1656,CAL 
+30,23,1,1,3,0,0,144,3,4,10,2890,308.7,411.6,914,0,1828,CAL +30,24,1,1,3,0,0,144,3,3,9,2889,343,411.6,1006,0,2012,CAL +30,25,1,1,3,0,0,144,3,2,8,2888,377.3,411.6,1100,0,2200,CAL +30,26,1,1,3,1,0,146,1,5,3,2923,411.6,411.6,1188,0,2376,CAL +30,27,1,1,3,1,0,146,1,4,2,2922,445.9,411.6,1258,0,2516,CAL +30,28,1,1,3,1,0,146,1,3,1,2921,480.2,411.6,1312,0,2624,CAL +31,0,1,0,3,1,0,106,5,5,19,2139,-428.8,445.9,1310,0,2620,CAL +31,1,1,0,3,0,0,104,4,5,15,2095,-394.5,445.9,1253,0,2506,CAL +31,2,1,0,3,0,0,104,4,4,14,2094,-360.1,445.9,1172,0,2344,CAL +31,3,1,0,3,0,0,104,4,3,13,2093,-325.9,445.9,1079,0,2158,CAL +31,4,1,0,3,0,0,104,4,2,12,2092,-291.5,445.9,984,0,1968,CAL +31,5,1,0,4,2,0,116,3,5,11,2331,-257.2,445.9,895,0,1790,CAL +31,6,1,0,4,2,0,116,4,2,12,2332,-222.9,445.9,734,0,1468,CAL +31,7,1,0,4,2,0,116,4,3,13,2333,-188.6,445.9,735,0,1470,CAL +31,8,1,0,4,1,0,114,4,3,13,2293,-154.4,445.9,736,0,1472,CAL +31,9,1,0,4,1,0,114,4,4,14,2294,-120.1,445.9,737,0,1474,CAL +31,10,1,0,4,1,0,114,4,5,15,2295,-85.8,445.9,738,0,1476,CAL +31,11,1,0,4,0,0,112,5,2,17,2256,-51.5,445.9,739,0,1478,CAL +31,12,1,0,4,0,0,112,5,3,16,2257,-17.1,445.9,740,0,1480,CAL +31,13,1,1,4,0,0,152,1,3,1,3041,17.1,445.9,741,0,1482,CAL +31,14,1,1,4,0,0,152,1,4,2,3042,51.5,445.9,742,0,1484,CAL +31,15,1,1,4,1,0,154,2,2,4,3084,85.8,445.9,743,0,1486,CAL +31,16,1,1,4,1,0,154,2,3,5,3085,120.1,445.9,744,0,1488,CAL +31,17,1,1,4,1,0,154,2,4,6,3086,154.4,445.9,745,0,1490,CAL +31,18,1,1,4,2,0,156,2,4,6,3126,188.6,445.9,746,0,1492,CAL +31,19,1,1,4,2,0,156,2,5,7,3127,222.9,445.9,747,0,1494,CAL +31,20,1,1,4,2,0,156,3,2,8,3128,257.2,445.9,827,0,1654,CAL +31,21,1,1,3,0,0,144,2,5,7,2887,291.5,445.9,913,0,1826,CAL +31,22,1,1,3,0,0,144,2,4,6,2886,325.9,445.9,1005,0,2010,CAL +31,23,1,1,3,0,0,144,2,3,5,2885,360.1,445.9,1099,0,2198,CAL +31,24,1,1,3,0,0,144,2,2,4,2884,394.5,445.9,1187,0,2374,CAL +31,25,1,1,3,1,0,146,1,2,0,2920,428.8,445.9,1257,0,2514,CAL +32,0,1,0,3,0,0,104,5,4,18,2098,-411.6,480.2,1311,0,2622,CAL 
+32,1,1,0,3,0,0,104,5,3,17,2097,-377.3,480.2,1254,0,2508,CAL +32,2,1,0,3,0,0,104,5,2,16,2096,-343,480.2,1173,0,2346,CAL +32,3,1,0,4,2,0,116,2,5,7,2327,-308.7,480.2,1080,0,2160,CAL +32,4,1,0,4,2,0,116,3,2,8,2328,-274.4,480.2,985,0,1970,CAL +32,5,1,0,4,2,0,116,3,3,9,2329,-240.1,480.2,812,0,1624,CAL +32,6,1,0,4,2,0,116,3,4,10,2330,-205.8,480.2,813,0,1626,CAL +32,7,1,0,4,1,0,114,3,4,10,2290,-171.5,480.2,814,0,1628,CAL +32,8,1,0,4,1,0,114,3,5,11,2291,-137.2,480.2,815,0,1630,CAL +32,9,1,0,4,1,0,114,4,2,12,2292,-102.9,480.2,816,0,1632,CAL +32,10,1,0,4,0,0,112,4,4,15,2254,-68.6,480.2,817,0,1634,CAL +32,11,1,0,4,0,0,112,4,5,14,2255,-34.3,480.2,818,0,1636,CAL +32,12,1,1,4,0,0,152,1,5,3,3043,0,480.2,819,0,1638,CAL +32,13,1,1,4,0,0,152,2,2,4,3044,34.3,480.2,820,0,1640,CAL +32,14,1,1,4,0,0,152,2,3,5,3045,68.6,480.2,821,0,1642,CAL +32,15,1,1,4,1,0,154,2,5,7,3087,102.9,480.2,822,0,1644,CAL +32,16,1,1,4,1,0,154,3,2,8,3088,137.2,480.2,823,0,1646,CAL +32,17,1,1,4,1,0,154,3,3,9,3089,171.5,480.2,824,0,1648,CAL +32,18,1,1,4,2,0,156,3,3,9,3129,205.8,480.2,825,0,1650,CAL +32,19,1,1,4,2,0,156,3,4,10,3130,240.1,480.2,826,0,1652,CAL +32,20,1,1,4,2,0,156,3,5,11,3131,274.4,480.2,912,0,1824,CAL +32,21,1,1,4,2,0,156,4,2,12,3132,308.7,480.2,1004,0,2008,CAL +32,22,1,1,3,0,0,144,1,5,3,2883,343,480.2,1098,0,2196,CAL +32,23,1,1,3,0,0,144,1,4,2,2882,377.3,480.2,1186,0,2372,CAL +32,24,1,1,3,0,0,144,1,3,1,2881,411.6,480.2,1256,0,2512,CAL +33,0,1,0,3,0,0,104,5,5,19,2099,-360.1,514.5,1255,0,2510,CAL +33,1,1,0,4,2,0,116,1,5,3,2323,-325.9,514.5,1174,0,2348,CAL +33,2,1,0,4,2,0,116,2,2,4,2324,-291.5,514.5,1081,0,2162,CAL +33,3,1,0,4,2,0,116,2,3,5,2325,-257.2,514.5,896,0,1792,CAL +33,4,1,0,4,2,0,116,2,4,6,2326,-222.9,514.5,897,0,1794,CAL +33,5,1,0,4,1,0,114,2,5,7,2287,-188.6,514.5,898,0,1796,CAL +33,6,1,0,4,1,0,114,3,2,8,2288,-154.4,514.5,899,0,1798,CAL +33,7,1,0,4,1,0,114,3,3,9,2289,-120.1,514.5,900,0,1800,CAL +33,8,1,0,4,0,1,112,3,5,13,2251,-85.8,514.5,901,1,1803,CAL 
+33,9,1,0,4,0,0,112,4,2,12,2252,-51.5,514.5,902,0,1804,CAL +33,10,1,0,4,0,0,112,4,3,11,2253,-17.1,514.5,903,0,1806,CAL +33,11,1,1,4,0,0,152,2,4,6,3046,17.1,514.5,904,0,1808,CAL +33,12,1,1,4,0,0,152,2,5,7,3047,51.5,514.5,905,0,1810,CAL +33,13,1,1,4,0,0,152,3,2,8,3048,85.8,514.5,906,0,1812,CAL +33,14,1,1,4,1,0,154,3,4,10,3090,120.1,514.5,907,0,1814,CAL +33,15,1,1,4,1,0,154,3,5,11,3091,154.4,514.5,908,0,1816,CAL +33,16,1,1,4,1,0,154,4,2,12,3092,188.6,514.5,909,0,1818,CAL +33,17,1,1,4,2,0,156,4,3,13,3133,222.9,514.5,910,0,1820,CAL +33,18,1,1,4,2,0,156,4,4,14,3134,257.2,514.5,911,0,1822,CAL +33,19,1,1,4,2,0,156,4,5,15,3135,291.5,514.5,1003,0,2006,CAL +33,20,1,1,4,2,0,156,5,2,16,3136,325.9,514.5,1097,0,2194,CAL +33,21,1,1,3,0,0,144,1,2,0,2880,360.1,514.5,1185,0,2370,CAL +34,0,1,0,4,2,0,116,1,2,0,2320,-308.7,548.8,1175,0,2350,CAL +34,1,1,0,4,2,0,116,1,3,1,2321,-274.4,548.8,986,0,1972,CAL +34,2,1,0,4,2,0,116,1,4,2,2322,-240.1,548.8,987,0,1974,CAL +34,3,1,0,4,1,0,114,1,5,3,2283,-205.8,548.8,988,0,1976,CAL +34,4,1,0,4,1,0,114,2,2,4,2284,-171.5,548.8,989,0,1978,CAL +34,5,1,0,4,1,0,114,2,3,5,2285,-137.2,548.8,990,0,1980,CAL +34,6,1,0,4,1,0,114,2,4,6,2286,-102.9,548.8,991,0,1982,CAL +34,7,1,0,4,0,0,112,3,2,10,2248,-68.6,548.8,992,0,1984,CAL +34,8,1,0,4,0,0,112,3,3,9,2249,-34.3,548.8,993,0,1986,CAL +34,9,1,0,4,0,0,112,3,4,8,2250,0,548.8,994,0,1988,CAL +34,10,1,1,4,0,0,152,3,3,9,3049,34.3,548.8,995,0,1990,CAL +34,11,1,1,4,0,0,152,3,4,10,3050,68.6,548.8,996,0,1992,CAL +34,12,1,1,4,0,0,152,3,5,11,3051,102.9,548.8,997,0,1994,CAL +34,13,1,1,4,1,0,154,4,3,13,3093,137.2,548.8,998,0,1996,CAL +34,14,1,1,4,1,0,154,4,4,14,3094,171.5,548.8,999,0,1998,CAL +34,15,1,1,4,1,0,154,4,5,15,3095,205.8,548.8,1000,0,2000,CAL +34,16,1,1,4,2,0,156,5,3,17,3137,240.1,548.8,1001,0,2002,CAL +34,17,1,1,4,2,0,156,5,4,18,3138,274.4,548.8,1002,0,2004,CAL +34,18,1,1,4,2,0,156,5,5,19,3139,308.7,548.8,1096,0,2192,CAL +35,0,1,0,4,1,0,114,1,2,0,2280,-222.9,583.1,1082,0,2164,CAL 
+35,1,1,0,4,1,0,114,1,3,1,2281,-188.6,583.1,1083,0,2166,CAL +35,2,1,0,4,1,0,114,1,4,2,2282,-154.4,583.1,1084,0,2168,CAL +35,3,1,0,4,0,0,112,2,2,7,2244,-120.1,583.1,1085,0,2170,CAL +35,4,1,0,4,0,0,112,2,3,6,2245,-85.8,583.1,1086,0,2172,CAL +35,5,1,0,4,0,0,112,2,4,5,2246,-51.5,583.1,1087,0,2174,CAL +35,6,1,0,4,0,0,112,2,5,4,2247,-17.1,583.1,1088,0,2176,CAL +35,7,1,1,4,0,0,152,4,2,12,3052,17.1,583.1,1089,0,2178,CAL +35,8,1,1,4,0,0,152,4,3,13,3053,51.5,583.1,1090,0,2180,CAL +35,9,1,1,4,0,0,152,4,4,14,3054,85.8,583.1,1091,0,2182,CAL +35,10,1,1,4,0,0,152,4,5,15,3055,120.1,583.1,1092,0,2184,CAL +35,11,1,1,4,1,0,154,5,2,16,3096,154.4,583.1,1093,0,2186,CAL +35,12,1,1,4,1,0,154,5,3,17,3097,188.6,583.1,1094,0,2188,CAL +35,13,1,1,4,1,0,154,5,4,18,3098,222.9,583.1,1095,0,2190,CAL +36,0,1,0,4,0,0,112,1,2,3,2240,-137.2,617.4,1176,0,2352,CAL +36,1,1,0,4,0,0,112,1,3,2,2241,-102.9,617.4,1177,0,2354,CAL +36,2,1,0,4,0,1,112,1,4,1,2242,-68.6,617.4,1178,1,2357,CAL +36,3,1,0,4,0,0,112,1,5,0,2243,-34.3,617.4,1179,0,2358,CAL +36,4,1,1,4,0,0,152,5,2,16,3056,0,617.4,1180,0,2360,CAL +36,5,1,1,4,0,0,152,5,3,17,3057,34.3,617.4,1181,0,2362,CAL +36,6,1,1,4,0,0,152,5,4,18,3058,68.6,617.4,1182,0,2364,CAL +36,7,1,1,4,0,0,152,5,5,19,3059,102.9,617.4,1183,0,2366,CAL +36,8,1,1,4,1,0,154,5,5,19,3099,137.2,617.4,1184,0,2368,CAL +0,0,1,0,0,2,1,85,5,5,19,1719,-137.2,-617.4,1224,1,2449,CAL +0,1,1,0,0,3,1,87,5,2,16,1756,-102.9,-617.4,1223,1,2447,CAL +0,2,1,0,0,3,1,87,5,3,17,1757,-68.6,-617.4,1222,1,2445,CAL +0,3,1,0,0,3,1,87,5,4,18,1758,-34.3,-617.4,1221,1,2443,CAL +0,4,1,0,0,3,1,87,5,5,19,1759,0,-617.4,1220,1,2441,CAL +0,5,1,1,0,3,1,127,1,2,0,2540,34.3,-617.4,1219,1,2439,CAL +0,6,1,1,0,3,1,127,1,3,1,2541,68.6,-617.4,1218,1,2437,CAL +0,7,1,1,0,3,1,127,1,4,2,2542,102.9,-617.4,1217,1,2435,CAL +0,8,1,1,0,3,1,127,1,5,3,2543,137.2,-617.4,1216,1,2433,CAL +1,0,1,0,0,2,1,85,4,5,15,1715,-222.9,-583.1,1142,1,2285,CAL +1,1,1,0,0,2,1,85,5,2,16,1716,-188.6,-583.1,1141,1,2283,CAL 
+1,2,1,0,0,2,1,85,5,3,17,1717,-154.4,-583.1,1140,1,2281,CAL +1,3,1,0,0,2,1,85,5,4,18,1718,-120.1,-583.1,1139,1,2279,CAL +1,4,1,0,0,3,1,87,4,3,13,1753,-85.8,-583.1,1138,1,2277,CAL +1,5,1,0,0,3,1,87,4,4,14,1754,-51.5,-583.1,1137,1,2275,CAL +1,6,1,0,0,3,1,87,4,5,15,1755,-17.1,-583.1,1136,1,2273,CAL +1,7,1,1,0,3,1,127,2,2,4,2544,17.1,-583.1,1135,1,2271,CAL +1,8,1,1,0,3,1,127,2,3,5,2545,51.5,-583.1,1134,1,2269,CAL +1,9,1,1,0,3,1,127,2,4,6,2546,85.8,-583.1,1133,1,2267,CAL +1,10,1,1,0,3,1,127,2,5,7,2547,120.1,-583.1,1132,1,2265,CAL +1,11,1,1,0,2,1,125,1,2,0,2500,154.4,-583.1,1131,1,2263,CAL +1,12,1,1,0,2,1,125,1,3,1,2501,188.6,-583.1,1130,1,2261,CAL +1,13,1,1,0,2,1,125,1,4,2,2502,222.9,-583.1,1129,1,2259,CAL +2,0,1,0,0,1,1,83,5,4,18,1678,-308.7,-548.8,1143,1,2287,CAL +2,1,1,0,0,1,1,83,5,5,19,1679,-274.4,-548.8,1050,1,2101,CAL +2,2,1,0,0,2,1,85,3,5,11,1711,-240.1,-548.8,1049,1,2099,CAL +2,3,1,0,0,2,1,85,4,2,12,1712,-205.8,-548.8,1048,1,2097,CAL +2,4,1,0,0,2,1,85,4,3,13,1713,-171.5,-548.8,1047,1,2095,CAL +2,5,1,0,0,2,1,85,4,4,14,1714,-137.2,-548.8,1046,1,2093,CAL +2,6,1,0,0,3,1,87,3,4,10,1750,-102.9,-548.8,1045,1,2091,CAL +2,7,1,0,0,3,1,87,3,5,11,1751,-68.6,-548.8,1044,1,2089,CAL +2,8,1,0,0,3,1,87,4,2,12,1752,-34.3,-548.8,1043,1,2087,CAL +2,9,1,1,0,3,1,127,3,2,8,2548,0,-548.8,1042,1,2085,CAL +2,10,1,1,0,3,1,127,3,3,9,2549,34.3,-548.8,1041,1,2083,CAL +2,11,1,1,0,3,1,127,3,4,10,2550,68.6,-548.8,1040,1,2081,CAL +2,12,1,1,0,3,1,127,3,5,11,2551,102.9,-548.8,1039,1,2079,CAL +2,13,1,1,0,2,1,125,1,5,3,2503,137.2,-548.8,1038,1,2077,CAL +2,14,1,1,0,2,1,125,2,2,4,2504,171.5,-548.8,1037,1,2075,CAL +2,15,1,1,0,2,1,125,2,3,5,2505,205.8,-548.8,1036,1,2073,CAL +2,16,1,1,0,2,1,125,2,4,6,2506,240.1,-548.8,1035,1,2071,CAL +2,17,1,1,0,1,1,123,1,2,0,2460,274.4,-548.8,1034,1,2069,CAL +2,18,1,1,0,1,1,123,1,3,1,2461,308.7,-548.8,1128,1,2257,CAL +3,0,1,0,0,0,1,81,5,5,19,1639,-360.1,-514.5,1225,1,2451,CAL +3,1,1,0,0,1,1,83,4,4,14,1674,-325.9,-514.5,1144,1,2289,CAL 
+3,2,1,0,0,1,1,83,4,5,15,1675,-291.5,-514.5,1051,1,2103,CAL
+3,3,1,0,0,1,1,83,5,2,16,1676,-257.2,-514.5,956,1,1913,CAL
+3,4,1,0,0,1,1,83,5,3,17,1677,-222.9,-514.5,955,1,1911,CAL
+3,5,1,0,0,2,1,85,3,2,8,1708,-188.6,-514.5,954,1,1909,CAL
+3,6,1,0,0,2,1,85,3,3,9,1709,-154.4,-514.5,953,1,1907,CAL
+3,7,1,0,0,2,1,85,3,4,10,1710,-120.1,-514.5,952,1,1905,CAL
+3,8,1,0,0,3,1,87,2,5,7,1747,-85.8,-514.5,951,1,1903,CAL
+3,9,1,0,0,3,1,87,3,2,8,1748,-51.5,-514.5,950,1,1901,CAL
+3,10,1,0,0,3,1,87,3,3,9,1749,-17.1,-514.5,949,1,1899,CAL
+3,11,1,1,0,3,1,127,4,2,12,2552,17.1,-514.5,948,1,1897,CAL
+3,12,1,1,0,3,1,127,4,3,13,2553,51.5,-514.5,947,1,1895,CAL
+3,13,1,1,0,2,1,125,2,5,7,2507,85.8,-514.5,946,1,1893,CAL
+3,14,1,1,0,2,1,125,3,2,8,2508,120.1,-514.5,945,1,1891,CAL
+3,15,1,1,0,2,1,125,3,3,9,2509,154.4,-514.5,944,1,1889,CAL
+3,16,1,1,0,2,1,125,3,4,10,2510,188.6,-514.5,943,1,1887,CAL
+3,17,1,1,0,1,1,123,1,4,2,2462,222.9,-514.5,942,1,1885,CAL
+3,18,1,1,0,1,1,123,1,5,3,2463,257.2,-514.5,941,1,1883,CAL
+3,19,1,1,0,1,1,123,2,2,4,2464,291.5,-514.5,1033,1,2067,CAL
+3,20,1,1,0,1,1,123,2,3,5,2465,325.9,-514.5,1127,1,2255,CAL
+3,21,1,1,0,0,1,121,1,2,0,2420,360.1,-514.5,1215,1,2431,CAL
+4,0,1,0,0,0,1,81,4,5,15,1635,-411.6,-480.2,1284,1,2569,CAL
+4,1,1,0,0,0,1,81,5,2,16,1636,-377.3,-480.2,1226,1,2453,CAL
+4,2,1,0,0,0,1,81,5,3,17,1637,-343,-480.2,1145,1,2291,CAL
+4,3,1,0,0,0,1,81,5,4,18,1638,-308.7,-480.2,1052,1,2105,CAL
+4,4,1,0,0,1,1,83,3,5,11,1671,-274.4,-480.2,957,1,1915,CAL
+4,5,1,0,0,1,1,83,4,2,12,1672,-240.1,-480.2,868,1,1737,CAL
+4,6,1,0,0,1,1,83,4,3,13,1673,-205.8,-480.2,867,1,1735,CAL
+4,7,1,0,0,2,1,85,2,3,5,1705,-171.5,-480.2,866,1,1733,CAL
+4,8,1,0,0,2,1,85,2,4,6,1706,-137.2,-480.2,865,1,1731,CAL
+4,9,1,0,0,2,1,85,2,5,7,1707,-102.9,-480.2,864,1,1729,CAL
+4,10,1,0,0,3,1,87,2,2,4,1744,-68.6,-480.2,863,1,1727,CAL
+4,11,1,0,0,3,1,87,2,3,5,1745,-34.3,-480.2,862,1,1725,CAL
+4,12,1,0,0,3,1,87,2,4,6,1746,0,-480.2,861,1,1723,CAL
+4,13,1,1,0,3,1,127,4,4,14,2554,34.3,-480.2,860,1,1721,CAL
+4,14,1,1,0,3,1,127,4,5,15,2555,68.6,-480.2,859,1,1719,CAL
+4,15,1,1,0,2,1,125,3,5,11,2511,102.9,-480.2,858,1,1717,CAL
+4,16,1,1,0,2,1,125,4,2,12,2512,137.2,-480.2,857,1,1715,CAL
+4,17,1,1,0,2,1,125,4,3,13,2513,171.5,-480.2,856,1,1713,CAL
+4,18,1,1,0,1,1,123,2,4,6,2466,205.8,-480.2,855,1,1711,CAL
+4,19,1,1,0,1,1,123,2,5,7,2467,240.1,-480.2,854,1,1709,CAL
+4,20,1,1,0,1,1,123,3,2,8,2468,274.4,-480.2,940,1,1881,CAL
+4,21,1,1,0,1,1,123,3,3,9,2469,308.7,-480.2,1032,1,2065,CAL
+4,22,1,1,0,0,1,121,1,3,1,2421,343,-480.2,1126,1,2253,CAL
+4,23,1,1,0,0,1,121,1,4,2,2422,377.3,-480.2,1214,1,2429,CAL
+4,24,1,1,0,0,1,121,1,5,3,2423,411.6,-480.2,1283,1,2567,CAL
+5,0,1,0,1,3,1,95,5,5,19,1919,-428.8,-445.9,1285,1,2571,CAL
+5,1,1,0,0,0,1,81,3,5,11,1631,-394.5,-445.9,1227,1,2455,CAL
+5,2,1,0,0,0,1,81,4,2,12,1632,-360.1,-445.9,1146,1,2293,CAL
+5,3,1,0,0,0,1,81,4,3,13,1633,-325.9,-445.9,1053,1,2107,CAL
+5,4,1,0,0,0,1,81,4,4,14,1634,-291.5,-445.9,958,1,1917,CAL
+5,5,1,0,0,1,1,83,3,2,8,1668,-257.2,-445.9,869,1,1739,CAL
+5,6,1,0,0,1,1,83,3,3,9,1669,-222.9,-445.9,786,1,1573,CAL
+5,7,1,0,0,1,1,83,3,4,10,1670,-188.6,-445.9,785,1,1571,CAL
+5,8,1,0,0,2,1,85,1,4,2,1702,-154.4,-445.9,784,1,1569,CAL
+5,9,1,0,0,2,1,85,1,5,3,1703,-120.1,-445.9,783,1,1567,CAL
+5,10,1,0,0,2,1,85,2,2,4,1704,-85.8,-445.9,782,1,1565,CAL
+5,11,1,0,0,3,1,87,1,4,2,1742,-51.5,-445.9,781,1,1563,CAL
+5,12,1,0,0,3,1,87,1,5,3,1743,-17.1,-445.9,780,1,1561,CAL
+5,13,1,1,0,3,1,127,5,2,16,2556,17.1,-445.9,779,1,1559,CAL
+5,14,1,1,0,3,1,127,5,3,17,2557,51.5,-445.9,778,1,1557,CAL
+5,15,1,1,0,2,1,125,4,4,14,2514,85.8,-445.9,777,1,1555,CAL
+5,16,1,1,0,2,1,125,4,5,15,2515,120.1,-445.9,776,1,1553,CAL
+5,17,1,1,0,2,1,125,5,2,16,2516,154.4,-445.9,775,1,1551,CAL
+5,18,1,1,0,1,1,123,3,4,10,2470,188.6,-445.9,774,1,1549,CAL
+5,19,1,1,0,1,1,123,3,5,11,2471,222.9,-445.9,773,1,1547,CAL
+5,20,1,1,0,1,1,123,4,2,12,2472,257.2,-445.9,853,1,1707,CAL
+5,21,1,1,0,1,1,123,4,3,13,2473,291.5,-445.9,939,1,1879,CAL
+5,22,1,1,0,0,1,121,2,2,4,2424,325.9,-445.9,1031,1,2063,CAL +5,23,1,1,0,0,1,121,2,3,5,2425,360.1,-445.9,1125,1,2251,CAL +5,24,1,1,0,0,1,121,2,4,6,2426,394.5,-445.9,1213,1,2427,CAL +5,25,1,1,0,0,1,121,2,5,7,2427,428.8,-445.9,1282,1,2565,CAL +6,0,1,0,1,3,1,95,5,4,18,1918,-480.2,-411.6,1330,1,2661,CAL +6,1,1,0,1,3,1,95,5,3,17,1917,-445.9,-411.6,1286,1,2573,CAL +6,2,1,0,1,3,1,95,5,2,16,1916,-411.6,-411.6,1228,1,2457,CAL +6,3,1,0,0,0,1,81,2,5,7,1627,-377.3,-411.6,1147,1,2295,CAL +6,4,1,0,0,0,1,81,3,2,8,1628,-343,-411.6,1054,1,2109,CAL +6,5,1,0,0,0,1,81,3,3,9,1629,-308.7,-411.6,959,1,1919,CAL +6,6,1,0,0,0,1,81,3,4,10,1630,-274.4,-411.6,870,1,1741,CAL +6,7,1,0,0,1,1,83,2,3,5,1665,-240.1,-411.6,787,1,1575,CAL +6,8,1,0,0,1,1,83,2,4,6,1666,-205.8,-411.6,724,1,1449,CAL +6,9,1,0,0,1,1,83,2,5,7,1667,-171.5,-411.6,723,1,1447,CAL +6,10,1,0,0,2,1,85,1,2,0,1700,-137.2,-411.6,722,1,1445,CAL +6,11,1,0,0,2,1,85,1,3,1,1701,-102.9,-411.6,721,1,1443,CAL +6,12,1,0,0,3,1,87,1,2,0,1740,-68.6,-411.6,720,1,1441,CAL +6,13,1,0,0,3,1,87,1,3,1,1741,-34.3,-411.6,719,1,1439,CAL +6,14,1,1,0,3,1,127,5,4,18,2558,0,-411.6,718,1,1437,CAL +6,15,1,1,0,3,1,127,5,5,19,2559,34.3,-411.6,717,1,1435,CAL +6,16,1,1,0,2,1,125,5,3,17,2517,68.6,-411.6,716,1,1433,CAL +6,17,1,1,0,2,1,125,5,4,18,2518,102.9,-411.6,715,1,1431,CAL +6,18,1,1,0,2,1,125,5,5,19,2519,137.2,-411.6,714,1,1429,CAL +6,19,1,1,0,1,1,123,4,4,14,2474,171.5,-411.6,713,1,1427,CAL +6,20,1,1,0,1,1,123,4,5,15,2475,205.8,-411.6,712,1,1425,CAL +6,21,1,1,0,1,1,123,5,2,16,2476,240.1,-411.6,772,1,1545,CAL +6,22,1,1,0,0,1,121,3,2,8,2428,274.4,-411.6,852,1,1705,CAL +6,23,1,1,0,0,1,121,3,3,9,2429,308.7,-411.6,938,1,1877,CAL +6,24,1,1,0,0,1,121,3,4,10,2430,343,-411.6,1030,1,2061,CAL +6,25,1,1,0,0,1,121,3,5,11,2431,377.3,-411.6,1124,1,2249,CAL +6,26,1,1,1,3,1,135,1,4,2,2702,411.6,-411.6,1212,1,2425,CAL +6,27,1,1,1,3,1,135,1,3,1,2701,445.9,-411.6,1281,1,2563,CAL +6,28,1,1,1,3,1,135,1,2,0,2700,480.2,-411.6,1329,1,2659,CAL 
+7,0,1,0,1,3,1,95,4,5,15,1915,-497.4,-377.3,1331,1,2663,CAL +7,1,1,0,1,3,1,95,4,4,14,1914,-463,-377.3,1287,1,2575,CAL +7,2,1,0,1,3,1,95,4,3,13,1913,-428.8,-377.3,1229,1,2459,CAL +7,3,1,0,1,3,1,95,4,2,12,1912,-394.5,-377.3,1148,1,2297,CAL +7,4,1,0,1,3,1,95,3,5,11,1911,-360.1,-377.3,1055,1,2111,CAL +7,5,1,0,0,0,1,81,2,2,4,1624,-325.9,-377.3,960,1,1921,CAL +7,6,1,0,0,0,1,81,2,3,5,1625,-291.5,-377.3,871,1,1743,CAL +7,7,1,0,0,0,1,81,2,4,6,1626,-257.2,-377.3,788,1,1577,CAL +7,8,1,0,0,1,1,83,1,3,1,1661,-222.9,-377.3,725,1,1451,CAL +7,9,1,0,0,1,1,83,1,4,2,1662,-188.6,-377.3,687,1,1375,CAL +7,10,1,0,0,1,1,83,1,5,3,1663,-154.4,-377.3,686,1,1373,CAL +7,11,1,0,0,1,1,83,2,2,4,1664,-120.1,-377.3,685,1,1371,CAL +7,12,1,1,0,1,1,123,5,3,17,2477,120.1,-377.3,684,1,1369,CAL +7,13,1,1,0,1,1,123,5,4,18,2478,154.4,-377.3,683,1,1367,CAL +7,14,1,1,0,1,1,123,5,5,19,2479,188.6,-377.3,682,1,1365,CAL +7,15,1,1,0,0,1,121,4,2,12,2432,222.9,-377.3,711,1,1423,CAL +7,16,1,1,0,0,1,121,4,3,13,2433,257.2,-377.3,771,1,1543,CAL +7,17,1,1,0,0,1,121,4,4,14,2434,291.5,-377.3,851,1,1703,CAL +7,18,1,1,0,0,1,121,4,5,15,2435,325.9,-377.3,937,1,1875,CAL +7,19,1,1,1,3,1,135,2,5,7,2707,360.1,-377.3,1029,1,2059,CAL +7,20,1,1,1,3,1,135,2,4,6,2706,394.5,-377.3,1123,1,2247,CAL +7,21,1,1,1,3,1,135,2,3,5,2705,428.8,-377.3,1211,1,2423,CAL +7,22,1,1,1,3,1,135,2,2,4,2704,463,-377.3,1280,1,2561,CAL +7,23,1,1,1,3,1,135,1,5,3,2703,497.4,-377.3,1328,1,2657,CAL +8,0,1,0,1,2,1,93,5,5,19,1879,-514.5,-343,1332,1,2665,CAL +8,1,1,0,1,3,1,95,3,4,10,1910,-480.2,-343,1288,1,2577,CAL +8,2,1,0,1,3,1,95,3,3,9,1909,-445.9,-343,1230,1,2461,CAL +8,3,1,0,1,3,1,95,3,2,8,1908,-411.6,-343,1149,1,2299,CAL +8,4,1,0,1,3,1,95,2,5,7,1907,-377.3,-343,1056,1,2113,CAL +8,5,1,0,1,3,1,95,2,4,6,1906,-343,-343,961,1,1923,CAL +8,6,1,0,0,0,1,81,1,3,1,1621,-308.7,-343,872,1,1745,CAL +8,7,1,0,0,0,1,81,1,4,2,1622,-274.4,-343,789,1,1579,CAL +8,8,1,0,0,0,1,81,1,5,3,1623,-240.1,-343,726,1,1453,CAL +8,9,1,0,0,1,1,83,1,2,0,1660,-205.8,-343,688,1,1377,CAL 
+8,10,1,1,0,0,1,121,5,2,16,2436,205.8,-343,681,1,1363,CAL +8,11,1,1,0,0,1,121,5,3,17,2437,240.1,-343,710,1,1421,CAL +8,12,1,1,0,0,1,121,5,4,18,2438,274.4,-343,770,1,1541,CAL +8,13,1,1,1,3,1,135,4,2,12,2712,308.7,-343,850,1,1701,CAL +8,14,1,1,1,3,1,135,3,5,11,2711,343,-343,936,1,1873,CAL +8,15,1,1,1,3,1,135,3,4,10,2710,377.3,-343,1028,1,2057,CAL +8,16,1,1,1,3,1,135,3,3,9,2709,411.6,-343,1122,1,2245,CAL +8,17,1,1,1,3,1,135,3,2,8,2708,445.9,-343,1210,1,2421,CAL +8,18,1,1,1,2,1,133,1,3,1,2661,480.2,-343,1279,1,2559,CAL +8,19,1,1,1,2,1,133,1,2,0,2660,514.5,-343,1327,1,2655,CAL +9,0,1,0,1,2,1,93,5,4,18,1878,-531.7,-308.7,1333,1,2667,CAL +9,1,1,0,1,2,1,93,5,3,17,1877,-497.4,-308.7,1289,1,2579,CAL +9,2,1,0,1,2,1,93,5,2,16,1876,-463,-308.7,1231,1,2463,CAL +9,3,1,0,1,2,1,93,4,5,15,1875,-428.8,-308.7,1150,1,2301,CAL +9,4,1,0,1,3,1,95,2,3,5,1905,-394.5,-308.7,1057,1,2115,CAL +9,5,1,0,1,3,1,95,2,2,4,1904,-360.1,-308.7,962,1,1925,CAL +9,6,1,0,1,3,1,95,1,5,3,1903,-325.9,-308.7,873,1,1747,CAL +9,7,1,0,1,3,1,95,1,4,2,1902,-291.5,-308.7,790,1,1581,CAL +9,8,1,0,0,0,1,81,1,2,0,1620,-257.2,-308.7,727,1,1455,CAL +9,9,1,1,0,0,1,121,5,5,19,2439,257.2,-308.7,709,1,1419,CAL +9,10,1,1,1,3,1,135,5,2,16,2716,291.5,-308.7,769,1,1539,CAL +9,11,1,1,1,3,1,135,4,5,15,2715,325.9,-308.7,849,1,1699,CAL +9,12,1,1,1,3,1,135,4,4,14,2714,360.1,-308.7,935,1,1871,CAL +9,13,1,1,1,3,1,135,4,3,13,2713,394.5,-308.7,1027,1,2055,CAL +9,14,1,1,1,2,1,133,2,3,5,2665,428.8,-308.7,1121,1,2243,CAL +9,15,1,1,1,2,1,133,2,2,4,2664,463,-308.7,1209,1,2419,CAL +9,16,1,1,1,2,1,133,1,5,3,2663,497.4,-308.7,1278,1,2557,CAL +9,17,1,1,1,2,1,133,1,4,2,2662,531.7,-308.7,1326,1,2653,CAL +10,0,1,0,1,2,1,93,4,4,14,1874,-548.8,-274.4,1334,1,2669,CAL +10,1,1,0,1,2,1,93,4,3,13,1873,-514.5,-274.4,1290,1,2581,CAL +10,2,1,0,1,2,1,93,4,2,12,1872,-480.2,-274.4,1232,1,2465,CAL +10,3,1,0,1,2,1,93,3,5,11,1871,-445.9,-274.4,1151,1,2303,CAL +10,4,1,0,1,2,1,93,3,4,10,1870,-411.6,-274.4,1058,1,2117,CAL 
+10,5,1,0,1,2,1,93,3,3,9,1869,-377.3,-274.4,963,1,1927,CAL
+10,6,1,0,1,3,1,95,1,3,1,1901,-343,-274.4,874,1,1749,CAL
+10,7,1,0,1,3,1,95,1,2,0,1900,-308.7,-274.4,791,1,1583,CAL
+10,8,1,1,1,3,1,135,5,5,19,2719,308.7,-274.4,768,1,1537,CAL
+10,9,1,1,1,3,1,135,5,4,18,2718,343,-274.4,848,1,1697,CAL
+10,10,1,1,1,3,1,135,5,3,17,2717,377.3,-274.4,934,1,1869,CAL
+10,11,1,1,1,2,1,133,3,4,10,2670,411.6,-274.4,1026,1,2053,CAL
+10,12,1,1,1,2,1,133,3,3,9,2669,445.9,-274.4,1120,1,2241,CAL
+10,13,1,1,1,2,1,133,3,2,8,2668,480.2,-274.4,1208,1,2417,CAL
+10,14,1,1,1,2,1,133,2,5,7,2667,514.5,-274.4,1277,1,2555,CAL
+10,15,1,1,1,2,1,133,2,4,6,2666,548.8,-274.4,1325,1,2651,CAL
+11,0,1,0,1,1,1,91,5,4,18,1838,-566,-240.1,1335,1,2671,CAL
+11,1,1,0,1,1,1,91,5,3,17,1837,-531.7,-240.1,1291,1,2583,CAL
+11,2,1,0,1,1,1,91,5,2,16,1836,-497.4,-240.1,1233,1,2467,CAL
+11,3,1,0,1,2,1,93,3,2,8,1868,-463,-240.1,1152,1,2305,CAL
+11,4,1,0,1,2,1,93,2,5,7,1867,-428.8,-240.1,1059,1,2119,CAL
+11,5,1,0,1,2,1,93,2,4,6,1866,-394.5,-240.1,964,1,1929,CAL
+11,6,1,0,1,2,1,93,2,3,5,1865,-360.1,-240.1,875,1,1751,CAL
+11,7,1,0,1,2,1,93,2,2,4,1864,-325.9,-240.1,792,1,1585,CAL
+11,8,1,1,1,2,1,133,4,5,15,2675,325.9,-240.1,767,1,1535,CAL
+11,9,1,1,1,2,1,133,4,4,14,2674,360.1,-240.1,847,1,1695,CAL
+11,10,1,1,1,2,1,133,4,3,13,2673,394.5,-240.1,933,1,1867,CAL
+11,11,1,1,1,2,1,133,4,2,12,2672,428.8,-240.1,1025,1,2051,CAL
+11,12,1,1,1,2,1,133,3,5,11,2671,463,-240.1,1119,1,2239,CAL
+11,13,1,1,1,1,1,131,1,5,3,2623,497.4,-240.1,1207,1,2415,CAL
+11,14,1,1,1,1,1,131,1,4,2,2622,531.7,-240.1,1276,1,2553,CAL
+11,15,1,1,1,1,1,131,1,3,1,2621,566,-240.1,1324,1,2649,CAL
+12,0,1,0,1,1,1,91,4,5,15,1835,-583.1,-205.8,1336,1,2673,CAL
+12,1,1,0,1,1,1,91,4,4,14,1834,-548.8,-205.8,1292,1,2585,CAL
+12,2,1,0,1,1,1,91,4,3,13,1833,-514.5,-205.8,1234,1,2469,CAL
+12,3,1,0,1,1,1,91,4,2,12,1832,-480.2,-205.8,1153,1,2307,CAL
+12,4,1,0,1,2,1,93,1,5,3,1863,-445.9,-205.8,1060,1,2121,CAL
+12,5,1,0,1,2,1,93,1,4,2,1862,-411.6,-205.8,965,1,1931,CAL
+12,6,1,0,1,2,1,93,1,3,1,1861,-377.3,-205.8,876,1,1753,CAL
+12,7,1,0,1,2,1,93,1,2,0,1860,-343,-205.8,793,1,1587,CAL
+12,8,1,1,1,2,1,133,5,5,19,2679,343,-205.8,766,1,1533,CAL
+12,9,1,1,1,2,1,133,5,4,18,2678,377.3,-205.8,846,1,1693,CAL
+12,10,1,1,1,2,1,133,5,3,17,2677,411.6,-205.8,932,1,1865,CAL
+12,11,1,1,1,2,1,133,5,2,16,2676,445.9,-205.8,1024,1,2049,CAL
+12,12,1,1,1,1,1,131,2,5,7,2627,480.2,-205.8,1118,1,2237,CAL
+12,13,1,1,1,1,1,131,2,4,6,2626,514.5,-205.8,1206,1,2413,CAL
+12,14,1,1,1,1,1,131,2,3,5,2625,548.8,-205.8,1275,1,2551,CAL
+12,15,1,1,1,1,1,131,2,2,4,2624,583.1,-205.8,1323,1,2647,CAL
+13,0,1,0,1,1,1,91,3,5,11,1831,-600.2,-171.5,1337,1,2675,CAL
+13,1,1,0,1,1,1,91,3,4,10,1830,-566,-171.5,1293,1,2587,CAL
+13,2,1,0,1,1,1,91,3,3,9,1829,-531.7,-171.5,1235,1,2471,CAL
+13,3,1,0,1,1,1,91,3,2,8,1828,-497.4,-171.5,1154,1,2309,CAL
+13,4,1,0,1,1,1,91,2,5,7,1827,-463,-171.5,1061,1,2123,CAL
+13,5,1,0,1,1,1,91,2,4,6,1826,-428.8,-171.5,966,1,1933,CAL
+13,6,1,0,1,1,1,91,2,3,5,1825,-394.5,-171.5,877,1,1755,CAL
+13,7,1,0,1,1,1,91,2,2,4,1824,-360.1,-171.5,794,1,1589,CAL
+13,8,1,1,1,1,1,131,4,5,15,2635,360.1,-171.5,765,1,1531,CAL
+13,9,1,1,1,1,1,131,4,4,14,2634,394.5,-171.5,845,1,1691,CAL
+13,10,1,1,1,1,1,131,4,3,13,2633,428.8,-171.5,931,1,1863,CAL
+13,11,1,1,1,1,1,131,4,2,12,2632,463,-171.5,1023,1,2047,CAL
+13,12,1,1,1,1,1,131,3,5,11,2631,497.4,-171.5,1117,1,2235,CAL
+13,13,1,1,1,1,1,131,3,4,10,2630,531.7,-171.5,1205,1,2411,CAL
+13,14,1,1,1,1,1,131,3,3,9,2629,566,-171.5,1274,1,2549,CAL
+13,15,1,1,1,1,1,131,3,2,8,2628,600.2,-171.5,1322,1,2645,CAL
+14,0,1,0,1,0,1,89,5,3,17,1797,-617.4,-137.2,1338,1,2677,CAL
+14,1,1,0,1,0,1,89,4,4,14,1794,-583.1,-137.2,1294,1,2589,CAL
+14,2,1,0,1,0,1,89,3,5,11,1791,-548.8,-137.2,1236,1,2473,CAL
+14,3,1,0,1,0,1,89,3,2,8,1788,-514.5,-137.2,1155,1,2311,CAL
+14,4,1,0,1,1,1,91,1,5,3,1823,-480.2,-137.2,1062,1,2125,CAL
+14,5,1,0,1,1,1,91,1,4,2,1822,-445.9,-137.2,967,1,1935,CAL
+14,6,1,0,1,1,1,91,1,3,1,1821,-411.6,-137.2,878,1,1757,CAL
+14,7,1,0,1,1,1,91,1,2,0,1820,-377.3,-137.2,795,1,1591,CAL
+14,8,1,1,1,1,1,131,5,5,19,2639,377.3,-137.2,764,1,1529,CAL
+14,9,1,1,1,1,1,131,5,4,18,2638,411.6,-137.2,844,1,1689,CAL
+14,10,1,1,1,1,1,131,5,3,17,2637,445.9,-137.2,930,1,1861,CAL
+14,11,1,1,1,1,1,131,5,2,16,2636,480.2,-137.2,1022,1,2045,CAL
+14,12,1,1,1,0,1,129,3,2,8,2588,514.5,-137.2,1116,1,2233,CAL
+14,13,1,1,1,0,1,129,2,5,7,2587,548.8,-137.2,1204,1,2409,CAL
+14,14,1,1,1,0,1,129,2,2,4,2584,583.1,-137.2,1273,1,2547,CAL
+14,15,1,1,1,0,1,129,1,3,1,2581,617.4,-137.2,1321,1,2643,CAL
+15,0,1,0,1,0,1,89,5,2,16,1796,-600.2,-102.9,1295,1,2591,CAL
+15,1,1,0,1,0,1,89,4,3,13,1793,-566,-102.9,1237,1,2475,CAL
+15,2,1,0,1,0,1,89,3,4,10,1790,-531.7,-102.9,1156,1,2313,CAL
+15,3,1,0,1,0,1,89,2,5,7,1787,-497.4,-102.9,1063,1,2127,CAL
+15,4,1,0,1,0,1,89,2,3,5,1785,-463,-102.9,968,1,1937,CAL
+15,5,1,0,1,0,1,89,1,5,3,1783,-428.8,-102.9,879,1,1759,CAL
+15,6,1,0,1,0,1,89,1,3,1,1781,-394.5,-102.9,796,1,1593,CAL
+15,7,1,1,1,0,1,129,5,3,17,2597,394.5,-102.9,763,1,1527,CAL
+15,8,1,1,1,0,1,129,4,5,15,2595,428.8,-102.9,843,1,1687,CAL
+15,9,1,1,1,0,1,129,4,3,13,2593,463,-102.9,929,1,1859,CAL
+15,10,1,1,1,0,1,129,3,5,11,2591,497.4,-102.9,1021,1,2043,CAL
+15,11,1,1,1,0,1,129,3,3,9,2589,531.7,-102.9,1115,1,2231,CAL
+15,12,1,1,1,0,1,129,2,3,5,2585,566,-102.9,1203,1,2407,CAL
+15,13,1,1,1,0,1,129,1,4,2,2582,600.2,-102.9,1272,1,2545,CAL
+16,0,1,0,1,0,1,89,4,5,15,1795,-617.4,-68.6,1296,1,2593,CAL
+16,1,1,0,1,0,1,89,4,2,12,1792,-583.1,-68.6,1238,1,2477,CAL
+16,2,1,0,1,0,1,89,3,3,9,1789,-548.8,-68.6,1157,1,2315,CAL
+16,3,1,0,1,0,1,89,2,4,6,1786,-514.5,-68.6,1064,1,2129,CAL
+16,4,1,0,1,0,1,89,2,2,4,1784,-480.2,-68.6,969,1,1939,CAL
+16,5,1,0,1,0,1,89,1,4,2,1782,-445.9,-68.6,880,1,1761,CAL
+16,6,1,0,1,0,1,89,1,2,0,1780,-411.6,-68.6,797,1,1595,CAL
+16,7,1,1,1,0,1,129,5,4,18,2598,411.6,-68.6,762,1,1525,CAL
+16,8,1,1,1,0,1,129,5,2,16,2596,445.9,-68.6,842,1,1685,CAL
+16,9,1,1,1,0,1,129,4,4,14,2594,480.2,-68.6,928,1,1857,CAL
+16,10,1,1,1,0,1,129,4,2,12,2592,514.5,-68.6,1020,1,2041,CAL
+16,11,1,1,1,0,1,129,3,4,10,2590,548.8,-68.6,1114,1,2229,CAL
+16,12,1,1,1,0,1,129,2,4,6,2586,583.1,-68.6,1202,1,2405,CAL
+16,13,1,1,1,0,1,129,1,5,3,2583,617.4,-68.6,1271,1,2543,CAL
+17,0,1,0,2,2,1,101,5,5,19,2039,-634.5,-34.3,1297,1,2595,CAL
+17,1,1,0,2,2,1,101,5,4,18,2038,-600.2,-34.3,1239,1,2479,CAL
+17,2,1,0,2,2,1,101,5,2,16,2036,-566,-34.3,1158,1,2317,CAL
+17,3,1,0,2,2,1,101,4,4,14,2034,-531.7,-34.3,1065,1,2131,CAL
+17,4,1,0,2,2,1,101,3,5,11,2031,-497.4,-34.3,970,1,1941,CAL
+17,5,1,0,2,2,1,101,3,2,8,2028,-463,-34.3,881,1,1763,CAL
+17,6,1,0,2,2,1,101,2,3,5,2025,-428.8,-34.3,798,1,1597,CAL
+17,7,1,0,2,2,1,101,1,4,2,2022,-394.5,-34.3,728,1,1457,CAL
+17,8,1,1,2,2,1,141,5,5,19,2839,394.5,-34.3,708,1,1417,CAL
+17,9,1,1,2,2,1,141,5,2,16,2836,428.8,-34.3,761,1,1523,CAL
+17,10,1,1,2,2,1,141,4,3,13,2833,463,-34.3,841,1,1683,CAL
+17,11,1,1,2,2,1,141,3,4,10,2830,497.4,-34.3,927,1,1855,CAL
+17,12,1,1,2,2,1,141,2,5,7,2827,531.7,-34.3,1019,1,2039,CAL
+17,13,1,1,2,2,1,141,2,2,4,2824,566,-34.3,1113,1,2227,CAL
+17,14,1,1,2,2,1,141,1,4,2,2822,600.2,-34.3,1201,1,2403,CAL
+17,15,1,1,2,2,1,141,1,2,0,2820,634.5,-34.3,1270,1,2541,CAL
+18,0,1,0,2,2,1,101,5,3,17,2037,-617.4,0,1240,1,2481,CAL
+18,1,1,0,2,2,1,101,4,5,15,2035,-583.1,0,1159,1,2319,CAL
+18,2,1,0,2,2,1,101,4,3,13,2033,-548.8,0,1066,1,2133,CAL
+18,3,1,0,2,2,1,101,3,4,10,2030,-514.5,0,971,1,1943,CAL
+18,4,1,0,2,2,1,101,2,5,7,2027,-480.2,0,882,1,1765,CAL
+18,5,1,0,2,2,1,101,2,2,4,2024,-445.9,0,799,1,1599,CAL
+18,6,1,0,2,2,1,101,1,3,1,2021,-411.6,0,729,1,1459,CAL
+18,7,1,1,2,2,1,141,5,3,17,2837,411.6,0,707,1,1415,CAL
+18,8,1,1,2,2,1,141,4,4,14,2834,445.9,0,760,1,1521,CAL
+18,9,1,1,2,2,1,141,3,5,11,2831,480.2,0,840,1,1681,CAL
+18,10,1,1,2,2,1,141,3,2,8,2828,514.5,0,926,1,1853,CAL
+18,11,1,1,2,2,1,141,2,3,5,2825,548.8,0,1018,1,2037,CAL
+18,12,1,1,2,2,1,141,1,5,3,2823,583.1,0,1112,1,2225,CAL
+18,13,1,1,2,2,1,141,1,3,1,2821,617.4,0,1200,1,2401,CAL
+19,0,1,0,2,1,1,99,5,5,19,1999,-634.5,34.3,1298,1,2597,CAL +19,1,1,0,2,1,1,99,4,5,15,1995,-600.2,34.3,1241,1,2483,CAL +19,2,1,0,2,1,1,99,4,4,14,1994,-566,34.3,1160,1,2321,CAL +19,3,1,0,2,2,1,101,4,2,12,2032,-531.7,34.3,1067,1,2135,CAL +19,4,1,0,2,2,1,101,3,3,9,2029,-497.4,34.3,972,1,1945,CAL +19,5,1,0,2,2,1,101,2,4,6,2026,-463,34.3,883,1,1767,CAL +19,6,1,0,2,2,1,101,1,5,3,2023,-428.8,34.3,800,1,1601,CAL +19,7,1,0,2,2,1,101,1,2,0,2020,-394.5,34.3,730,1,1461,CAL +19,8,1,1,2,2,1,141,5,4,18,2838,394.5,34.3,706,1,1413,CAL +19,9,1,1,2,2,1,141,4,5,15,2835,428.8,34.3,759,1,1519,CAL +19,10,1,1,2,2,1,141,4,2,12,2832,463,34.3,839,1,1679,CAL +19,11,1,1,2,2,1,141,3,3,9,2829,497.4,34.3,925,1,1851,CAL +19,12,1,1,2,2,1,141,2,4,6,2826,531.7,34.3,1017,1,2035,CAL +19,13,1,1,2,1,1,139,1,4,2,2782,566,34.3,1111,1,2223,CAL +19,14,1,1,2,1,1,139,1,3,1,2781,600.2,34.3,1199,1,2399,CAL +19,15,1,1,2,1,1,139,1,2,0,2780,634.5,34.3,1269,1,2539,CAL +20,0,1,0,2,1,1,99,5,4,18,1998,-617.4,68.6,1299,1,2599,CAL +20,1,1,0,2,1,1,99,4,3,13,1993,-583.1,68.6,1242,1,2485,CAL +20,2,1,0,2,1,1,99,4,2,12,1992,-548.8,68.6,1161,1,2323,CAL +20,3,1,0,2,1,1,99,2,5,7,1987,-514.5,68.6,1068,1,2137,CAL +20,4,1,0,2,1,1,99,2,4,6,1986,-480.2,68.6,973,1,1947,CAL +20,5,1,0,2,1,1,99,1,5,3,1983,-445.9,68.6,884,1,1769,CAL +20,6,1,0,2,1,1,99,1,4,2,1982,-411.6,68.6,801,1,1603,CAL +20,7,1,1,2,1,1,139,5,4,18,2798,411.6,68.6,758,1,1517,CAL +20,8,1,1,2,1,1,139,5,2,16,2796,445.9,68.6,838,1,1677,CAL +20,9,1,1,2,1,1,139,4,4,14,2794,480.2,68.6,924,1,1849,CAL +20,10,1,1,2,1,1,139,4,2,12,2792,514.5,68.6,1016,1,2033,CAL +20,11,1,1,2,1,1,139,3,3,9,2789,548.8,68.6,1110,1,2221,CAL +20,12,1,1,2,1,1,139,2,4,6,2786,583.1,68.6,1198,1,2397,CAL +20,13,1,1,2,1,1,139,1,5,3,2783,617.4,68.6,1268,1,2537,CAL +21,0,1,0,2,1,1,99,5,3,17,1997,-600.2,102.9,1300,1,2601,CAL +21,1,1,0,2,1,1,99,3,3,9,1989,-566,102.9,1243,1,2487,CAL +21,2,1,0,2,1,1,99,3,2,8,1988,-531.7,102.9,1162,1,2325,CAL +21,3,1,0,2,1,1,99,2,3,5,1985,-497.4,102.9,1069,1,2139,CAL 
+21,4,1,0,2,1,1,99,2,2,4,1984,-463,102.9,974,1,1949,CAL
+21,5,1,0,2,1,1,99,1,3,1,1981,-428.8,102.9,885,1,1771,CAL
+21,6,1,0,2,1,1,99,1,2,0,1980,-394.5,102.9,802,1,1605,CAL
+21,7,1,1,2,1,1,139,5,5,19,2799,394.5,102.9,757,1,1515,CAL
+21,8,1,1,2,1,1,139,5,3,17,2797,428.8,102.9,837,1,1675,CAL
+21,9,1,1,2,1,1,139,4,5,15,2795,463,102.9,923,1,1847,CAL
+21,10,1,1,2,1,1,139,4,3,13,2793,497.4,102.9,1015,1,2031,CAL
+21,11,1,1,2,1,1,139,3,4,10,2790,531.7,102.9,1109,1,2219,CAL
+21,12,1,1,2,1,1,139,2,5,7,2787,566,102.9,1197,1,2395,CAL
+21,13,1,1,2,1,1,139,2,2,4,2784,600.2,102.9,1267,1,2535,CAL
+22,0,1,0,2,1,1,99,5,2,16,1996,-617.4,137.2,1339,1,2679,CAL
+22,1,1,0,2,1,1,99,3,5,11,1991,-583.1,137.2,1301,1,2603,CAL
+22,2,1,0,2,1,1,99,3,4,10,1990,-548.8,137.2,1244,1,2489,CAL
+22,3,1,0,2,0,1,97,4,2,12,1952,-514.5,137.2,1163,1,2327,CAL
+22,4,1,0,2,0,1,97,3,3,9,1949,-480.2,137.2,1070,1,2141,CAL
+22,5,1,0,2,0,1,97,2,4,6,1946,-445.9,137.2,975,1,1951,CAL
+22,6,1,0,2,0,1,97,1,5,3,1943,-411.6,137.2,886,1,1773,CAL
+22,7,1,0,2,0,1,97,1,3,1,1941,-377.3,137.2,803,1,1607,CAL
+22,8,1,1,2,0,1,137,5,4,18,2758,377.3,137.2,756,1,1513,CAL
+22,9,1,1,2,0,1,137,5,2,16,2756,411.6,137.2,836,1,1673,CAL
+22,10,1,1,2,0,1,137,4,3,13,2753,445.9,137.2,922,1,1845,CAL
+22,11,1,1,2,0,1,137,3,4,10,2750,480.2,137.2,1014,1,2029,CAL
+22,12,1,1,2,0,1,137,2,5,7,2747,514.5,137.2,1108,1,2217,CAL
+22,13,1,1,2,1,1,139,3,5,11,2791,548.8,137.2,1196,1,2393,CAL
+22,14,1,1,2,1,1,139,3,2,8,2788,583.1,137.2,1266,1,2533,CAL
+22,15,1,1,2,1,1,139,2,3,5,2785,617.4,137.2,1320,1,2641,CAL
+23,0,1,0,2,0,1,97,5,5,19,1959,-600.2,171.5,1340,1,2681,CAL
+23,1,1,0,2,0,1,97,5,2,16,1956,-566,171.5,1302,1,2605,CAL
+23,2,1,0,2,0,1,97,4,4,14,1954,-531.7,171.5,1245,1,2491,CAL
+23,3,1,0,2,0,1,97,3,5,11,1951,-497.4,171.5,1164,1,2329,CAL
+23,4,1,0,2,0,1,97,3,2,8,1948,-463,171.5,1071,1,2143,CAL
+23,5,1,0,2,0,1,97,2,3,5,1945,-428.8,171.5,976,1,1953,CAL
+23,6,1,0,2,0,1,97,1,4,2,1942,-394.5,171.5,887,1,1775,CAL
+23,7,1,0,2,0,1,97,1,2,0,1940,-360.1,171.5,804,1,1609,CAL
+23,8,1,1,2,0,1,137,5,5,19,2759,360.1,171.5,755,1,1511,CAL
+23,9,1,1,2,0,1,137,5,3,17,2757,394.5,171.5,835,1,1671,CAL
+23,10,1,1,2,0,1,137,4,4,14,2754,428.8,171.5,921,1,1843,CAL
+23,11,1,1,2,0,1,137,3,5,11,2751,463,171.5,1013,1,2027,CAL
+23,12,1,1,2,0,1,137,3,2,8,2748,497.4,171.5,1107,1,2215,CAL
+23,13,1,1,2,0,1,137,2,3,5,2745,531.7,171.5,1195,1,2391,CAL
+23,14,1,1,2,0,1,137,1,5,3,2743,566,171.5,1265,1,2531,CAL
+23,15,1,1,2,0,1,137,1,2,0,2740,600.2,171.5,1319,1,2639,CAL
+24,0,1,0,2,0,1,97,5,4,18,1958,-583.1,205.8,1341,1,2683,CAL
+24,1,1,0,2,0,1,97,4,5,15,1955,-548.8,205.8,1303,1,2607,CAL
+24,2,1,0,2,0,1,97,4,3,13,1953,-514.5,205.8,1246,1,2493,CAL
+24,3,1,0,2,0,1,97,3,4,10,1950,-480.2,205.8,1165,1,2331,CAL
+24,4,1,0,2,0,1,97,2,5,7,1947,-445.9,205.8,1072,1,2145,CAL
+24,5,1,0,2,0,1,97,2,2,4,1944,-411.6,205.8,977,1,1955,CAL
+24,6,1,0,3,2,1,109,1,3,1,2181,-377.3,205.8,888,1,1777,CAL
+24,7,1,0,3,2,1,109,1,2,0,2180,-343,205.8,805,1,1611,CAL
+24,8,1,1,3,2,1,149,5,5,19,2999,343,205.8,754,1,1509,CAL
+24,9,1,1,3,2,1,149,5,4,18,2998,377.3,205.8,834,1,1669,CAL
+24,10,1,1,2,0,1,137,4,5,15,2755,411.6,205.8,920,1,1841,CAL
+24,11,1,1,2,0,1,137,4,2,12,2752,445.9,205.8,1012,1,2025,CAL
+24,12,1,1,2,0,1,137,3,3,9,2749,480.2,205.8,1106,1,2213,CAL
+24,13,1,1,2,0,1,137,2,4,6,2746,514.5,205.8,1194,1,2389,CAL
+24,14,1,1,2,0,1,137,2,2,4,2744,548.8,205.8,1264,1,2529,CAL
+24,15,1,1,2,0,1,137,1,3,1,2741,583.1,205.8,1318,1,2637,CAL
+25,0,1,0,2,0,1,97,5,3,17,1957,-566,240.1,1342,1,2685,CAL
+25,1,1,0,3,2,1,109,5,4,18,2198,-531.7,240.1,1304,1,2609,CAL
+25,2,1,0,3,2,1,109,5,3,17,2197,-497.4,240.1,1247,1,2495,CAL
+25,3,1,0,3,2,1,109,5,2,16,2196,-463,240.1,1166,1,2333,CAL
+25,4,1,0,3,2,1,109,2,3,5,2185,-428.8,240.1,1073,1,2147,CAL
+25,5,1,0,3,2,1,109,2,2,4,2184,-394.5,240.1,978,1,1957,CAL
+25,6,1,0,3,2,1,109,1,5,3,2183,-360.1,240.1,889,1,1779,CAL
+25,7,1,0,3,2,1,109,1,4,2,2182,-325.9,240.1,806,1,1613,CAL
+25,8,1,1,3,2,1,149,5,3,17,2997,325.9,240.1,753,1,1507,CAL +25,9,1,1,3,2,1,149,5,2,16,2996,360.1,240.1,833,1,1667,CAL +25,10,1,1,3,2,1,149,4,5,15,2995,394.5,240.1,919,1,1839,CAL +25,11,1,1,3,2,1,149,4,4,14,2994,428.8,240.1,1011,1,2023,CAL +25,12,1,1,3,2,1,149,1,5,3,2983,463,240.1,1105,1,2211,CAL +25,13,1,1,3,2,1,149,1,4,2,2982,497.4,240.1,1193,1,2387,CAL +25,14,1,1,3,2,1,149,1,3,1,2981,531.7,240.1,1263,1,2527,CAL +25,15,1,1,2,0,1,137,1,4,2,2742,566,240.1,1317,1,2635,CAL +26,0,1,0,3,2,1,109,5,5,19,2199,-548.8,274.4,1343,1,2687,CAL +26,1,1,0,3,2,1,109,4,4,14,2194,-514.5,274.4,1305,1,2611,CAL +26,2,1,0,3,2,1,109,4,3,13,2193,-480.2,274.4,1248,1,2497,CAL +26,3,1,0,3,2,1,109,4,2,12,2192,-445.9,274.4,1167,1,2335,CAL +26,4,1,0,3,2,1,109,2,5,7,2187,-411.6,274.4,1074,1,2149,CAL +26,5,1,0,3,2,1,109,2,4,6,2186,-377.3,274.4,979,1,1959,CAL +26,6,1,0,3,1,1,107,1,3,1,2141,-343,274.4,890,1,1781,CAL +26,7,1,0,3,1,1,107,1,2,0,2140,-308.7,274.4,807,1,1615,CAL +26,8,1,1,3,1,1,147,5,5,19,2959,308.7,274.4,752,1,1505,CAL +26,9,1,1,3,1,1,147,5,4,18,2958,343,274.4,832,1,1665,CAL +26,10,1,1,3,2,1,149,4,3,13,2993,377.3,274.4,918,1,1837,CAL +26,11,1,1,3,2,1,149,4,2,12,2992,411.6,274.4,1010,1,2021,CAL +26,12,1,1,3,2,1,149,2,5,7,2987,445.9,274.4,1104,1,2209,CAL +26,13,1,1,3,2,1,149,2,4,6,2986,480.2,274.4,1192,1,2385,CAL +26,14,1,1,3,2,1,149,2,3,5,2985,514.5,274.4,1262,1,2525,CAL +26,15,1,1,3,2,1,149,1,2,0,2980,548.8,274.4,1316,1,2633,CAL +27,0,1,0,3,2,1,109,4,5,15,2195,-531.7,308.7,1344,1,2689,CAL +27,1,1,0,3,2,1,109,3,4,10,2190,-497.4,308.7,1306,1,2613,CAL +27,2,1,0,3,2,1,109,3,3,9,2189,-463,308.7,1249,1,2499,CAL +27,3,1,0,3,2,1,109,3,2,8,2188,-428.8,308.7,1168,1,2337,CAL +27,4,1,0,3,1,1,107,2,4,6,2146,-394.5,308.7,1075,1,2151,CAL +27,5,1,0,3,1,1,107,2,3,5,2145,-360.1,308.7,980,1,1961,CAL +27,6,1,0,3,1,1,107,2,2,4,2144,-325.9,308.7,891,1,1783,CAL +27,7,1,0,3,1,1,107,1,5,3,2143,-291.5,308.7,808,1,1617,CAL +27,8,1,0,3,1,1,107,1,4,2,2142,-257.2,308.7,731,1,1463,CAL 
+27,9,1,1,3,1,1,147,5,3,17,2957,257.2,308.7,705,1,1411,CAL +27,10,1,1,3,1,1,147,5,2,16,2956,291.5,308.7,751,1,1503,CAL +27,11,1,1,3,1,1,147,4,5,15,2955,325.9,308.7,831,1,1663,CAL +27,12,1,1,3,1,1,147,4,4,14,2954,360.1,308.7,917,1,1835,CAL +27,13,1,1,3,1,1,147,4,3,13,2953,394.5,308.7,1009,1,2019,CAL +27,14,1,1,3,2,1,149,3,5,11,2991,428.8,308.7,1103,1,2207,CAL +27,15,1,1,3,2,1,149,3,4,10,2990,463,308.7,1191,1,2383,CAL +27,16,1,1,3,2,1,149,3,3,9,2989,497.4,308.7,1261,1,2523,CAL +27,17,1,1,3,2,1,149,2,2,4,2984,531.7,308.7,1315,1,2631,CAL +28,0,1,0,3,2,1,109,3,5,11,2191,-514.5,343,1345,1,2691,CAL +28,1,1,0,3,1,1,107,3,5,11,2151,-480.2,343,1307,1,2615,CAL +28,2,1,0,3,1,1,107,3,4,10,2150,-445.9,343,1250,1,2501,CAL +28,3,1,0,3,1,1,107,3,3,9,2149,-411.6,343,1169,1,2339,CAL +28,4,1,0,3,1,1,107,3,2,8,2148,-377.3,343,1076,1,2153,CAL +28,5,1,0,3,1,1,107,2,5,7,2147,-343,343,981,1,1963,CAL +28,6,1,0,3,0,1,105,1,5,3,2103,-308.7,343,892,1,1785,CAL +28,7,1,0,3,0,1,105,1,4,2,2102,-274.4,343,809,1,1619,CAL +28,8,1,0,3,0,1,105,1,3,1,2101,-240.1,343,732,1,1465,CAL +28,9,1,0,3,0,1,105,1,2,0,2100,-205.8,343,689,1,1379,CAL +28,10,1,1,3,0,1,145,5,5,19,2919,205.8,343,680,1,1361,CAL +28,11,1,1,3,0,1,145,5,4,18,2918,240.1,343,704,1,1409,CAL +28,12,1,1,3,0,1,145,5,3,17,2917,274.4,343,750,1,1501,CAL +28,13,1,1,3,0,1,145,5,2,16,2916,308.7,343,830,1,1661,CAL +28,14,1,1,3,1,1,147,4,2,12,2952,343,343,916,1,1833,CAL +28,15,1,1,3,1,1,147,3,5,11,2951,377.3,343,1008,1,2017,CAL +28,16,1,1,3,1,1,147,3,4,10,2950,411.6,343,1102,1,2205,CAL +28,17,1,1,3,1,1,147,3,3,9,2949,445.9,343,1190,1,2381,CAL +28,18,1,1,3,1,1,147,3,2,8,2948,480.2,343,1260,1,2521,CAL +28,19,1,1,3,2,1,149,3,2,8,2988,514.5,343,1314,1,2629,CAL +29,0,1,0,3,1,1,107,4,5,15,2155,-497.4,377.3,1346,1,2693,CAL +29,1,1,0,3,1,1,107,4,4,14,2154,-463,377.3,1308,1,2617,CAL +29,2,1,0,3,1,1,107,4,3,13,2153,-428.8,377.3,1251,1,2503,CAL +29,3,1,0,3,1,1,107,4,2,12,2152,-394.5,377.3,1170,1,2341,CAL +29,4,1,0,3,0,1,105,2,5,7,2107,-360.1,377.3,1077,1,2155,CAL 
+29,5,1,0,3,0,1,105,2,4,6,2106,-325.9,377.3,982,1,1965,CAL +29,6,1,0,3,0,1,105,2,3,5,2105,-291.5,377.3,893,1,1787,CAL +29,7,1,0,3,0,1,105,2,2,4,2104,-257.2,377.3,810,1,1621,CAL +29,8,1,0,4,2,1,117,5,3,17,2357,-222.9,377.3,733,1,1467,CAL +29,9,1,0,4,2,1,117,5,4,18,2358,-188.6,377.3,674,1,1349,CAL +29,10,1,0,4,2,1,117,5,5,19,2359,-154.4,377.3,675,1,1351,CAL +29,11,1,0,4,1,1,115,5,5,19,2319,-120.1,377.3,676,1,1353,CAL +29,12,1,1,4,1,1,155,1,2,0,3100,120.1,377.3,677,1,1355,CAL +29,13,1,1,4,2,1,157,1,2,0,3140,154.4,377.3,678,1,1357,CAL +29,14,1,1,4,2,1,157,1,3,1,3141,188.6,377.3,679,1,1359,CAL +29,15,1,1,4,2,1,157,1,4,2,3142,222.9,377.3,703,1,1407,CAL +29,16,1,1,3,0,1,145,4,5,15,2915,257.2,377.3,749,1,1499,CAL +29,17,1,1,3,0,1,145,4,4,14,2914,291.5,377.3,829,1,1659,CAL +29,18,1,1,3,0,1,145,4,3,13,2913,325.9,377.3,915,1,1831,CAL +29,19,1,1,3,0,1,145,4,2,12,2912,360.1,377.3,1007,1,2015,CAL +29,20,1,1,3,1,1,147,2,5,7,2947,394.5,377.3,1101,1,2203,CAL +29,21,1,1,3,1,1,147,2,4,6,2946,428.8,377.3,1189,1,2379,CAL +29,22,1,1,3,1,1,147,2,3,5,2945,463,377.3,1259,1,2519,CAL +29,23,1,1,3,1,1,147,2,2,4,2944,497.4,377.3,1313,1,2627,CAL +30,0,1,0,3,1,1,107,5,4,18,2158,-480.2,411.6,1347,1,2695,CAL +30,1,1,0,3,1,1,107,5,3,17,2157,-445.9,411.6,1309,1,2619,CAL +30,2,1,0,3,1,1,107,5,2,16,2156,-411.6,411.6,1252,1,2505,CAL +30,3,1,0,3,0,1,105,3,5,11,2111,-377.3,411.6,1171,1,2343,CAL +30,4,1,0,3,0,1,105,3,4,10,2110,-343,411.6,1078,1,2157,CAL +30,5,1,0,3,0,1,105,3,3,9,2109,-308.7,411.6,983,1,1967,CAL +30,6,1,0,3,0,1,105,3,2,8,2108,-274.4,411.6,894,1,1789,CAL +30,7,1,0,4,2,1,117,4,4,14,2354,-240.1,411.6,811,1,1623,CAL +30,8,1,0,4,2,1,117,4,5,15,2355,-205.8,411.6,690,1,1381,CAL +30,9,1,0,4,2,1,117,5,2,16,2356,-171.5,411.6,691,1,1383,CAL +30,10,1,0,4,1,1,115,5,2,16,2316,-137.2,411.6,692,1,1385,CAL +30,11,1,0,4,1,1,115,5,3,17,2317,-102.9,411.6,693,1,1387,CAL +30,12,1,0,4,1,1,115,5,4,18,2318,-68.6,411.6,694,1,1389,CAL +30,13,1,0,4,0,1,113,5,4,19,2278,-34.3,411.6,695,1,1391,CAL 
+30,14,1,0,4,0,1,113,5,5,18,2279,0,411.6,696,1,1393,CAL +30,15,1,1,4,0,1,153,1,2,0,3060,34.3,411.6,697,1,1395,CAL +30,16,1,1,4,1,1,155,1,3,1,3101,68.6,411.6,698,1,1397,CAL +30,17,1,1,4,1,1,155,1,4,2,3102,102.9,411.6,699,1,1399,CAL +30,18,1,1,4,1,1,155,1,5,3,3103,137.2,411.6,700,1,1401,CAL +30,19,1,1,4,2,1,157,1,5,3,3143,171.5,411.6,701,1,1403,CAL +30,20,1,1,4,2,1,157,2,2,4,3144,205.8,411.6,702,1,1405,CAL +30,21,1,1,4,2,1,157,2,3,5,3145,240.1,411.6,748,1,1497,CAL +30,22,1,1,3,0,1,145,3,5,11,2911,274.4,411.6,828,1,1657,CAL +30,23,1,1,3,0,1,145,3,4,10,2910,308.7,411.6,914,1,1829,CAL +30,24,1,1,3,0,1,145,3,3,9,2909,343,411.6,1006,1,2013,CAL +30,25,1,1,3,0,1,145,3,2,8,2908,377.3,411.6,1100,1,2201,CAL +30,26,1,1,3,1,1,147,1,5,3,2943,411.6,411.6,1188,1,2377,CAL +30,27,1,1,3,1,1,147,1,4,2,2942,445.9,411.6,1258,1,2517,CAL +30,28,1,1,3,1,1,147,1,3,1,2941,480.2,411.6,1312,1,2625,CAL +31,0,1,0,3,1,1,107,5,5,19,2159,-428.8,445.9,1310,1,2621,CAL +31,1,1,0,3,0,1,105,4,5,15,2115,-394.5,445.9,1253,1,2507,CAL +31,2,1,0,3,0,1,105,4,4,14,2114,-360.1,445.9,1172,1,2345,CAL +31,3,1,0,3,0,1,105,4,3,13,2113,-325.9,445.9,1079,1,2159,CAL +31,4,1,0,3,0,1,105,4,2,12,2112,-291.5,445.9,984,1,1969,CAL +31,5,1,0,4,2,1,117,3,5,11,2351,-257.2,445.9,895,1,1791,CAL +31,6,1,0,4,2,1,117,4,2,12,2352,-222.9,445.9,734,1,1469,CAL +31,7,1,0,4,2,1,117,4,3,13,2353,-188.6,445.9,735,1,1471,CAL +31,8,1,0,4,1,1,115,4,3,13,2313,-154.4,445.9,736,1,1473,CAL +31,9,1,0,4,1,1,115,4,4,14,2314,-120.1,445.9,737,1,1475,CAL +31,10,1,0,4,1,1,115,4,5,15,2315,-85.8,445.9,738,1,1477,CAL +31,11,1,0,4,0,1,113,5,2,17,2276,-51.5,445.9,739,1,1479,CAL +31,12,1,0,4,0,1,113,5,3,16,2277,-17.1,445.9,740,1,1481,CAL +31,13,1,1,4,0,1,153,1,3,1,3061,17.1,445.9,741,1,1483,CAL +31,14,1,1,4,0,1,153,1,4,2,3062,51.5,445.9,742,1,1485,CAL +31,15,1,1,4,1,1,155,2,2,4,3104,85.8,445.9,743,1,1487,CAL +31,16,1,1,4,1,1,155,2,3,5,3105,120.1,445.9,744,1,1489,CAL +31,17,1,1,4,1,1,155,2,4,6,3106,154.4,445.9,745,1,1491,CAL 
+31,18,1,1,4,2,1,157,2,4,6,3146,188.6,445.9,746,1,1493,CAL +31,19,1,1,4,2,1,157,2,5,7,3147,222.9,445.9,747,1,1495,CAL +31,20,1,1,4,2,1,157,3,2,8,3148,257.2,445.9,827,1,1655,CAL +31,21,1,1,3,0,1,145,2,5,7,2907,291.5,445.9,913,1,1827,CAL +31,22,1,1,3,0,1,145,2,4,6,2906,325.9,445.9,1005,1,2011,CAL +31,23,1,1,3,0,1,145,2,3,5,2905,360.1,445.9,1099,1,2199,CAL +31,24,1,1,3,0,1,145,2,2,4,2904,394.5,445.9,1187,1,2375,CAL +31,25,1,1,3,1,1,147,1,2,0,2940,428.8,445.9,1257,1,2515,CAL +32,0,1,0,3,0,1,105,5,4,18,2118,-411.6,480.2,1311,1,2623,CAL +32,1,1,0,3,0,1,105,5,3,17,2117,-377.3,480.2,1254,1,2509,CAL +32,2,1,0,3,0,1,105,5,2,16,2116,-343,480.2,1173,1,2347,CAL +32,3,1,0,4,2,1,117,2,5,7,2347,-308.7,480.2,1080,1,2161,CAL +32,4,1,0,4,2,1,117,3,2,8,2348,-274.4,480.2,985,1,1971,CAL +32,5,1,0,4,2,1,117,3,3,9,2349,-240.1,480.2,812,1,1625,CAL +32,6,1,0,4,2,1,117,3,4,10,2350,-205.8,480.2,813,1,1627,CAL +32,7,1,0,4,1,1,115,3,4,10,2310,-171.5,480.2,814,1,1629,CAL +32,8,1,0,4,1,1,115,3,5,11,2311,-137.2,480.2,815,1,1631,CAL +32,9,1,0,4,1,1,115,4,2,12,2312,-102.9,480.2,816,1,1633,CAL +32,10,1,0,4,0,1,113,4,4,15,2274,-68.6,480.2,817,1,1635,CAL +32,11,1,0,4,0,1,113,4,5,14,2275,-34.3,480.2,818,1,1637,CAL +32,12,1,1,4,0,1,153,1,5,3,3063,0,480.2,819,1,1639,CAL +32,13,1,1,4,0,1,153,2,2,4,3064,34.3,480.2,820,1,1641,CAL +32,14,1,1,4,0,1,153,2,3,5,3065,68.6,480.2,821,1,1643,CAL +32,15,1,1,4,1,1,155,2,5,7,3107,102.9,480.2,822,1,1645,CAL +32,16,1,1,4,1,1,155,3,2,8,3108,137.2,480.2,823,1,1647,CAL +32,17,1,1,4,1,1,155,3,3,9,3109,171.5,480.2,824,1,1649,CAL +32,18,1,1,4,2,1,157,3,3,9,3149,205.8,480.2,825,1,1651,CAL +32,19,1,1,4,2,1,157,3,4,10,3150,240.1,480.2,826,1,1653,CAL +32,20,1,1,4,2,1,157,3,5,11,3151,274.4,480.2,912,1,1825,CAL +32,21,1,1,4,2,1,157,4,2,12,3152,308.7,480.2,1004,1,2009,CAL +32,22,1,1,3,0,1,145,1,5,3,2903,343,480.2,1098,1,2197,CAL +32,23,1,1,3,0,1,145,1,4,2,2902,377.3,480.2,1186,1,2373,CAL +32,24,1,1,3,0,1,145,1,3,1,2901,411.6,480.2,1256,1,2513,CAL 
+33,0,1,0,3,0,1,105,5,5,19,2119,-360.1,514.5,1255,1,2511,CAL +33,1,1,0,4,2,1,117,1,5,3,2343,-325.9,514.5,1174,1,2349,CAL +33,2,1,0,4,2,1,117,2,2,4,2344,-291.5,514.5,1081,1,2163,CAL +33,3,1,0,4,2,1,117,2,3,5,2345,-257.2,514.5,896,1,1793,CAL +33,4,1,0,4,2,1,117,2,4,6,2346,-222.9,514.5,897,1,1795,CAL +33,5,1,0,4,1,1,115,2,5,7,2307,-188.6,514.5,898,1,1797,CAL +33,6,1,0,4,1,1,115,3,2,8,2308,-154.4,514.5,899,1,1799,CAL +33,7,1,0,4,1,1,115,3,3,9,2309,-120.1,514.5,900,1,1801,CAL +33,8,1,0,4,0,0,113,3,5,13,2271,-85.8,514.5,901,0,1802,CAL +33,9,1,0,4,0,1,113,4,2,12,2272,-51.5,514.5,902,1,1805,CAL +33,10,1,0,4,0,1,113,4,3,11,2273,-17.1,514.5,903,1,1807,CAL +33,11,1,1,4,0,1,153,2,4,6,3066,17.1,514.5,904,1,1809,CAL +33,12,1,1,4,0,1,153,2,5,7,3067,51.5,514.5,905,1,1811,CAL +33,13,1,1,4,0,1,153,3,2,8,3068,85.8,514.5,906,1,1813,CAL +33,14,1,1,4,1,1,155,3,4,10,3110,120.1,514.5,907,1,1815,CAL +33,15,1,1,4,1,1,155,3,5,11,3111,154.4,514.5,908,1,1817,CAL +33,16,1,1,4,1,1,155,4,2,12,3112,188.6,514.5,909,1,1819,CAL +33,17,1,1,4,2,1,157,4,3,13,3153,222.9,514.5,910,1,1821,CAL +33,18,1,1,4,2,1,157,4,4,14,3154,257.2,514.5,911,1,1823,CAL +33,19,1,1,4,2,1,157,4,5,15,3155,291.5,514.5,1003,1,2007,CAL +33,20,1,1,4,2,1,157,5,2,16,3156,325.9,514.5,1097,1,2195,CAL +33,21,1,1,3,0,1,145,1,2,0,2900,360.1,514.5,1185,1,2371,CAL +34,0,1,0,4,2,1,117,1,2,0,2340,-308.7,548.8,1175,1,2351,CAL +34,1,1,0,4,2,1,117,1,3,1,2341,-274.4,548.8,986,1,1973,CAL +34,2,1,0,4,2,1,117,1,4,2,2342,-240.1,548.8,987,1,1975,CAL +34,3,1,0,4,1,1,115,1,5,3,2303,-205.8,548.8,988,1,1977,CAL +34,4,1,0,4,1,1,115,2,2,4,2304,-171.5,548.8,989,1,1979,CAL +34,5,1,0,4,1,1,115,2,3,5,2305,-137.2,548.8,990,1,1981,CAL +34,6,1,0,4,1,1,115,2,4,6,2306,-102.9,548.8,991,1,1983,CAL +34,7,1,0,4,0,1,113,3,2,10,2268,-68.6,548.8,992,1,1985,CAL +34,8,1,0,4,0,1,113,3,3,9,2269,-34.3,548.8,993,1,1987,CAL +34,9,1,0,4,0,1,113,3,4,8,2270,0,548.8,994,1,1989,CAL +34,10,1,1,4,0,1,153,3,3,9,3069,34.3,548.8,995,1,1991,CAL 
+34,11,1,1,4,0,1,153,3,4,10,3070,68.6,548.8,996,1,1993,CAL +34,12,1,1,4,0,1,153,3,5,11,3071,102.9,548.8,997,1,1995,CAL +34,13,1,1,4,1,1,155,4,3,13,3113,137.2,548.8,998,1,1997,CAL +34,14,1,1,4,1,1,155,4,4,14,3114,171.5,548.8,999,1,1999,CAL +34,15,1,1,4,1,1,155,4,5,15,3115,205.8,548.8,1000,1,2001,CAL +34,16,1,1,4,2,1,157,5,3,17,3157,240.1,548.8,1001,1,2003,CAL +34,17,1,1,4,2,1,157,5,4,18,3158,274.4,548.8,1002,1,2005,CAL +34,18,1,1,4,2,1,157,5,5,19,3159,308.7,548.8,1096,1,2193,CAL +35,0,1,0,4,1,1,115,1,2,0,2300,-222.9,583.1,1082,1,2165,CAL +35,1,1,0,4,1,1,115,1,3,1,2301,-188.6,583.1,1083,1,2167,CAL +35,2,1,0,4,1,1,115,1,4,2,2302,-154.4,583.1,1084,1,2169,CAL +35,3,1,0,4,0,1,113,2,2,7,2264,-120.1,583.1,1085,1,2171,CAL +35,4,1,0,4,0,1,113,2,3,6,2265,-85.8,583.1,1086,1,2173,CAL +35,5,1,0,4,0,1,113,2,4,5,2266,-51.5,583.1,1087,1,2175,CAL +35,6,1,0,4,0,1,113,2,5,4,2267,-17.1,583.1,1088,1,2177,CAL +35,7,1,1,4,0,1,153,4,2,12,3072,17.1,583.1,1089,1,2179,CAL +35,8,1,1,4,0,1,153,4,3,13,3073,51.5,583.1,1090,1,2181,CAL +35,9,1,1,4,0,1,153,4,4,14,3074,85.8,583.1,1091,1,2183,CAL +35,10,1,1,4,0,1,153,4,5,15,3075,120.1,583.1,1092,1,2185,CAL +35,11,1,1,4,1,1,155,5,2,16,3116,154.4,583.1,1093,1,2187,CAL +35,12,1,1,4,1,1,155,5,3,17,3117,188.6,583.1,1094,1,2189,CAL +35,13,1,1,4,1,1,155,5,4,18,3118,222.9,583.1,1095,1,2191,CAL +36,0,1,0,4,0,1,113,1,2,3,2260,-137.2,617.4,1176,1,2353,CAL +36,1,1,0,4,0,1,113,1,3,2,2261,-102.9,617.4,1177,1,2355,CAL +36,2,1,0,4,0,0,113,1,4,1,2262,-68.6,617.4,1178,0,2356,CAL +36,3,1,0,4,0,1,113,1,5,0,2263,-34.3,617.4,1179,1,2359,CAL +36,4,1,1,4,0,1,153,5,2,16,3076,0,617.4,1180,1,2361,CAL +36,5,1,1,4,0,1,153,5,3,17,3077,34.3,617.4,1181,1,2363,CAL +36,6,1,1,4,0,1,153,5,4,18,3078,68.6,617.4,1182,1,2365,CAL +36,7,1,1,4,0,1,153,5,5,19,3079,102.9,617.4,1183,1,2367,CAL +36,8,1,1,4,1,1,155,5,5,19,3119,137.2,617.4,1184,1,2369,CAL +-99,-99,1,0,1,1,0,90,5,5,19,1819,-700,50,4000,0,2704,PIN-DIODE +-99,-99,1,0,1,0,1,89,5,5,19,1799,-700,50,4001,1,2705,PIN-DIODE 
+-99,-99,1,0,1,0,0,88,5,5,19,1779,-700,-50,4011,0,2706,PIN-DIODE +-99,-99,1,0,1,1,1,91,5,5,19,1839,-700,-50,4010,1,2707,PIN-DIODE +-99,-99,1,1,1,0,0,128,1,2,0,2560,700,50,4100,0,2708,PIN-DIODE +-99,-99,1,1,1,0,1,129,1,2,0,2580,700,50,4101,1,2709,PIN-DIODE +-99,-99,1,1,1,1,0,130,1,2,0,2600,700,-50,4111,0,2710,PIN-DIODE +-99,-99,1,1,1,1,1,131,1,2,0,2620,700,-50,4110,1,2711,PIN-DIODE +-99,-99,1,0,1,0,0,88,-99,-99,18,1778,-999,-999,9999,0,9999,EMPTY +-99,-99,1,1,1,0,0,128,-99,-99,19,2579,-999,-999,9999,0,9999,EMPTY +-99,-99,1,0,1,0,0,89,-99,-99,18,1798,-999,-999,9999,0,9999,EMPTY +-99,-99,1,1,1,0,0,129,-99,-99,19,2599,-999,-999,9999,0,9999,EMPTY +-99,-99,-99,-99,-99,-99,-99,160,-99,-99,0,3200,-700,700,5000,0,2712,PIN-DIODE +-99,-99,-99,-99,-99,-99,-99,160,-99,-99,1,3201,-700,700,5001,1,2713,PIN-DIODE +-99,-99,-99,-99,-99,-99,-99,160,-99,-99,2,3202,700,700,5010,0,2714,PIN-DIODE +-99,-99,-99,-99,-99,-99,-99,160,-99,-99,3,3203,700,700,5011,1,2715,PIN-DIODE +-99,-99,-99,-99,-99,-99,-99,160,-99,-99,4,3204,-999,-999,9999,-99,9999,EMPTY +-99,-99,-99,-99,-99,-99,-99,160,-99,-99,5,3205,-999,-999,9999,-99,9999,EMPTY +-99,-99,-99,-99,-99,-99,-99,160,-99,-99,6,3206,-999,-999,9999,-99,9999,EMPTY +-99,-99,-99,-99,-99,-99,-99,160,-99,-99,7,3207,-999,-999,9999,-99,9999,EMPTY +-99,-99,-99,-99,-99,-99,-99,160,-99,-99,8,3208,-999,-999,9999,-99,9999,EMPTY +-99,-99,-99,-99,-99,-99,-99,160,-99,-99,9,3209,-999,-999,9999,-99,9999,EMPTY +-99,-99,-99,-99,-99,-99,-99,160,-99,-99,10,3210,-999,-999,9999,-99,9999,EMPTY +-99,-99,-99,-99,-99,-99,-99,160,-99,-99,11,3211,-999,-999,9999,-99,9999,EMPTY +-99,-99,-99,-99,-99,-99,-99,160,-99,-99,12,3212,-999,-999,9999,-99,9999,EMPTY +-99,-99,-99,-99,-99,-99,-99,160,-99,-99,13,3213,-999,-999,9999,-99,9999,EMPTY +-99,-99,-99,-99,-99,-99,-99,160,-99,-99,14,3214,-999,-999,9999,-99,9999,EMPTY +-99,-99,-99,-99,-99,-99,-99,160,-99,-99,15,3215,-999,-999,9999,-99,9999,EMPTY +-99,-99,-99,-99,-99,-99,-99,160,-99,-99,16,3216,-999,-999,9999,-99,9999,EMPTY 
+-99,-99,-99,-99,-99,-99,-99,160,-99,-99,17,3217,-999,-999,9999,-99,9999,EMPTY +-99,-99,-99,-99,-99,-99,-99,160,-99,-99,18,3218,-999,-999,9999,-99,9999,EMPTY +-99,-99,-99,-99,-99,-99,-99,160,-99,-99,19,3219,-999,-999,9999,-99,9999,EMPTY \ No newline at end of file diff --git a/CaloClusterGNN/data/crystal_neighbors.csv b/CaloClusterGNN/data/crystal_neighbors.csv new file mode 100644 index 0000000..774f00a --- /dev/null +++ b/CaloClusterGNN/data/crystal_neighbors.csv @@ -0,0 +1,1349 @@ +crystalId,neighbors +0,1;15;16;17;59 +1,0;2;17;18 +2,1;18;19 +3,4;25;26 +4,3;5;26;27 +5,4;6;27;28;29 +6,5;29;30 +7,8;36;37 +8,7;9;37;38;39 +9,8;10;39;40 +10,9;40;41 +11,12;47;48 +12,11;13;48;49 +13,12;14;49;50;51 +14,13;51;52 +15,0;58;59 +16,0;17;59;60;61;137 +17,0;1;16;18;61;62 +18,1;2;17;19;62;63 +19,2;18;20;63;64 +20,19;21;64;65 +21,20;22;65;66 +22,21;23;66;67 +23,22;24;67;68 +24,23;25;68;69 +25,3;24;26;69;70 +26,3;4;25;27;70;71 +27,4;5;26;28;71;72 +28,5;27;29;72;73;74 +29,5;6;28;30;74;75 +30,6;29;31;75;76 +31,30;76;77 +32,33;84;85 +33,32;34;85;86;87 +34,33;87;88 +35,36;95;96 +36,7;35;37;96;97 +37,7;8;36;38;97;98 +38,8;37;39;98;99;100 +39,8;9;38;40;100;101 +40,9;10;39;41;101;102 +41,10;40;42;102;103 +42,41;43;103;104 +43,42;44;104;105 +44,43;45;105;106 +45,44;46;106;107 +46,45;47;107;108 +47,11;46;48;108;109 +48,11;12;47;49;109;110 +49,12;13;48;50;110;111 +50,13;49;51;111;112;113 +51,13;14;50;52;113;114 +52,14;51;53;114;115 +53,52;115;116 +54,55;123;124 +55,54;56;124;125;126 +56,55;126;127 +57,58;134;135 +58,15;57;59;135;136 +59,0;15;16;58;136;137 +60,16;61;137;138;139;221 +61,16;17;60;62;139;140 +62,17;18;61;63;140;141 +63,18;19;62;64;141;142 +64,19;20;63;65;142;143 +65,20;21;64;66;143;144 +66,21;22;65;67;144;145 +67,22;23;66;68;145;146 +68,23;24;67;69;146;147 +69,24;25;68;70;147;148 +70,25;26;69;71;148;149 +71,26;27;70;72;149;150 +72,27;28;71;73;150;151 +73,28;72;74;151;152;153 +74,28;29;73;75;153;154 +75,29;30;74;76;154;155 +76,30;31;75;77;155;156 +77,31;76;78;156;157 
+78,77;79;157;158 +79,78;80;158;159 +80,79;81;159;160 +81,80;82;160;161 +82,81;83;161;162 +83,82;84;162;163 +84,32;83;85;163;164 +85,32;33;84;86;164;165 +86,33;85;87;165;166;167 +87,33;34;86;88;167;168 +88,34;87;89;168;169 +89,88;90;169;170 +90,89;91;170;171 +91,90;92;171;172 +92,91;93;172;173 +93,92;94;173;174 +94,93;95;174;175 +95,35;94;96;175;176 +96,35;36;95;97;176;177 +97,36;37;96;98;177;178 +98,37;38;97;99;178;179 +99,38;98;100;179;180;181 +100,38;39;99;101;181;182 +101,39;40;100;102;182;183 +102,40;41;101;103;183;184 +103,41;42;102;104;184;185 +104,42;43;103;105;185;186 +105,43;44;104;106;186;187 +106,44;45;105;107;187;188 +107,45;46;106;108;188;189 +108,46;47;107;109;189;190 +109,47;48;108;110;190;191 +110,48;49;109;111;191;192 +111,49;50;110;112;192;193 +112,50;111;113;193;194;195 +113,50;51;112;114;195;196 +114,51;52;113;115;196;197 +115,52;53;114;116;197;198 +116,53;115;117;198;199 +117,116;118;199;200 +118,117;119;200;201 +119,118;120;201;202 +120,119;121;202;203 +121,120;122;203;204 +122,121;123;204;205 +123,54;122;124;205;206 +124,54;55;123;125;206;207 +125,55;124;126;207;208;209 +126,55;56;125;127;209;210 +127,56;126;128;210;211 +128,127;129;211;212 +129,128;130;212;213 +130,129;131;213;214 +131,130;132;214;215 +132,131;133;215;216 +133,132;134;216;217 +134,57;133;135;217;218 +135,57;58;134;136;218;219 +136,58;59;135;137;219;220 +137,16;59;60;136;220;221 +138,60;139;221;222;223;311 +139,60;61;138;140;223;224 +140,61;62;139;141;224;225 +141,62;63;140;142;225;226 +142,63;64;141;143;226;227 +143,64;65;142;144;227;228 +144,65;66;143;145;228;229 +145,66;67;144;146;229;230 +146,67;68;145;147;230;231 +147,68;69;146;148;231;232 +148,69;70;147;149;232;233 +149,70;71;148;150;233;234 +150,71;72;149;151;234;235 +151,72;73;150;152;235;236 +152,73;151;153;236;237;238 +153,73;74;152;154;238;239 +154,74;75;153;155;239;240 +155,75;76;154;156;240;241 +156,76;77;155;157;241;242 +157,77;78;156;158;242;243 +158,78;79;157;159;243;244 +159,79;80;158;160;244;245 
+160,80;81;159;161;245;246 +161,81;82;160;162;246;247 +162,82;83;161;163;247;248 +163,83;84;162;164;248;249 +164,84;85;163;165;249;250 +165,85;86;164;166;250;251 +166,86;165;167;251;252;253 +167,86;87;166;168;253;254 +168,87;88;167;169;254;255 +169,88;89;168;170;255;256 +170,89;90;169;171;256;257 +171,90;91;170;172;257;258 +172,91;92;171;173;258;259 +173,92;93;172;174;259;260 +174,93;94;173;175;260;261 +175,94;95;174;176;261;262 +176,95;96;175;177;262;263 +177,96;97;176;178;263;264 +178,97;98;177;179;264;265 +179,98;99;178;180;265;266 +180,99;179;181;266;267;268 +181,99;100;180;182;268;269 +182,100;101;181;183;269;270 +183,101;102;182;184;270;271 +184,102;103;183;185;271;272 +185,103;104;184;186;272;273 +186,104;105;185;187;273;274 +187,105;106;186;188;274;275 +188,106;107;187;189;275;276 +189,107;108;188;190;276;277 +190,108;109;189;191;277;278 +191,109;110;190;192;278;279 +192,110;111;191;193;279;280 +193,111;112;192;194;280;281 +194,112;193;195;281;282;283 +195,112;113;194;196;283;284 +196,113;114;195;197;284;285 +197,114;115;196;198;285;286 +198,115;116;197;199;286;287 +199,116;117;198;200;287;288 +200,117;118;199;201;288;289 +201,118;119;200;202;289;290 +202,119;120;201;203;290;291 +203,120;121;202;204;291;292 +204,121;122;203;205;292;293 +205,122;123;204;206;293;294 +206,123;124;205;207;294;295 +207,124;125;206;208;295;296 +208,125;207;209;296;297;298 +209,125;126;208;210;298;299 +210,126;127;209;211;299;300 +211,127;128;210;212;300;301 +212,128;129;211;213;301;302 +213,129;130;212;214;302;303 +214,130;131;213;215;303;304 +215,131;132;214;216;304;305 +216,132;133;215;217;305;306 +217,133;134;216;218;306;307 +218,134;135;217;219;307;308 +219,135;136;218;220;308;309 +220,136;137;219;221;309;310 +221,60;137;138;220;310;311 +222,138;223;311;312;313;407 +223,138;139;222;224;313;314 +224,139;140;223;225;314;315 +225,140;141;224;226;315;316 +226,141;142;225;227;316;317 +227,142;143;226;228;317;318 +228,143;144;227;229;318;319 +229,144;145;228;230;319;320 
+230,145;146;229;231;320;321 +231,146;147;230;232;321;322 +232,147;148;231;233;322;323 +233,148;149;232;234;323;324 +234,149;150;233;235;324;325 +235,150;151;234;236;325;326 +236,151;152;235;237;326;327 +237,152;236;238;327;328;329 +238,152;153;237;239;329;330 +239,153;154;238;240;330;331 +240,154;155;239;241;331;332 +241,155;156;240;242;332;333 +242,156;157;241;243;333;334 +243,157;158;242;244;334;335 +244,158;159;243;245;335;336 +245,159;160;244;246;336;337 +246,160;161;245;247;337;338 +247,161;162;246;248;338;339 +248,162;163;247;249;339;340 +249,163;164;248;250;340;341 +250,164;165;249;251;341;342 +251,165;166;250;252;342;343 +252,166;251;253;343;344;345 +253,166;167;252;254;345;346 +254,167;168;253;255;346;347 +255,168;169;254;256;347;348 +256,169;170;255;257;348;349 +257,170;171;256;258;349;350 +258,171;172;257;259;350;351 +259,172;173;258;260;351;352 +260,173;174;259;261;352;353 +261,174;175;260;262;353;354 +262,175;176;261;263;354;355 +263,176;177;262;264;355;356 +264,177;178;263;265;356;357 +265,178;179;264;266;357;358 +266,179;180;265;267;358;359 +267,180;266;268;359;360;361 +268,180;181;267;269;361;362 +269,181;182;268;270;362;363 +270,182;183;269;271;363;364 +271,183;184;270;272;364;365 +272,184;185;271;273;365;366 +273,185;186;272;274;366;367 +274,186;187;273;275;367;368 +275,187;188;274;276;368;369 +276,188;189;275;277;369;370 +277,189;190;276;278;370;371 +278,190;191;277;279;371;372 +279,191;192;278;280;372;373 +280,192;193;279;281;373;374 +281,193;194;280;282;374;375 +282,194;281;283;375;376;377 +283,194;195;282;284;377;378 +284,195;196;283;285;378;379 +285,196;197;284;286;379;380 +286,197;198;285;287;380;381 +287,198;199;286;288;381;382 +288,199;200;287;289;382;383 +289,200;201;288;290;383;384 +290,201;202;289;291;384;385 +291,202;203;290;292;385;386 +292,203;204;291;293;386;387 +293,204;205;292;294;387;388 +294,205;206;293;295;388;389 +295,206;207;294;296;389;390 +296,207;208;295;297;390;391 +297,208;296;298;391;392;393 
+298,208;209;297;299;393;394 +299,209;210;298;300;394;395 +300,210;211;299;301;395;396 +301,211;212;300;302;396;397 +302,212;213;301;303;397;398 +303,213;214;302;304;398;399 +304,214;215;303;305;399;400 +305,215;216;304;306;400;401 +306,216;217;305;307;401;402 +307,217;218;306;308;402;403 +308,218;219;307;309;403;404 +309,219;220;308;310;404;405 +310,220;221;309;311;405;406 +311,138;221;222;310;406;407 +312,222;313;407;501 +313,222;223;312;314;408 +314,223;224;313;315;408;409 +315,224;225;314;316;409;410 +316,225;226;315;317;410;411 +317,226;227;316;318;411;412 +318,227;228;317;319;412;413 +319,228;229;318;320;413;414 +320,229;230;319;321;414;415 +321,230;231;320;322;415;416 +322,231;232;321;323;416;417 +323,232;233;322;324;417;418 +324,233;234;323;325;418;419 +325,234;235;324;326;419;420 +326,235;236;325;327;420;421 +327,236;237;326;328;421 +328,237;327;329;422 +329,237;238;328;330;422;423 +330,238;239;329;331;423;424 +331,239;240;330;332;424;425 +332,240;241;331;333;425;426 +333,241;242;332;334;426;427 +334,242;243;333;335;427;428 +335,243;244;334;336;428;429 +336,244;245;335;337;429;430 +337,245;246;336;338;430;431 +338,246;247;337;339;431;432 +339,247;248;338;340;432;433 +340,248;249;339;341;433;434 +341,249;250;340;342;434;435 +342,250;251;341;343;435;436 +343,251;252;342;344;436;437 +344,252;343;345;437;438;439 +345,252;253;344;346;439;440 +346,253;254;345;347;440;441 +347,254;255;346;348;441;442 +348,255;256;347;349;442;443 +349,256;257;348;350;443;444 +350,257;258;349;351;444;445 +351,258;259;350;352;445;446 +352,259;260;351;353;446;447 +353,260;261;352;354;447;448 +354,261;262;353;355;448;449 +355,262;263;354;356;449;450 +356,263;264;355;357;450;451 +357,264;265;356;358;451;452 +358,265;266;357;359;452;453 +359,266;267;358;360;453;454 +360,267;359;361;454 +361,267;268;360;362;455 +362,268;269;361;363;455;456 +363,269;270;362;364;456;457 +364,270;271;363;365;457;458 +365,271;272;364;366;458;459 +366,272;273;365;367;459;460 +367,273;274;366;368;460;461 
+368,274;275;367;369;461;462 +369,275;276;368;370;462;463 +370,276;277;369;371;463;464 +371,277;278;370;372;464;465 +372,278;279;371;373;465;466 +373,279;280;372;374;466;467 +374,280;281;373;375;467;468 +375,281;282;374;376;468 +376,282;375;377;469 +377,282;283;376;378;469;470 +378,283;284;377;379;470;471 +379,284;285;378;380;471;472 +380,285;286;379;381;472;473 +381,286;287;380;382;473;474 +382,287;288;381;383;474;475 +383,288;289;382;384;475;476 +384,289;290;383;385;476;477 +385,290;291;384;386;477;478 +386,291;292;385;387;478;479 +387,292;293;386;388;479;480 +388,293;294;387;389;480;481 +389,294;295;388;390;481;482 +390,295;296;389;391;482;483 +391,296;297;390;392;483;484 +392,297;391;393;484;485;486 +393,297;298;392;394;486;487 +394,298;299;393;395;487;488 +395,299;300;394;396;488;489 +396,300;301;395;397;489;490 +397,301;302;396;398;490;491 +398,302;303;397;399;491;492 +399,303;304;398;400;492;493 +400,304;305;399;401;493;494 +401,305;306;400;402;494;495 +402,306;307;401;403;495;496 +403,307;308;402;404;496;497 +404,308;309;403;405;497;498 +405,309;310;404;406;498;499 +406,310;311;405;407;499;500 +407,222;311;312;406;500;501 +408,313;314;409 +409,314;315;408;410 +410,315;316;409;411;502 +411,316;317;410;412;502;503 +412,317;318;411;413;503;504 +413,318;319;412;414;504;505 +414,319;320;413;415;505;506 +415,320;321;414;416;506;507 +416,321;322;415;417;507;508 +417,322;323;416;418;508;509 +418,323;324;417;419;509;510 +419,324;325;418;420;510 +420,325;326;419;421 +421,326;327;420 +422,328;329;423 +423,329;330;422;424;511 +424,330;331;423;425;511;512 +425,331;332;424;426;512;513 +426,332;333;425;427;513;514 +427,333;334;426;428;514;515 +428,334;335;427;429;515;516 +429,335;336;428;430;516;517 +430,336;337;429;431;517;518 +431,337;338;430;432;518;519 +432,338;339;431;433;519;520 +433,339;340;432;434;520;521 +434,340;341;433;435;521;522 +435,341;342;434;436;522;523 +436,342;343;435;437;523;524 +437,343;344;436;438;524;525 +438,344;437;439;525;526;527 
+439,344;345;438;440;527;528 +440,345;346;439;441;528;529 +441,346;347;440;442;529;530 +442,347;348;441;443;530;531 +443,348;349;442;444;531;532 +444,349;350;443;445;532;533 +445,350;351;444;446;533;534 +446,351;352;445;447;534;535 +447,352;353;446;448;535;536 +448,353;354;447;449;536;537 +449,354;355;448;450;537;538 +450,355;356;449;451;538;539 +451,356;357;450;452;539;540 +452,357;358;451;453;540;541 +453,358;359;452;454;541 +454,359;360;453 +455,361;362;456 +456,362;363;455;457 +457,363;364;456;458;542 +458,364;365;457;459;542;543 +459,365;366;458;460;543;544 +460,366;367;459;461;544;545 +461,367;368;460;462;545;546 +462,368;369;461;463;546;547 +463,369;370;462;464;547;548 +464,370;371;463;465;548;549 +465,371;372;464;466;549;550 +466,372;373;465;467;550 +467,373;374;466;468 +468,374;375;467 +469,376;377;470 +470,377;378;469;471;551 +471,378;379;470;472;551;552 +472,379;380;471;473;552;553 +473,380;381;472;474;553;554 +474,381;382;473;475;554;555 +475,382;383;474;476;555;556 +476,383;384;475;477;556;557 +477,384;385;476;478;557;558 +478,385;386;477;479;558;559 +479,386;387;478;480;559;560 +480,387;388;479;481;560;561 +481,388;389;480;482;561;562 +482,389;390;481;483;562;563 +483,390;391;482;484;563;564 +484,391;392;483;485;564;565 +485,392;484;486;565;566;567 +486,392;393;485;487;567;568 +487,393;394;486;488;568;569 +488,394;395;487;489;569;570 +489,395;396;488;490;570;571 +490,396;397;489;491;571;572 +491,397;398;490;492;572;573 +492,398;399;491;493;573;574 +493,399;400;492;494;574;575 +494,400;401;493;495;575;576 +495,401;402;494;496;576;577 +496,402;403;495;497;577;578 +497,403;404;496;498;578;579 +498,404;405;497;499;579;580 +499,405;406;498;500;580;581 +500,406;407;499;501;581 +501,312;407;500 +502,410;411;503 +503,411;412;502;504 +504,412;413;503;505 +505,413;414;504;506 +506,414;415;505;507 +507,415;416;506;508 +508,416;417;507;509 +509,417;418;508;510 +510,418;419;509 +511,423;424;512 +512,424;425;511;513;582 +513,425;426;512;514;582;583 
+514,426;427;513;515;583;584 +515,427;428;514;516;584;585 +516,428;429;515;517;585;586 +517,429;430;516;518;586;587 +518,430;431;517;519;587;588 +519,431;432;518;520;588;589 +520,432;433;519;521;589;590 +521,433;434;520;522;590;591 +522,434;435;521;523;591;592 +523,435;436;522;524;592;593 +524,436;437;523;525;593;594 +525,437;438;524;526;594;595 +526,438;525;527;595;596 +527,438;439;526;528;596;597 +528,439;440;527;529;597;598 +529,440;441;528;530;598;599 +530,441;442;529;531;599;600 +531,442;443;530;532;600;601 +532,443;444;531;533;601;602 +533,444;445;532;534;602;603 +534,445;446;533;535;603;604 +535,446;447;534;536;604;605 +536,447;448;535;537;605;606 +537,448;449;536;538;606;607 +538,449;450;537;539;607;608 +539,450;451;538;540;608;609 +540,451;452;539;541;609 +541,452;453;540 +542,457;458;543 +543,458;459;542;544 +544,459;460;543;545 +545,460;461;544;546 +546,461;462;545;547 +547,462;463;546;548 +548,463;464;547;549 +549,464;465;548;550 +550,465;466;549 +551,470;471;552 +552,471;472;551;553;610 +553,472;473;552;554;610;611 +554,473;474;553;555;611;612 +555,474;475;554;556;612;613 +556,475;476;555;557;613;614 +557,476;477;556;558;614;615 +558,477;478;557;559;615;616 +559,478;479;558;560;616;617 +560,479;480;559;561;617;618 +561,480;481;560;562;618;619 +562,481;482;561;563;619;620 +563,482;483;562;564;620;621 +564,483;484;563;565;621;622 +565,484;485;564;566;622;623 +566,485;565;567;623;624 +567,485;486;566;568;624;625 +568,486;487;567;569;625;626 +569,487;488;568;570;626;627 +570,488;489;569;571;627;628 +571,489;490;570;572;628;629 +572,490;491;571;573;629;630 +573,491;492;572;574;630;631 +574,492;493;573;575;631;632 +575,493;494;574;576;632;633 +576,494;495;575;577;633;634 +577,495;496;576;578;634;635 +578,496;497;577;579;635;636 +579,497;498;578;580;636;637 +580,498;499;579;581;637 +581,499;500;580 +582,512;513;583 +583,513;514;582;584 +584,514;515;583;585;638 +585,515;516;584;586;638;639 +586,516;517;585;587;639;640 +587,517;518;586;588;640;641 
+588,518;519;587;589;641;642 +589,519;520;588;590;642;643 +590,520;521;589;591;643;644 +591,521;522;590;592;644;645 +592,522;523;591;593;645;646 +593,523;524;592;594;646 +594,524;525;593;595 +595,525;526;594 +596,526;527;597 +597,527;528;596;598 +598,528;529;597;599;647 +599,529;530;598;600;647;648 +600,530;531;599;601;648;649 +601,531;532;600;602;649;650 +602,532;533;601;603;650;651 +603,533;534;602;604;651;652 +604,534;535;603;605;652;653 +605,535;536;604;606;653;654 +606,536;537;605;607;654;655 +607,537;538;606;608;655 +608,538;539;607;609 +609,539;540;608 +610,552;553;611 +611,553;554;610;612 +612,554;555;611;613;656 +613,555;556;612;614;656;657 +614,556;557;613;615;657;658 +615,557;558;614;616;658;659 +616,558;559;615;617;659;660 +617,559;560;616;618;660;661 +618,560;561;617;619;661;662 +619,561;562;618;620;662;663 +620,562;563;619;621;663;664 +621,563;564;620;622;664 +622,564;565;621;623 +623,565;566;622 +624,566;567;625 +625,567;568;624;626 +626,568;569;625;627;665 +627,569;570;626;628;665;666 +628,570;571;627;629;666;667 +629,571;572;628;630;667;668 +630,572;573;629;631;668;669 +631,573;574;630;632;669;670 +632,574;575;631;633;670;671 +633,575;576;632;634;671;672 +634,576;577;633;635;672;673 +635,577;578;634;636;673 +636,578;579;635;637 +637,579;580;636 +638,584;585;639 +639,585;586;638;640 +640,586;587;639;641 +641,587;588;640;642 +642,588;589;641;643 +643,589;590;642;644 +644,590;591;643;645 +645,591;592;644;646 +646,592;593;645 +647,598;599;648 +648,599;600;647;649 +649,600;601;648;650 +650,601;602;649;651 +651,602;603;650;652 +652,603;604;651;653 +653,604;605;652;654 +654,605;606;653;655 +655,606;607;654 +656,612;613;657 +657,613;614;656;658 +658,614;615;657;659 +659,615;616;658;660 +660,616;617;659;661 +661,617;618;660;662 +662,618;619;661;663 +663,619;620;662;664 +664,620;621;663 +665,626;627;666 +666,627;628;665;667 +667,628;629;666;668 +668,629;630;667;669 +669,630;631;668;670 +670,631;632;669;671 +671,632;633;670;672 +672,633;634;671;673 
+673,634;635;672 +674,675;689;690;691;733 +675,674;676;691;692 +676,675;692;693 +677,678;699;700 +678,677;679;700;701 +679,678;680;701;702;703 +680,679;703;704 +681,682;710;711 +682,681;683;711;712;713 +683,682;684;713;714 +684,683;714;715 +685,686;721;722 +686,685;687;722;723 +687,686;688;723;724;725 +688,687;725;726 +689,674;732;733 +690,674;691;733;734;735;811 +691,674;675;690;692;735;736 +692,675;676;691;693;736;737 +693,676;692;694;737;738 +694,693;695;738;739 +695,694;696;739;740 +696,695;697;740;741 +697,696;698;741;742 +698,697;699;742;743 +699,677;698;700;743;744 +700,677;678;699;701;744;745 +701,678;679;700;702;745;746 +702,679;701;703;746;747;748 +703,679;680;702;704;748;749 +704,680;703;705;749;750 +705,704;750;751 +706,707;758;759 +707,706;708;759;760;761 +708,707;761;762 +709,710;769;770 +710,681;709;711;770;771 +711,681;682;710;712;771;772 +712,682;711;713;772;773;774 +713,682;683;712;714;774;775 +714,683;684;713;715;775;776 +715,684;714;716;776;777 +716,715;717;777;778 +717,716;718;778;779 +718,717;719;779;780 +719,718;720;780;781 +720,719;721;781;782 +721,685;720;722;782;783 +722,685;686;721;723;783;784 +723,686;687;722;724;784;785 +724,687;723;725;785;786;787 +725,687;688;724;726;787;788 +726,688;725;727;788;789 +727,726;789;790 +728,729;797;798 +729,728;730;798;799;800 +730,729;800;801 +731,732;808;809 +732,689;731;733;809;810 +733,674;689;690;732;810;811 +734,690;735;811;812;813;895 +735,690;691;734;736;813;814 +736,691;692;735;737;814;815 +737,692;693;736;738;815;816 +738,693;694;737;739;816;817 +739,694;695;738;740;817;818 +740,695;696;739;741;818;819 +741,696;697;740;742;819;820 +742,697;698;741;743;820;821 +743,698;699;742;744;821;822 +744,699;700;743;745;822;823 +745,700;701;744;746;823;824 +746,701;702;745;747;824;825 +747,702;746;748;825;826;827 +748,702;703;747;749;827;828 +749,703;704;748;750;828;829 +750,704;705;749;751;829;830 +751,705;750;752;830;831 +752,751;753;831;832 +753,752;754;832;833 +754,753;755;833;834 +755,754;756;834;835 
+756,755;757;835;836 +757,756;758;836;837 +758,706;757;759;837;838 +759,706;707;758;760;838;839 +760,707;759;761;839;840;841 +761,707;708;760;762;841;842 +762,708;761;763;842;843 +763,762;764;843;844 +764,763;765;844;845 +765,764;766;845;846 +766,765;767;846;847 +767,766;768;847;848 +768,767;769;848;849 +769,709;768;770;849;850 +770,709;710;769;771;850;851 +771,710;711;770;772;851;852 +772,711;712;771;773;852;853 +773,712;772;774;853;854;855 +774,712;713;773;775;855;856 +775,713;714;774;776;856;857 +776,714;715;775;777;857;858 +777,715;716;776;778;858;859 +778,716;717;777;779;859;860 +779,717;718;778;780;860;861 +780,718;719;779;781;861;862 +781,719;720;780;782;862;863 +782,720;721;781;783;863;864 +783,721;722;782;784;864;865 +784,722;723;783;785;865;866 +785,723;724;784;786;866;867 +786,724;785;787;867;868;869 +787,724;725;786;788;869;870 +788,725;726;787;789;870;871 +789,726;727;788;790;871;872 +790,727;789;791;872;873 +791,790;792;873;874 +792,791;793;874;875 +793,792;794;875;876 +794,793;795;876;877 +795,794;796;877;878 +796,795;797;878;879 +797,728;796;798;879;880 +798,728;729;797;799;880;881 +799,729;798;800;881;882;883 +800,729;730;799;801;883;884 +801,730;800;802;884;885 +802,801;803;885;886 +803,802;804;886;887 +804,803;805;887;888 +805,804;806;888;889 +806,805;807;889;890 +807,806;808;890;891 +808,731;807;809;891;892 +809,731;732;808;810;892;893 +810,732;733;809;811;893;894 +811,690;733;734;810;894;895 +812,734;813;895;896;897;985 +813,734;735;812;814;897;898 +814,735;736;813;815;898;899 +815,736;737;814;816;899;900 +816,737;738;815;817;900;901 +817,738;739;816;818;901;902 +818,739;740;817;819;902;903 +819,740;741;818;820;903;904 +820,741;742;819;821;904;905 +821,742;743;820;822;905;906 +822,743;744;821;823;906;907 +823,744;745;822;824;907;908 +824,745;746;823;825;908;909 +825,746;747;824;826;909;910 +826,747;825;827;910;911;912 +827,747;748;826;828;912;913 +828,748;749;827;829;913;914 +829,749;750;828;830;914;915 +830,750;751;829;831;915;916 
+831,751;752;830;832;916;917 +832,752;753;831;833;917;918 +833,753;754;832;834;918;919 +834,754;755;833;835;919;920 +835,755;756;834;836;920;921 +836,756;757;835;837;921;922 +837,757;758;836;838;922;923 +838,758;759;837;839;923;924 +839,759;760;838;840;924;925 +840,760;839;841;925;926;927 +841,760;761;840;842;927;928 +842,761;762;841;843;928;929 +843,762;763;842;844;929;930 +844,763;764;843;845;930;931 +845,764;765;844;846;931;932 +846,765;766;845;847;932;933 +847,766;767;846;848;933;934 +848,767;768;847;849;934;935 +849,768;769;848;850;935;936 +850,769;770;849;851;936;937 +851,770;771;850;852;937;938 +852,771;772;851;853;938;939 +853,772;773;852;854;939;940 +854,773;853;855;940;941;942 +855,773;774;854;856;942;943 +856,774;775;855;857;943;944 +857,775;776;856;858;944;945 +858,776;777;857;859;945;946 +859,777;778;858;860;946;947 +860,778;779;859;861;947;948 +861,779;780;860;862;948;949 +862,780;781;861;863;949;950 +863,781;782;862;864;950;951 +864,782;783;863;865;951;952 +865,783;784;864;866;952;953 +866,784;785;865;867;953;954 +867,785;786;866;868;954;955 +868,786;867;869;955;956;957 +869,786;787;868;870;957;958 +870,787;788;869;871;958;959 +871,788;789;870;872;959;960 +872,789;790;871;873;960;961 +873,790;791;872;874;961;962 +874,791;792;873;875;962;963 +875,792;793;874;876;963;964 +876,793;794;875;877;964;965 +877,794;795;876;878;965;966 +878,795;796;877;879;966;967 +879,796;797;878;880;967;968 +880,797;798;879;881;968;969 +881,798;799;880;882;969;970 +882,799;881;883;970;971;972 +883,799;800;882;884;972;973 +884,800;801;883;885;973;974 +885,801;802;884;886;974;975 +886,802;803;885;887;975;976 +887,803;804;886;888;976;977 +888,804;805;887;889;977;978 +889,805;806;888;890;978;979 +890,806;807;889;891;979;980 +891,807;808;890;892;980;981 +892,808;809;891;893;981;982 +893,809;810;892;894;982;983 +894,810;811;893;895;983;984 +895,734;811;812;894;984;985 +896,812;897;985;986;987;1081 +897,812;813;896;898;987;988 +898,813;814;897;899;988;989 
+899,814;815;898;900;989;990 +900,815;816;899;901;990;991 +901,816;817;900;902;991;992 +902,817;818;901;903;992;993 +903,818;819;902;904;993;994 +904,819;820;903;905;994;995 +905,820;821;904;906;995;996 +906,821;822;905;907;996;997 +907,822;823;906;908;997;998 +908,823;824;907;909;998;999 +909,824;825;908;910;999;1000 +910,825;826;909;911;1000;1001 +911,826;910;912;1001;1002;1003 +912,826;827;911;913;1003;1004 +913,827;828;912;914;1004;1005 +914,828;829;913;915;1005;1006 +915,829;830;914;916;1006;1007 +916,830;831;915;917;1007;1008 +917,831;832;916;918;1008;1009 +918,832;833;917;919;1009;1010 +919,833;834;918;920;1010;1011 +920,834;835;919;921;1011;1012 +921,835;836;920;922;1012;1013 +922,836;837;921;923;1013;1014 +923,837;838;922;924;1014;1015 +924,838;839;923;925;1015;1016 +925,839;840;924;926;1016;1017 +926,840;925;927;1017;1018;1019 +927,840;841;926;928;1019;1020 +928,841;842;927;929;1020;1021 +929,842;843;928;930;1021;1022 +930,843;844;929;931;1022;1023 +931,844;845;930;932;1023;1024 +932,845;846;931;933;1024;1025 +933,846;847;932;934;1025;1026 +934,847;848;933;935;1026;1027 +935,848;849;934;936;1027;1028 +936,849;850;935;937;1028;1029 +937,850;851;936;938;1029;1030 +938,851;852;937;939;1030;1031 +939,852;853;938;940;1031;1032 +940,853;854;939;941;1032;1033 +941,854;940;942;1033;1034;1035 +942,854;855;941;943;1035;1036 +943,855;856;942;944;1036;1037 +944,856;857;943;945;1037;1038 +945,857;858;944;946;1038;1039 +946,858;859;945;947;1039;1040 +947,859;860;946;948;1040;1041 +948,860;861;947;949;1041;1042 +949,861;862;948;950;1042;1043 +950,862;863;949;951;1043;1044 +951,863;864;950;952;1044;1045 +952,864;865;951;953;1045;1046 +953,865;866;952;954;1046;1047 +954,866;867;953;955;1047;1048 +955,867;868;954;956;1048;1049 +956,868;955;957;1049;1050;1051 +957,868;869;956;958;1051;1052 +958,869;870;957;959;1052;1053 +959,870;871;958;960;1053;1054 +960,871;872;959;961;1054;1055 +961,872;873;960;962;1055;1056 +962,873;874;961;963;1056;1057 +963,874;875;962;964;1057;1058 
+964,875;876;963;965;1058;1059 +965,876;877;964;966;1059;1060 +966,877;878;965;967;1060;1061 +967,878;879;966;968;1061;1062 +968,879;880;967;969;1062;1063 +969,880;881;968;970;1063;1064 +970,881;882;969;971;1064;1065 +971,882;970;972;1065;1066;1067 +972,882;883;971;973;1067;1068 +973,883;884;972;974;1068;1069 +974,884;885;973;975;1069;1070 +975,885;886;974;976;1070;1071 +976,886;887;975;977;1071;1072 +977,887;888;976;978;1072;1073 +978,888;889;977;979;1073;1074 +979,889;890;978;980;1074;1075 +980,890;891;979;981;1075;1076 +981,891;892;980;982;1076;1077 +982,892;893;981;983;1077;1078 +983,893;894;982;984;1078;1079 +984,894;895;983;985;1079;1080 +985,812;895;896;984;1080;1081 +986,896;987;1081;1175 +987,896;897;986;988;1082 +988,897;898;987;989;1082;1083 +989,898;899;988;990;1083;1084 +990,899;900;989;991;1084;1085 +991,900;901;990;992;1085;1086 +992,901;902;991;993;1086;1087 +993,902;903;992;994;1087;1088 +994,903;904;993;995;1088;1089 +995,904;905;994;996;1089;1090 +996,905;906;995;997;1090;1091 +997,906;907;996;998;1091;1092 +998,907;908;997;999;1092;1093 +999,908;909;998;1000;1093;1094 +1000,909;910;999;1001;1094;1095 +1001,910;911;1000;1002;1095 +1002,911;1001;1003;1096 +1003,911;912;1002;1004;1096;1097 +1004,912;913;1003;1005;1097;1098 +1005,913;914;1004;1006;1098;1099 +1006,914;915;1005;1007;1099;1100 +1007,915;916;1006;1008;1100;1101 +1008,916;917;1007;1009;1101;1102 +1009,917;918;1008;1010;1102;1103 +1010,918;919;1009;1011;1103;1104 +1011,919;920;1010;1012;1104;1105 +1012,920;921;1011;1013;1105;1106 +1013,921;922;1012;1014;1106;1107 +1014,922;923;1013;1015;1107;1108 +1015,923;924;1014;1016;1108;1109 +1016,924;925;1015;1017;1109;1110 +1017,925;926;1016;1018;1110;1111 +1018,926;1017;1019;1111;1112;1113 +1019,926;927;1018;1020;1113;1114 +1020,927;928;1019;1021;1114;1115 +1021,928;929;1020;1022;1115;1116 +1022,929;930;1021;1023;1116;1117 +1023,930;931;1022;1024;1117;1118 +1024,931;932;1023;1025;1118;1119 +1025,932;933;1024;1026;1119;1120 
+1026,933;934;1025;1027;1120;1121 +1027,934;935;1026;1028;1121;1122 +1028,935;936;1027;1029;1122;1123 +1029,936;937;1028;1030;1123;1124 +1030,937;938;1029;1031;1124;1125 +1031,938;939;1030;1032;1125;1126 +1032,939;940;1031;1033;1126;1127 +1033,940;941;1032;1034;1127;1128 +1034,941;1033;1035;1128 +1035,941;942;1034;1036;1129 +1036,942;943;1035;1037;1129;1130 +1037,943;944;1036;1038;1130;1131 +1038,944;945;1037;1039;1131;1132 +1039,945;946;1038;1040;1132;1133 +1040,946;947;1039;1041;1133;1134 +1041,947;948;1040;1042;1134;1135 +1042,948;949;1041;1043;1135;1136 +1043,949;950;1042;1044;1136;1137 +1044,950;951;1043;1045;1137;1138 +1045,951;952;1044;1046;1138;1139 +1046,952;953;1045;1047;1139;1140 +1047,953;954;1046;1048;1140;1141 +1048,954;955;1047;1049;1141;1142 +1049,955;956;1048;1050;1142 +1050,956;1049;1051;1143 +1051,956;957;1050;1052;1143;1144 +1052,957;958;1051;1053;1144;1145 +1053,958;959;1052;1054;1145;1146 +1054,959;960;1053;1055;1146;1147 +1055,960;961;1054;1056;1147;1148 +1056,961;962;1055;1057;1148;1149 +1057,962;963;1056;1058;1149;1150 +1058,963;964;1057;1059;1150;1151 +1059,964;965;1058;1060;1151;1152 +1060,965;966;1059;1061;1152;1153 +1061,966;967;1060;1062;1153;1154 +1062,967;968;1061;1063;1154;1155 +1063,968;969;1062;1064;1155;1156 +1064,969;970;1063;1065;1156;1157 +1065,970;971;1064;1066;1157;1158 +1066,971;1065;1067;1158;1159;1160 +1067,971;972;1066;1068;1160;1161 +1068,972;973;1067;1069;1161;1162 +1069,973;974;1068;1070;1162;1163 +1070,974;975;1069;1071;1163;1164 +1071,975;976;1070;1072;1164;1165 +1072,976;977;1071;1073;1165;1166 +1073,977;978;1072;1074;1166;1167 +1074,978;979;1073;1075;1167;1168 +1075,979;980;1074;1076;1168;1169 +1076,980;981;1075;1077;1169;1170 +1077,981;982;1076;1078;1170;1171 +1078,982;983;1077;1079;1171;1172 +1079,983;984;1078;1080;1172;1173 +1080,984;985;1079;1081;1173;1174 +1081,896;985;986;1080;1174;1175 +1082,987;988;1083 +1083,988;989;1082;1084 +1084,989;990;1083;1085;1176 +1085,990;991;1084;1086;1176;1177 
+1086,991;992;1085;1087;1177;1178 +1087,992;993;1086;1088;1178;1179 +1088,993;994;1087;1089;1179;1180 +1089,994;995;1088;1090;1180;1181 +1090,995;996;1089;1091;1181;1182 +1091,996;997;1090;1092;1182;1183 +1092,997;998;1091;1093;1183;1184 +1093,998;999;1092;1094;1184 +1094,999;1000;1093;1095 +1095,1000;1001;1094 +1096,1002;1003;1097 +1097,1003;1004;1096;1098;1185 +1098,1004;1005;1097;1099;1185;1186 +1099,1005;1006;1098;1100;1186;1187 +1100,1006;1007;1099;1101;1187;1188 +1101,1007;1008;1100;1102;1188;1189 +1102,1008;1009;1101;1103;1189;1190 +1103,1009;1010;1102;1104;1190;1191 +1104,1010;1011;1103;1105;1191;1192 +1105,1011;1012;1104;1106;1192;1193 +1106,1012;1013;1105;1107;1193;1194 +1107,1013;1014;1106;1108;1194;1195 +1108,1014;1015;1107;1109;1195;1196 +1109,1015;1016;1108;1110;1196;1197 +1110,1016;1017;1109;1111;1197;1198 +1111,1017;1018;1110;1112;1198;1199 +1112,1018;1111;1113;1199;1200;1201 +1113,1018;1019;1112;1114;1201;1202 +1114,1019;1020;1113;1115;1202;1203 +1115,1020;1021;1114;1116;1203;1204 +1116,1021;1022;1115;1117;1204;1205 +1117,1022;1023;1116;1118;1205;1206 +1118,1023;1024;1117;1119;1206;1207 +1119,1024;1025;1118;1120;1207;1208 +1120,1025;1026;1119;1121;1208;1209 +1121,1026;1027;1120;1122;1209;1210 +1122,1027;1028;1121;1123;1210;1211 +1123,1028;1029;1122;1124;1211;1212 +1124,1029;1030;1123;1125;1212;1213 +1125,1030;1031;1124;1126;1213;1214 +1126,1031;1032;1125;1127;1214;1215 +1127,1032;1033;1126;1128;1215 +1128,1033;1034;1127 +1129,1035;1036;1130 +1130,1036;1037;1129;1131 +1131,1037;1038;1130;1132;1216 +1132,1038;1039;1131;1133;1216;1217 +1133,1039;1040;1132;1134;1217;1218 +1134,1040;1041;1133;1135;1218;1219 +1135,1041;1042;1134;1136;1219;1220 +1136,1042;1043;1135;1137;1220;1221 +1137,1043;1044;1136;1138;1221;1222 +1138,1044;1045;1137;1139;1222;1223 +1139,1045;1046;1138;1140;1223;1224 +1140,1046;1047;1139;1141;1224 +1141,1047;1048;1140;1142 +1142,1048;1049;1141 +1143,1050;1051;1144 +1144,1051;1052;1143;1145;1225 +1145,1052;1053;1144;1146;1225;1226 
+1146,1053;1054;1145;1147;1226;1227 +1147,1054;1055;1146;1148;1227;1228 +1148,1055;1056;1147;1149;1228;1229 +1149,1056;1057;1148;1150;1229;1230 +1150,1057;1058;1149;1151;1230;1231 +1151,1058;1059;1150;1152;1231;1232 +1152,1059;1060;1151;1153;1232;1233 +1153,1060;1061;1152;1154;1233;1234 +1154,1061;1062;1153;1155;1234;1235 +1155,1062;1063;1154;1156;1235;1236 +1156,1063;1064;1155;1157;1236;1237 +1157,1064;1065;1156;1158;1237;1238 +1158,1065;1066;1157;1159;1238;1239 +1159,1066;1158;1160;1239;1240;1241 +1160,1066;1067;1159;1161;1241;1242 +1161,1067;1068;1160;1162;1242;1243 +1162,1068;1069;1161;1163;1243;1244 +1163,1069;1070;1162;1164;1244;1245 +1164,1070;1071;1163;1165;1245;1246 +1165,1071;1072;1164;1166;1246;1247 +1166,1072;1073;1165;1167;1247;1248 +1167,1073;1074;1166;1168;1248;1249 +1168,1074;1075;1167;1169;1249;1250 +1169,1075;1076;1168;1170;1250;1251 +1170,1076;1077;1169;1171;1251;1252 +1171,1077;1078;1170;1172;1252;1253 +1172,1078;1079;1171;1173;1253;1254 +1173,1079;1080;1172;1174;1254;1255 +1174,1080;1081;1173;1175;1255 +1175,986;1081;1174 +1176,1084;1085;1177 +1177,1085;1086;1176;1178 +1178,1086;1087;1177;1179 +1179,1087;1088;1178;1180 +1180,1088;1089;1179;1181 +1181,1089;1090;1180;1182 +1182,1090;1091;1181;1183 +1183,1091;1092;1182;1184 +1184,1092;1093;1183 +1185,1097;1098;1186 +1186,1098;1099;1185;1187;1256 +1187,1099;1100;1186;1188;1256;1257 +1188,1100;1101;1187;1189;1257;1258 +1189,1101;1102;1188;1190;1258;1259 +1190,1102;1103;1189;1191;1259;1260 +1191,1103;1104;1190;1192;1260;1261 +1192,1104;1105;1191;1193;1261;1262 +1193,1105;1106;1192;1194;1262;1263 +1194,1106;1107;1193;1195;1263;1264 +1195,1107;1108;1194;1196;1264;1265 +1196,1108;1109;1195;1197;1265;1266 +1197,1109;1110;1196;1198;1266;1267 +1198,1110;1111;1197;1199;1267;1268 +1199,1111;1112;1198;1200;1268;1269 +1200,1112;1199;1201;1269;1270 +1201,1112;1113;1200;1202;1270;1271 +1202,1113;1114;1201;1203;1271;1272 +1203,1114;1115;1202;1204;1272;1273 +1204,1115;1116;1203;1205;1273;1274 
+1205,1116;1117;1204;1206;1274;1275 +1206,1117;1118;1205;1207;1275;1276 +1207,1118;1119;1206;1208;1276;1277 +1208,1119;1120;1207;1209;1277;1278 +1209,1120;1121;1208;1210;1278;1279 +1210,1121;1122;1209;1211;1279;1280 +1211,1122;1123;1210;1212;1280;1281 +1212,1123;1124;1211;1213;1281;1282 +1213,1124;1125;1212;1214;1282;1283 +1214,1125;1126;1213;1215;1283 +1215,1126;1127;1214 +1216,1131;1132;1217 +1217,1132;1133;1216;1218 +1218,1133;1134;1217;1219 +1219,1134;1135;1218;1220 +1220,1135;1136;1219;1221 +1221,1136;1137;1220;1222 +1222,1137;1138;1221;1223 +1223,1138;1139;1222;1224 +1224,1139;1140;1223 +1225,1144;1145;1226 +1226,1145;1146;1225;1227;1284 +1227,1146;1147;1226;1228;1284;1285 +1228,1147;1148;1227;1229;1285;1286 +1229,1148;1149;1228;1230;1286;1287 +1230,1149;1150;1229;1231;1287;1288 +1231,1150;1151;1230;1232;1288;1289 +1232,1151;1152;1231;1233;1289;1290 +1233,1152;1153;1232;1234;1290;1291 +1234,1153;1154;1233;1235;1291;1292 +1235,1154;1155;1234;1236;1292;1293 +1236,1155;1156;1235;1237;1293;1294 +1237,1156;1157;1236;1238;1294;1295 +1238,1157;1158;1237;1239;1295;1296 +1239,1158;1159;1238;1240;1296;1297 +1240,1159;1239;1241;1297;1298 +1241,1159;1160;1240;1242;1298;1299 +1242,1160;1161;1241;1243;1299;1300 +1243,1161;1162;1242;1244;1300;1301 +1244,1162;1163;1243;1245;1301;1302 +1245,1163;1164;1244;1246;1302;1303 +1246,1164;1165;1245;1247;1303;1304 +1247,1165;1166;1246;1248;1304;1305 +1248,1166;1167;1247;1249;1305;1306 +1249,1167;1168;1248;1250;1306;1307 +1250,1168;1169;1249;1251;1307;1308 +1251,1169;1170;1250;1252;1308;1309 +1252,1170;1171;1251;1253;1309;1310 +1253,1171;1172;1252;1254;1310;1311 +1254,1172;1173;1253;1255;1311 +1255,1173;1174;1254 +1256,1186;1187;1257 +1257,1187;1188;1256;1258 +1258,1188;1189;1257;1259;1312 +1259,1189;1190;1258;1260;1312;1313 +1260,1190;1191;1259;1261;1313;1314 +1261,1191;1192;1260;1262;1314;1315 +1262,1192;1193;1261;1263;1315;1316 +1263,1193;1194;1262;1264;1316;1317 +1264,1194;1195;1263;1265;1317;1318 
+1265,1195;1196;1264;1266;1318;1319 +1266,1196;1197;1265;1267;1319;1320 +1267,1197;1198;1266;1268;1320 +1268,1198;1199;1267;1269 +1269,1199;1200;1268 +1270,1200;1201;1271 +1271,1201;1202;1270;1272 +1272,1202;1203;1271;1273;1321 +1273,1203;1204;1272;1274;1321;1322 +1274,1204;1205;1273;1275;1322;1323 +1275,1205;1206;1274;1276;1323;1324 +1276,1206;1207;1275;1277;1324;1325 +1277,1207;1208;1276;1278;1325;1326 +1278,1208;1209;1277;1279;1326;1327 +1279,1209;1210;1278;1280;1327;1328 +1280,1210;1211;1279;1281;1328;1329 +1281,1211;1212;1280;1282;1329 +1282,1212;1213;1281;1283 +1283,1213;1214;1282 +1284,1226;1227;1285 +1285,1227;1228;1284;1286 +1286,1228;1229;1285;1287;1330 +1287,1229;1230;1286;1288;1330;1331 +1288,1230;1231;1287;1289;1331;1332 +1289,1231;1232;1288;1290;1332;1333 +1290,1232;1233;1289;1291;1333;1334 +1291,1233;1234;1290;1292;1334;1335 +1292,1234;1235;1291;1293;1335;1336 +1293,1235;1236;1292;1294;1336;1337 +1294,1236;1237;1293;1295;1337;1338 +1295,1237;1238;1294;1296;1338 +1296,1238;1239;1295;1297 +1297,1239;1240;1296 +1298,1240;1241;1299 +1299,1241;1242;1298;1300 +1300,1242;1243;1299;1301;1339 +1301,1243;1244;1300;1302;1339;1340 +1302,1244;1245;1301;1303;1340;1341 +1303,1245;1246;1302;1304;1341;1342 +1304,1246;1247;1303;1305;1342;1343 +1305,1247;1248;1304;1306;1343;1344 +1306,1248;1249;1305;1307;1344;1345 +1307,1249;1250;1306;1308;1345;1346 +1308,1250;1251;1307;1309;1346;1347 +1309,1251;1252;1308;1310;1347 +1310,1252;1253;1309;1311 +1311,1253;1254;1310 +1312,1258;1259;1313 +1313,1259;1260;1312;1314 +1314,1260;1261;1313;1315 +1315,1261;1262;1314;1316 +1316,1262;1263;1315;1317 +1317,1263;1264;1316;1318 +1318,1264;1265;1317;1319 +1319,1265;1266;1318;1320 +1320,1266;1267;1319 +1321,1272;1273;1322 +1322,1273;1274;1321;1323 +1323,1274;1275;1322;1324 +1324,1275;1276;1323;1325 +1325,1276;1277;1324;1326 +1326,1277;1278;1325;1327 +1327,1278;1279;1326;1328 +1328,1279;1280;1327;1329 +1329,1280;1281;1328 +1330,1286;1287;1331 +1331,1287;1288;1330;1332 
+1332,1288;1289;1331;1333 +1333,1289;1290;1332;1334 +1334,1290;1291;1333;1335 +1335,1291;1292;1334;1336 +1336,1292;1293;1335;1337 +1337,1293;1294;1336;1338 +1338,1294;1295;1337 +1339,1300;1301;1340 +1340,1301;1302;1339;1341 +1341,1302;1303;1340;1342 +1342,1303;1304;1341;1343 +1343,1304;1305;1342;1344 +1344,1305;1306;1343;1345 +1345,1306;1307;1344;1346 +1346,1307;1308;1345;1347 +1347,1308;1309;1346 diff --git a/CaloClusterGNN/scripts/analysis_for_sophie.py b/CaloClusterGNN/scripts/analysis_for_sophie.py new file mode 100644 index 0000000..61b5493 --- /dev/null +++ b/CaloClusterGNN/scripts/analysis_for_sophie.py @@ -0,0 +1,455 @@ +#!/usr/bin/env python3 +""" +Comprehensive analysis of GNN clustering on MDC2025 v2 (with pileup). +Produces detailed diagnostic plots for comparison with Sophie's results. + +Runs GNN inference on packed test graphs to produce: + 1. Truth vs reco cluster-size histograms + 2. Truth vs reco cluster-energy histograms + 3. Split/merge rate by cluster size and energy + 4. Per-cluster purity and completeness distributions + 5. 
Detailed summary statistics + +Usage: + source setup_env.sh + python3 scripts/analysis_for_sophie.py +""" + +import sys +from collections import Counter, defaultdict +from pathlib import Path + +sys.path.insert(0, str(Path(__file__).resolve().parents[1])) + +import numpy as np +import torch +import yaml + +from src.data.normalization import load_stats, normalize_graph +from src.inference.cluster_reco import reconstruct_clusters +from src.models import build_model + + +def analyze_clusters(reco_labels, truth_labels, energies): + """Detailed per-cluster analysis for one graph.""" + reco_ids = sorted(set(reco_labels[reco_labels >= 0].tolist())) + truth_ids = sorted(set(truth_labels[truth_labels >= 0].tolist())) + + # Truth cluster properties + truth_clusters = [] + for tid in truth_ids: + mask = truth_labels == tid + truth_clusters.append({ + "n_hits": int(mask.sum()), + "energy": float(energies[mask].sum()), + "max_hit_energy": float(energies[mask].max()), + }) + + # Reco cluster properties + reco_clusters = [] + for rid in reco_ids: + mask = reco_labels == rid + reco_clusters.append({ + "n_hits": int(mask.sum()), + "energy": float(energies[mask].sum()), + }) + + # Overlap matrix for matching + overlap = defaultdict(lambda: defaultdict(float)) + reco_energy = defaultdict(float) + truth_energy = {tid: float(energies[truth_labels == tid].sum()) for tid in truth_ids} + + for i in range(len(energies)): + e = energies[i] + r = reco_labels[i] + t = truth_labels[i] + if r >= 0: + reco_energy[r] += e + if r >= 0 and t >= 0: + overlap[r][t] += e + + # Greedy matching + matched_truth = set() + matched_reco = set() + match_purities = [] + match_completenesses = [] + + for r in sorted(reco_ids): + if r not in overlap: + continue + best_t = max(overlap[r], key=lambda t: overlap[r][t]) + shared = overlap[r][best_t] + pur = shared / reco_energy[r] if reco_energy[r] > 0 else 0 + comp = shared / truth_energy[best_t] if truth_energy[best_t] > 0 else 0 + if pur > 0.5 and comp > 0.5: + 
match_purities.append(pur) + match_completenesses.append(comp) + matched_truth.add(best_t) + matched_reco.add(r) + + # Per-truth-cluster: matched, split, merged + truth_detail = [] + for i, tid in enumerate(truth_ids): + # Which reco clusters overlap this truth cluster significantly? + overlapping_reco = [] + for r in reco_ids: + if r in overlap and tid in overlap[r]: + frac_of_reco = overlap[r][tid] / reco_energy[r] if reco_energy[r] > 0 else 0 + frac_of_truth = overlap[r][tid] / truth_energy[tid] if truth_energy[tid] > 0 else 0 + if frac_of_truth > 0.1: + overlapping_reco.append((r, frac_of_reco, frac_of_truth)) + + is_matched = tid in matched_truth + is_split = len([r for r, fr, ft in overlapping_reco if ft > 0.2]) > 1 + is_merged = not is_matched and len(overlapping_reco) > 0 + + truth_detail.append({ + **truth_clusters[i], + "matched": is_matched, + "split": is_split, + "merged": is_merged, + "n_overlapping_reco": len(overlapping_reco), + }) + + # Per-reco-cluster: check for merges + reco_detail = [] + for i, rid in enumerate(reco_ids): + sig_truth = [t for t, e in overlap[rid].items() + if reco_energy[rid] > 0 and e / reco_energy[rid] > 0.1] + reco_detail.append({ + **reco_clusters[i], + "is_merge": len(sig_truth) > 1, + "n_truth_contrib": len(sig_truth), + }) + + return { + "truth_clusters": truth_detail, + "reco_clusters": reco_detail, + "purities": match_purities, + "completenesses": match_completenesses, + "n_matched_truth": len(matched_truth), + "n_matched_reco": len(matched_reco), + } + + +def main(): + device = torch.device("cpu") + + # Load models + models = {} + for name, cfg_path, ckpt_path in [ + ("SimpleEdgeNet", "configs/default.yaml", + "outputs/runs/simple_edge_net_v2/checkpoints/best_model.pt"), + ("CaloClusterNet", "configs/calo_cluster_net.yaml", + "outputs/runs/calo_cluster_net_v2_stage1/checkpoints/best_model.pt"), + ]: + with open(cfg_path) as f: + cfg = yaml.safe_load(f) + model = build_model(cfg) + ckpt = torch.load(ckpt_path, 
weights_only=False, map_location=device) + model.load_state_dict(ckpt["model_state_dict"]) + model.to(device).eval() + tau_edge = cfg["inference"]["tau_edge"] + models[name] = {"model": model, "tau_edge": tau_edge} + print(f" {name}: tau_edge={tau_edge}") + + stats = load_stats("data/normalization_stats.pt") + + # Load test graphs + graphs = torch.load("data/processed/test.pt", weights_only=False) + print(f"\nLoaded {len(graphs)} test graphs") + + # Run analysis for each model + results = {} + for model_name, m in models.items(): + print(f"\nAnalyzing {model_name}...") + all_truth = [] + all_reco = [] + all_pur = [] + all_comp = [] + + for gi, g in enumerate(graphs): + if gi % 1000 == 0: + print(f" {gi}/{len(graphs)}", flush=True) + + truth_labels = g.hit_truth_cluster.numpy() + # Recover raw energies before normalization (x[:, 0] = log(1+E)) + raw_energies = (torch.exp(g.x[:, 0]) - 1).numpy() + + # Normalize for model input + g_norm = g.clone() + normalize_graph(g_norm, stats) + with torch.no_grad(): + output = m["model"](g_norm.to(device)) + + if isinstance(output, dict): + logits = output["edge_logits"].cpu().numpy() + else: + logits = output.cpu().numpy() + + edge_index = g.edge_index.numpy() + n_nodes = g.x.shape[0] + + reco_labels, _ = reconstruct_clusters( + edge_index=edge_index, + edge_logits=logits, + n_nodes=n_nodes, + energies=raw_energies, + tau_edge=m["tau_edge"], + min_hits=1, + min_energy_mev=0.0, + ) + + result = analyze_clusters(reco_labels, truth_labels, raw_energies) + all_truth.extend(result["truth_clusters"]) + all_reco.extend(result["reco_clusters"]) + all_pur.extend(result["purities"]) + all_comp.extend(result["completenesses"]) + + results[model_name] = { + "truth": all_truth, + "reco": all_reco, + "purities": all_pur, + "completenesses": all_comp, + } + + # Print summary stats + for model_name, res in results.items(): + truth = res["truth"] + reco = res["reco"] + n_truth = len(truth) + n_reco = len(reco) + n_matched = sum(1 for t in truth 
if t["matched"]) + n_split = sum(1 for t in truth if t["split"]) + n_merged = sum(1 for r in reco if r["is_merge"]) + + print(f"\n{'='*60}") + print(f" {model_name} — MDC2025 v2 Test Set ({len(graphs)} graphs)") + print(f"{'='*60}") + print(f" Truth clusters: {n_truth:,}") + print(f" Reco clusters: {n_reco:,}") + print(f" Matched truth: {n_matched:,} ({n_matched/n_truth:.1%})") + print(f" Split truth: {n_split:,} ({n_split/n_truth:.2%})") + print(f" Merged reco: {n_merged:,} ({n_merged/n_reco:.2%}" if n_reco > 0 else f" Merged reco: 0") + print(f" Mean purity: {np.mean(res['purities']):.4f}") + print(f" Mean completeness: {np.mean(res['completenesses']):.4f}") + + # Split rate by cluster size + print(f"\n Split rate by truth cluster size:") + print(f" {'Size':<8} {'Total':>8} {'Split':>8} {'Rate':>8}") + for sz in [1, 2, 3, 4, 5, "6+"]: + if sz == "6+": + in_bin = [t for t in truth if t["n_hits"] >= 6] + else: + in_bin = [t for t in truth if t["n_hits"] == sz] + n = len(in_bin) + s = sum(1 for t in in_bin if t["split"]) + print(f" {str(sz):<8} {n:>8} {s:>8} {s/n*100 if n else 0:>7.1f}%") + + # Split rate by energy + print(f"\n Split rate by truth cluster energy:") + print(f" {'Energy':<15} {'Total':>8} {'Split':>8} {'Rate':>8}") + for lo, hi, lbl in [(0, 10, "<10 MeV"), (10, 50, "10-50 MeV"), + (50, 200, "50-200 MeV"), (200, float("inf"), ">200 MeV")]: + in_bin = [t for t in truth if lo <= t["energy"] < hi] + n = len(in_bin) + s = sum(1 for t in in_bin if t["split"]) + print(f" {lbl:<15} {n:>8} {s:>8} {s/n*100 if n else 0:>7.1f}%") + + # ── Plots ── + import matplotlib + matplotlib.use("Agg") + import matplotlib.pyplot as plt + + out_dir = Path("outputs/analysis_for_sophie") + out_dir.mkdir(parents=True, exist_ok=True) + + for model_name, res in results.items(): + truth = res["truth"] + reco = res["reco"] + short = "SEN" if "Simple" in model_name else "CCN" + tau = models[model_name]["tau_edge"] + + fig, axes = plt.subplots(3, 3, figsize=(20, 18)) + fig.suptitle( + 
f"MDC2025 v2 (Pileup) — {model_name} (τ={tau})\n" + f"{len(graphs):,} test disk-graphs, calo-entrant truth", + fontsize=14, fontweight="bold") + + # 1. Hits per cluster: truth vs reco + ax = axes[0, 0] + truth_nhits = [t["n_hits"] for t in truth] + reco_nhits = [r["n_hits"] for r in reco] + bins_h = np.arange(0.5, 16.5, 1) + ax.hist(truth_nhits, bins=bins_h, alpha=0.6, label=f"Truth (n={len(truth):,})", + color="forestgreen", edgecolor="white") + ax.hist(reco_nhits, bins=bins_h, alpha=0.6, label=f"Reco (n={len(reco):,})", + color="steelblue", edgecolor="white") + ax.set_xlabel("Hits per cluster") + ax.set_ylabel("Count") + ax.set_title("Cluster Size: Truth vs Reco") + ax.legend() + ax.grid(alpha=0.3) + ax.set_yscale("log") + + # 2. Energy per cluster: truth vs reco + ax = axes[0, 1] + truth_energy = [t["energy"] for t in truth] + reco_energy = [r["energy"] for r in reco] + bins_e = np.linspace(0, 300, 60) + ax.hist(truth_energy, bins=bins_e, alpha=0.6, label="Truth", + color="forestgreen", edgecolor="white") + ax.hist(reco_energy, bins=bins_e, alpha=0.6, label="Reco", + color="steelblue", edgecolor="white") + ax.set_xlabel("Total cluster energy (MeV)") + ax.set_ylabel("Count") + ax.set_title("Cluster Energy: Truth vs Reco") + ax.legend() + ax.grid(alpha=0.3) + ax.set_yscale("log") + + # 3. Purity distribution + ax = axes[0, 2] + ax.hist(res["purities"], bins=np.linspace(0.5, 1.0, 60), + color="steelblue", edgecolor="white", alpha=0.8) + ax.axvline(np.mean(res["purities"]), color="red", linestyle="--", + label=f"Mean: {np.mean(res['purities']):.4f}") + ax.set_xlabel("Purity") + ax.set_ylabel("Count") + ax.set_title("Matched Cluster Purity") + ax.legend() + ax.grid(alpha=0.3) + + # 4. 
Completeness distribution + ax = axes[1, 0] + ax.hist(res["completenesses"], bins=np.linspace(0.5, 1.0, 60), + color="seagreen", edgecolor="white", alpha=0.8) + ax.axvline(np.mean(res["completenesses"]), color="red", linestyle="--", + label=f"Mean: {np.mean(res['completenesses']):.4f}") + ax.set_xlabel("Completeness") + ax.set_ylabel("Count") + ax.set_title("Matched Cluster Completeness") + ax.legend() + ax.grid(alpha=0.3) + + # 5. Truth match rate by cluster size + ax = axes[1, 1] + size_bins = list(range(1, 11)) + match_rates = [] + counts = [] + for sz in size_bins: + in_bin = [t for t in truth if t["n_hits"] == sz] + if in_bin: + match_rates.append(sum(1 for t in in_bin if t["matched"]) / len(in_bin) * 100) + counts.append(len(in_bin)) + else: + match_rates.append(0) + counts.append(0) + ax.bar(size_bins, match_rates, color="steelblue", alpha=0.8) + for i, (mr, c) in enumerate(zip(match_rates, counts)): + if c > 0: + ax.text(size_bins[i], mr + 1, f"{c:,}", ha="center", fontsize=7) + ax.set_xlabel("Truth cluster size (hits)") + ax.set_ylabel("Truth match rate (%)") + ax.set_title("Truth Match Rate by Cluster Size\n(numbers = cluster count)") + ax.set_ylim(0, 105) + ax.grid(alpha=0.3, axis="y") + + # 6. 
Truth match rate by energy + ax = axes[1, 2] + energy_edges = [0, 5, 10, 20, 50, 100, 200, 500] + energy_labels = ["0-5", "5-10", "10-20", "20-50", "50-100", "100-200", "200+"] + mr_energy = [] + cnt_energy = [] + for i in range(len(energy_edges) - 1): + lo, hi = energy_edges[i], energy_edges[i + 1] + in_bin = [t for t in truth if lo <= t["energy"] < hi] + if in_bin: + mr_energy.append(sum(1 for t in in_bin if t["matched"]) / len(in_bin) * 100) + cnt_energy.append(len(in_bin)) + else: + mr_energy.append(0) + cnt_energy.append(0) + x_pos = np.arange(len(energy_labels)) + ax.bar(x_pos, mr_energy, color="seagreen", alpha=0.8) + for i, (mr, c) in enumerate(zip(mr_energy, cnt_energy)): + if c > 0: + ax.text(i, mr + 1, f"{c:,}", ha="center", fontsize=7) + ax.set_xticks(x_pos) + ax.set_xticklabels(energy_labels) + ax.set_xlabel("Truth cluster energy (MeV)") + ax.set_ylabel("Truth match rate (%)") + ax.set_title("Truth Match Rate by Energy\n(numbers = cluster count)") + ax.set_ylim(0, 105) + ax.grid(alpha=0.3, axis="y") + + # 7. Split rate by cluster size + ax = axes[2, 0] + split_rates = [] + for sz in size_bins: + in_bin = [t for t in truth if t["n_hits"] == sz] + if in_bin: + split_rates.append(sum(1 for t in in_bin if t["split"]) / len(in_bin) * 100) + else: + split_rates.append(0) + ax.bar(size_bins, split_rates, color="coral", alpha=0.8) + ax.set_xlabel("Truth cluster size (hits)") + ax.set_ylabel("Split rate (%)") + ax.set_title("Split Rate by Truth Cluster Size") + ax.grid(alpha=0.3, axis="y") + + # 8. 
Merge rate by reco cluster size + ax = axes[2, 1] + reco_sizes = [r["n_hits"] for r in reco] + reco_size_bins = list(range(1, 11)) + merge_rates = [] + for sz in reco_size_bins: + in_bin = [r for r in reco if r["n_hits"] == sz] + if in_bin: + merge_rates.append(sum(1 for r in in_bin if r["is_merge"]) / len(in_bin) * 100) + else: + merge_rates.append(0) + ax.bar(reco_size_bins, merge_rates, color="darkorange", alpha=0.8) + ax.set_xlabel("Reco cluster size (hits)") + ax.set_ylabel("Merge rate (%)") + ax.set_title("Merge Rate by Reco Cluster Size") + ax.grid(alpha=0.3, axis="y") + + # 9. Summary table + ax = axes[2, 2] + ax.axis("off") + n_truth = len(truth) + n_reco = len(reco) + n_matched = sum(1 for t in truth if t["matched"]) + n_split = sum(1 for t in truth if t["split"]) + n_merged = sum(1 for r in reco if r["is_merge"]) + table_data = [ + ["Test disk-graphs", f"{len(graphs):,}"], + ["Truth clusters", f"{n_truth:,}"], + ["Reco clusters", f"{n_reco:,}"], + ["Truth match rate", f"{n_matched/n_truth:.1%}"], + ["Mean purity", f"{np.mean(res['purities']):.4f}"], + ["Mean completeness", f"{np.mean(res['completenesses']):.4f}"], + ["Splits (truth)", f"{n_split:,} ({n_split/n_truth:.2%})"], + ["Merges (reco)", f"{n_merged:,} ({n_merged/n_reco:.2%})"], + ["Singleton truth", f"{sum(1 for t in truth if t['n_hits']==1):,} " + f"({sum(1 for t in truth if t['n_hits']==1)/n_truth:.1%})"], + ] + table = ax.table(cellText=table_data, colLabels=["Metric", "Value"], + cellLoc="center", loc="center") + table.auto_set_font_size(False) + table.set_fontsize(11) + table.scale(1, 1.6) + ax.set_title(f"{model_name} Summary", pad=20) + + plt.tight_layout() + plot_path = out_dir / f"analysis_{short.lower()}.png" + plt.savefig(plot_path, dpi=150, bbox_inches="tight") + plt.close() + print(f"Saved {plot_path}") + + print(f"\nAll plots saved to {out_dir}/") + + +if __name__ == "__main__": + main() diff --git a/CaloClusterGNN/scripts/build_all_graphs.sh 
b/CaloClusterGNN/scripts/build_all_graphs.sh new file mode 100644 index 0000000..a5b981b --- /dev/null +++ b/CaloClusterGNN/scripts/build_all_graphs.sh @@ -0,0 +1,34 @@ +#!/bin/bash +# Build graphs for all splits from local ROOT files. +# Run on CPU node: bash scripts/build_all_graphs.sh +# +# Uses 500 events/file → ~50K graphs total, takes ~10 min. +# To use all events, remove the --n-events flag (will take ~4 hours). + +cd /exp/mu2e/app/users/wzhou2/projects/calorimeter/GNN +source setup_env.sh +set -e + +ROOT_DIR=/exp/mu2e/data/users/wzhou2/GNN/root_files_v2 +N_EVENTS=500 + +echo "=== Clearing old processed data ===" +find data/processed/ -name '*.pt' -delete 2>/dev/null || true +find data/processed/ -name '*.csv' -delete 2>/dev/null || true + +echo "" +echo "=== Building train graphs (35 files, $N_EVENTS events each) ===" +python3 scripts/build_graphs.py --split train --root-dir "$ROOT_DIR" --n-events "$N_EVENTS" --compute-norm + +echo "" +echo "=== Building val graphs (7 files, $N_EVENTS events each) ===" +python3 scripts/build_graphs.py --split val --root-dir "$ROOT_DIR" --n-events "$N_EVENTS" + +echo "" +echo "=== Building test graphs (8 files, $N_EVENTS events each) ===" +python3 scripts/build_graphs.py --split test --root-dir "$ROOT_DIR" --n-events "$N_EVENTS" + +echo "" +echo "=== Done ===" +echo "Total .pt files: $(find data/processed/ -name '*.pt' | wc -l)" +echo "Disk usage: $(du -sh data/processed/ | cut -f1)" diff --git a/CaloClusterGNN/scripts/build_graphs.py b/CaloClusterGNN/scripts/build_graphs.py new file mode 100644 index 0000000..670dbd7 --- /dev/null +++ b/CaloClusterGNN/scripts/build_graphs.py @@ -0,0 +1,140 @@ +""" +Build per-disk PyG graph files from EventNtuple ROOT files. + +Reads ROOT files listed in a split file, extracts per-disk graphs, +and saves them as .pt files to data/processed/. 
+ +Usage: + source setup_env.sh + python3 scripts/build_graphs.py --split train --n-files 5 + python3 scripts/build_graphs.py --split train # all files in split + python3 scripts/build_graphs.py --split all --n-files 3 # ignore splits, use first N files + python3 scripts/build_graphs.py --split train --root-dir /exp/mu2e/data/users/wzhou2/GNN/root_files +""" + +import argparse +import csv +import sys +import time +from pathlib import Path + +sys.path.insert(0, str(Path(__file__).resolve().parents[1])) + +import numpy as np +import torch +import yaml + +from src.data.dataset import extract_events_from_file +from src.data.normalization import compute_normalization_stats, save_stats +from src.geometry.crystal_geometry import load_crystal_map + + +def load_file_list(split_name, config): + """Load file list for a given split.""" + if split_name == "all": + import glob + pattern = "/exp/mu2e/data/users/wzhou2/GNN/root_files_v2/*.root" + return sorted(glob.glob(pattern)) + + split_path = Path(config["data"]["splits"][split_name]) + if not split_path.exists(): + print(f"ERROR: Split file not found: {split_path}", file=sys.stderr) + sys.exit(1) + with open(split_path) as f: + return [line.strip() for line in f if line.strip()] + + +def main(): + parser = argparse.ArgumentParser(description="Build PyG graphs from ROOT files") + parser.add_argument("--split", type=str, default="train", + choices=["train", "val", "test", "all"], + help="Which split to process") + parser.add_argument("--n-files", type=int, default=None, + help="Max number of files to process") + parser.add_argument("--n-events", type=int, default=None, + help="Max events per file") + parser.add_argument("--config", type=str, default="configs/default.yaml", + help="Config file path") + parser.add_argument("--root-dir", type=str, default=None, + help="Local directory containing ROOT files (remaps split paths)") + parser.add_argument("--compute-norm", action="store_true", + help="Compute normalization stats (train 
split only)") + args = parser.parse_args() + + with open(args.config) as f: + config = yaml.safe_load(f) + + files = load_file_list(args.split, config) + if args.root_dir: + root_dir = Path(args.root_dir) + files = [str(root_dir / Path(f).name) for f in files] + if args.n_files: + files = files[:args.n_files] + + print(f"Processing {len(files)} files from '{args.split}' split") + + crystal_map = load_crystal_map(config["data"]["crystal_geometry"]) + graph_cfg = config["graph"] + out_dir = Path(config["data"]["processed_dir"]) + out_dir.mkdir(parents=True, exist_ok=True) + + total_graphs = 0 + total_nodes = 0 + total_edges = 0 + all_diags = [] + saved_paths = [] + + t0 = time.time() + for fi, filepath in enumerate(files): + fname = Path(filepath).stem + print(f" [{fi+1}/{len(files)}] {Path(filepath).name}...", end=" ", flush=True) + ft0 = time.time() + file_graphs = 0 + + for data, ev_idx, disk_id, diag in extract_events_from_file( + filepath, crystal_map, graph_cfg, + max_events=args.n_events, + ): + out_name = f"{fname}_evt{ev_idx:06d}_disk{disk_id}.pt" + out_path = out_dir / out_name + torch.save(data, out_path) + saved_paths.append(out_path) + + total_graphs += 1 + total_nodes += diag["n_nodes"] + total_edges += diag["n_edges"] + all_diags.append(diag) + file_graphs += 1 + + dt = time.time() - ft0 + print(f"{file_graphs} graphs ({dt:.1f}s)") + + elapsed = time.time() - t0 + print(f"\nDone: {total_graphs} graphs from {len(files)} files in {elapsed:.1f}s") + print(f" Total nodes: {total_nodes}, Total edges: {total_edges}") + if all_diags: + avg_deg = np.mean([d["avg_degree"] for d in all_diags]) + iso = sum(d["n_isolated"] for d in all_diags) + print(f" Avg degree: {avg_deg:.1f}, Isolated nodes: {iso}") + + # Save diagnostics + diag_path = out_dir / f"diagnostics_{args.split}.csv" + if all_diags: + with open(diag_path, "w", newline="") as f: + writer = csv.DictWriter(f, fieldnames=all_diags[0].keys()) + writer.writeheader() + writer.writerows(all_diags) + print(f" 
Diagnostics: {diag_path}") + + # Compute normalization stats if requested (train only) + if args.compute_norm: + if args.split != "train": + print("WARNING: Normalization stats should only be computed from train split!") + print("\nComputing normalization stats...") + graphs = [torch.load(p, weights_only=False) for p in saved_paths] + stats = compute_normalization_stats(graphs) + save_stats(stats, config["data"]["normalization_stats"]) + + +if __name__ == "__main__": + main() diff --git a/CaloClusterGNN/scripts/evaluate_cluster_physics.py b/CaloClusterGNN/scripts/evaluate_cluster_physics.py new file mode 100644 index 0000000..137fc2c --- /dev/null +++ b/CaloClusterGNN/scripts/evaluate_cluster_physics.py @@ -0,0 +1,792 @@ +#!/usr/bin/env python3 +""" +Cluster-level physics evaluation: energy, centroid, and time residuals. + +For each matched reco<->truth cluster pair, computes: + - Energy residual: dE = E_reco - E_truth + - Centroid displacement: dr = |centroid_reco - centroid_truth| (x-y plane) + - Time residual: dt = t_reco - t_truth (seed hit = most energetic) + +Evaluates BFS + both GNN models against calo-entrant MC truth. 
def compute_cluster_physics(labels, positions, energies, times,
                            saliency_mask=None):
    """Per-cluster energy, energy-weighted centroid, and seed time.

    Parameters
    ----------
    labels : ndarray (N,) — cluster IDs, -1 = unassigned
    positions : ndarray (N, 2) — (x, y) in mm
    energies : ndarray (N,) — MeV
    times : ndarray (N,) — ns
    saliency_mask : ndarray (N,) bool, optional
        When given, physics quantities are computed from the salient hits
        only. Cluster membership itself is unchanged — every hit keeps its
        cluster for matching purposes (and n_hits counts the full cluster).

    Returns
    -------
    dict mapping cluster_id -> {energy, centroid_x, centroid_y, time, n_hits}
    """
    physics = {}
    for cluster_id in np.unique(labels):
        if cluster_id < 0:
            continue
        member = labels == cluster_id
        if saliency_mask is not None:
            # Restrict the physics computation to salient hits.
            selected = member & saliency_mask
            if not selected.any():
                # Every hit flagged non-salient — fall back to the full cluster.
                selected = member
        else:
            selected = member

        cluster_e = energies[selected]
        e_sum = cluster_e.sum()
        if e_sum <= 0:
            continue

        weights = cluster_e / e_sum
        xy = positions[selected]
        # Seed time = time of the most energetic hit (Offline convention).
        seed_time = float(times[selected][np.argmax(cluster_e)])

        physics[int(cluster_id)] = {
            "energy": float(e_sum),
            "centroid_x": float(np.dot(weights, xy[:, 0])),
            "centroid_y": float(np.dot(weights, xy[:, 1])),
            "time": seed_time,
            "n_hits": int(member.sum()),  # full cluster size, not salient subset
        }
    return physics


# ── Matching and residual computation ────────────────────────────────────────

def match_and_compute_residuals(pred_labels, truth_labels, positions,
                                energies, times, method_name,
                                saliency_mask=None):
    """Greedily match reco clusters to truth clusters and compute residuals.

    A (reco, truth) pair is accepted when the shared energy gives both
    purity > 0.5 and completeness > 0.5. Matching always uses full-cluster
    energies (all hits); the reported reco physics uses the saliency-filtered
    quantities when *saliency_mask* is provided.

    Returns a list of dicts, one per matched pair.
    """
    # Physics computed from salient hits only (if a mask is provided).
    reco_phys = compute_cluster_physics(pred_labels, positions, energies,
                                        times, saliency_mask=saliency_mask)
    true_phys = compute_cluster_physics(truth_labels, positions, energies, times)

    if not reco_phys or not true_phys:
        return []

    true_energy = {tid: true_phys[tid]["energy"] for tid in true_phys}

    # Energy shared between each (reco, truth) pair, plus total reco energy.
    shared_e = defaultdict(lambda: defaultdict(float))
    reco_energy = defaultdict(float)
    for hit in range(len(energies)):
        e = energies[hit]
        r = pred_labels[hit]
        t = truth_labels[hit]
        if r >= 0:
            reco_energy[r] += e
            if t >= 0:
                shared_e[r][t] += e

    rows = []
    claimed = set()
    for rid in sorted(reco_phys):
        if rid not in shared_e:
            continue
        tid = max(shared_e[rid], key=lambda k: shared_e[rid][k])
        if tid in claimed:
            continue
        common = shared_e[rid][tid]
        purity = common / reco_energy[rid] if reco_energy[rid] > 0 else 0
        completeness = common / true_energy[tid] if true_energy[tid] > 0 else 0
        if purity <= 0.5 or completeness <= 0.5:
            continue

        claimed.add(tid)
        rp = reco_phys[rid]
        tp = true_phys[tid]
        d_x = rp["centroid_x"] - tp["centroid_x"]
        d_y = rp["centroid_y"] - tp["centroid_y"]

        rows.append({
            "method": method_name,
            "truth_energy": tp["energy"],
            "truth_nhits": tp["n_hits"],
            "reco_energy": rp["energy"],
            "reco_nhits": rp["n_hits"],
            "dE": rp["energy"] - tp["energy"],
            "E_ratio": rp["energy"] / tp["energy"] if tp["energy"] > 0 else 0,
            "dx": d_x,
            "dy": d_y,
            "dr": np.sqrt(d_x**2 + d_y**2),
            "dt": rp["time"] - tp["time"],
            "purity": purity,
            "completeness": completeness,
        })
    return rows
def build_mc_truth_clusters(simids, edeps, disks, nhits,
                            calo_root_map, purity_thresh=0.7):
    """Build MC truth cluster labels using calo-entrant truth.

    Each hit's MC energy deposits are re-attributed to calo-entrant root
    particles via *calo_root_map* (a particle with no map entry is its own
    root). The hit joins the cluster of its dominant root if that root
    carries at least *purity_thresh* of the hit's MC energy; otherwise the
    hit stays unassigned (-1). Clusters are keyed by (disk, root).
    """
    truth_labels = np.full(nhits, -1, dtype=np.int64)
    cluster_map = {}
    next_label = 0

    for i in range(nhits):
        sids = np.array(simids[i])
        deps = np.array(edeps[i], dtype=np.float64)
        if len(sids) == 0 or deps.sum() <= 0:
            continue
        disk = int(disks[i])

        root_edep = {}
        for pid, dep in zip(sids, deps):
            root = calo_root_map.get((int(pid), disk), int(pid))
            root_edep[root] = root_edep.get(root, 0.0) + float(dep)

        best_root = max(root_edep, key=root_edep.get)
        purity = root_edep[best_root] / deps.sum()
        if purity < purity_thresh:
            continue
        key = (disk, best_root)
        if key not in cluster_map:
            cluster_map[key] = next_label
            next_label += 1
        truth_labels[i] = cluster_map[key]

    return truth_labels


# ── Main ─────────────────────────────────────────────────────────────────────

def main():
    """Run the BFS-vs-GNN cluster-physics comparison end to end.

    Loads the frozen checkpoints, replays events from EventNtuple ROOT
    files, builds one graph per disk, reconstructs clusters with each
    method, and writes a per-cluster residual CSV, a text summary, and
    comparison plots under --output-dir.
    """
    parser = argparse.ArgumentParser(
        description="Cluster-level physics evaluation: energy, centroid, time")
    parser.add_argument("--root-dir", type=str,
                        default="/exp/mu2e/data/users/wzhou2/GNN/root_files_v2")
    parser.add_argument("--file-list", type=str, default="splits/val_files.txt",
                        help="File listing ROOT files to process")
    parser.add_argument("--n-events", type=int, default=500,
                        help="Max events per file")
    parser.add_argument("--output-dir", type=str,
                        default="outputs/cluster_physics_eval")
    parser.add_argument("--device", type=str, default="cpu")
    args = parser.parse_args()

    device = torch.device(args.device)
    out_dir = Path(args.output_dir)
    out_dir.mkdir(parents=True, exist_ok=True)

    # ── Load models ──
    # Each entry: (config_path, checkpoint_path, bfs_expand_cut or None)
    configs = {
        "SimpleEdgeNet": ("configs/default.yaml",
                          "outputs/runs/simple_edge_net_v2/checkpoints/best_model.pt", None),
        "SEN+BFS10": ("configs/default.yaml",
                      "outputs/runs/simple_edge_net_v2/checkpoints/best_model.pt", 10),
        "CaloClusterNet": ("configs/calo_cluster_net.yaml",
                           "outputs/runs/calo_cluster_net_v2_stage1/checkpoints/best_model.pt", None),
        "CCN+BFS10": ("configs/calo_cluster_net.yaml",
                      "outputs/runs/calo_cluster_net_v2_stage1/checkpoints/best_model.pt", 10),
    }

    models = {}
    tau_edges = {}
    tau_nodes = {}
    bfs_ecs = {}
    # BUGFIX: reuse used to be keyed on name.split("+")[0] ("SEN"/"CCN"),
    # which never matches the dict keys ("SimpleEdgeNet"/"CaloClusterNet"),
    # so every entry silently reloaded its checkpoint. Cache on the
    # (config, checkpoint) pair instead so each checkpoint loads once.
    model_cache = {}
    for name, (cfg_path, ckpt_path, bfs_ec) in configs.items():
        with open(cfg_path) as f:
            cfg = yaml.safe_load(f)
        cache_key = (cfg_path, ckpt_path)
        if cache_key in model_cache:
            models[name] = model_cache[cache_key]
        else:
            model = build_model(cfg)
            ckpt = torch.load(ckpt_path, weights_only=False, map_location=device)
            model.load_state_dict(ckpt["model_state_dict"])
            model.to(device).eval()
            model_cache[cache_key] = model
            models[name] = model
        tau_edges[name] = cfg["inference"]["tau_edge"]
        model_type = cfg["model"].get("name", "SimpleEdgeNet")
        has_node = model_type == "CaloClusterNet"
        lam_node = cfg.get("train", {}).get("lambda_node", 0.0)
        # Saliency threshold only applies when the node head was trained.
        tau_nodes[name] = cfg["inference"].get("tau_node") if (has_node and lam_node > 0) else None
        bfs_ecs[name] = bfs_ec
        # "is not None" so a legal ExpandCut of 0 would still be reported.
        ec_str = f", bfs_ec={bfs_ec}" if bfs_ec is not None else ""
        print(f"Loaded {name}: tau_edge={tau_edges[name]}{ec_str}")

    # ── Load shared resources ──
    with open("configs/default.yaml") as f:
        base_cfg = yaml.safe_load(f)
    stats = load_stats(base_cfg["data"]["normalization_stats"])
    crystal_map = load_crystal_map("data/crystal_geometry.csv")
    crystal_disk_map = {cid: disk for cid, (disk, _, _) in crystal_map.items()}
    graph_cfg = base_cfg["graph"]

    # ── Load file list ──
    with open(args.file_list) as f:
        file_list = [l.strip() for l in f if l.strip()]

    # Resolve paths: try root-dir + filename
    root_dir = Path(args.root_dir)
    root_files = []
    for fpath in file_list:
        # Try as-is first, then just the filename in root_dir
        p = Path(fpath)
        if p.exists():
            root_files.append(str(p))
        else:
            local = root_dir / p.name
            if local.exists():
                root_files.append(str(local))
            else:
                # Try matching by subrun ID
                import re
                m = re.search(r'001\d+_(\d+)', p.name)
                if m:
                    subrun = m.group(1)
                    matches = list(root_dir.glob(f"*{subrun}*.root"))
                    if matches:
                        root_files.append(str(matches[0]))
                        continue
                print(f"  WARNING: cannot find {p.name} in {root_dir}, skipping")

    if not root_files:
        # Fall back: use all ROOT files in root_dir
        root_files = sorted(str(p) for p in root_dir.glob("*.root"))
        print(f"Using all {len(root_files)} ROOT files from {root_dir}")
    else:
        print(f"Found {len(root_files)} / {len(file_list)} ROOT files")

    branches = [
        "calohits.crystalId_", "calohits.eDep_", "calohits.time_",
        "calohits.clusterIdx_",
        "calohits.crystalPos_.fCoordinates.fX",
        "calohits.crystalPos_.fCoordinates.fY",
        "calohitsmc.simParticleIds", "calohitsmc.eDeps",
        "calomcsim.id", "calomcsim.ancestorSimIds",
    ]

    # ── Process events ──
    all_records = []  # one dict per matched (reco, truth) cluster pair
    n_disk_graphs = 0
    t0 = time.time()

    for fi, fpath in enumerate(root_files):
        fname = Path(fpath).name
        print(f"  [{fi+1}/{len(root_files)}] {fname}...", end=" ", flush=True)

        tree = uproot.open(fpath + ":EventNtuple/ntuple")
        arrays = tree.arrays(branches, entry_stop=args.n_events)
        n_events = len(arrays)

        for ev in range(n_events):
            nhits = len(arrays["calohits.crystalId_"][ev])
            if nhits == 0:
                continue

            cryids = np.array(arrays["calohits.crystalId_"][ev], dtype=np.int64)
            energies = np.array(arrays["calohits.eDep_"][ev], dtype=np.float64)
            times = np.array(arrays["calohits.time_"][ev], dtype=np.float64)
            bfs_idx = np.array(arrays["calohits.clusterIdx_"][ev], dtype=np.int64)
            xs = np.array(arrays["calohits.crystalPos_.fCoordinates.fX"][ev],
                          dtype=np.float64)
            ys = np.array(arrays["calohits.crystalPos_.fCoordinates.fY"][ev],
                          dtype=np.float64)
            simids = arrays["calohitsmc.simParticleIds"][ev]
            edeps_mc = arrays["calohitsmc.eDeps"][ev]

            # Build calo-entrant root map
            sim_ids_evt = arrays["calomcsim.id"][ev]
            ancestor_ids_evt = arrays["calomcsim.ancestorSimIds"][ev]
            calo_root_map = build_calo_root_map(
                sim_ids_evt, ancestor_ids_evt,
                simids, cryids, crystal_disk_map)

            disks = np.array([crystal_map[int(c)][0] if int(c) in crystal_map
                              else -1 for c in cryids], dtype=np.int64)

            # Fallback positions from crystal geometry
            if np.all(xs == 0) and np.all(ys == 0):
                for i, c in enumerate(cryids):
                    if int(c) in crystal_map:
                        _, xs[i], ys[i] = crystal_map[int(c)]

            for disk_id in [0, 1]:
                dm = disks == disk_id
                n_disk = dm.sum()
                if n_disk < 2:
                    continue

                d_e = energies[dm]
                d_t = times[dm]
                d_x = xs[dm]
                d_y = ys[dm]
                d_pos = np.stack([d_x, d_y], axis=1)
                d_bfs = bfs_idx[dm]
                d_disks = np.full(n_disk, disk_id, dtype=np.int64)

                disk_indices = np.where(dm)[0]
                d_simids = [list(simids[i]) for i in disk_indices]
                d_edeps = [list(edeps_mc[i]) for i in disk_indices]

                # MC truth clusters
                mc_truth = build_mc_truth_clusters(
                    d_simids, d_edeps, d_disks, n_disk, calo_root_map)

                # ── BFS residuals ──
                bfs_recs = match_and_compute_residuals(
                    d_bfs, mc_truth, d_pos, d_e, d_t, "BFS")
                all_records.extend(bfs_recs)

                # ── GNN residuals ──
                edge_index, _ = build_graph(
                    d_pos, d_t,
                    r_max=graph_cfg["r_max_mm"], dt_max=graph_cfg["dt_max_ns"],
                    k_min=graph_cfg["k_min"], k_max=graph_cfg["k_max"])

                if edge_index.shape[1] == 0:
                    n_disk_graphs += 1
                    continue

                node_feat = compute_node_features(d_pos, d_t, d_e)
                edge_feat = compute_edge_features(d_pos, d_t, d_e, edge_index)

                data = Data(
                    x=torch.from_numpy(node_feat),
                    edge_index=torch.from_numpy(edge_index),
                    edge_attr=torch.from_numpy(edge_feat),
                )
                normalize_graph(data, stats)

                for model_name, model in models.items():
                    with torch.no_grad():
                        output = model(data.to(device))

                    if isinstance(output, dict):
                        logits_np = output["edge_logits"].cpu().numpy()
                        nl = output.get("node_logits")
                        node_logits_np = nl.cpu().numpy() if nl is not None else None
                    else:
                        logits_np = output.cpu().numpy()
                        node_logits_np = None

                    gnn_labels, _ = reconstruct_clusters(
                        edge_index=edge_index,
                        edge_logits=logits_np,
                        n_nodes=n_disk,
                        energies=d_e,
                        tau_edge=tau_edges[model_name],
                        min_hits=1, min_energy_mev=0.0,
                        bfs_expand_cut=bfs_ecs[model_name],
                    )

                    # For saliency models, compute cluster physics from
                    # salient hits only (clustering unchanged for matching)
                    sal_mask = None
                    if node_logits_np is not None and tau_nodes[model_name] is not None:
                        node_probs = 1.0 / (1.0 + np.exp(-node_logits_np.astype(np.float64)))
                        sal_mask = node_probs >= tau_nodes[model_name]

                    gnn_recs = match_and_compute_residuals(
                        gnn_labels, mc_truth, d_pos, d_e, d_t, model_name,
                        saliency_mask=sal_mask)
                    all_records.extend(gnn_recs)

                n_disk_graphs += 1

        print(f"{n_events} events")

    elapsed = time.time() - t0
    print(f"\nProcessed {n_disk_graphs} disk-graphs from {len(root_files)} files "
          f"in {elapsed:.1f}s")

    # ── Save per-cluster CSV ──
    csv_path = out_dir / "cluster_residuals.csv"
    fieldnames = ["method", "truth_energy", "truth_nhits", "reco_energy",
                  "reco_nhits", "dE", "E_ratio", "dx", "dy", "dr", "dt",
                  "purity", "completeness"]
    with open(csv_path, "w", newline="") as f:
        writer = csv.DictWriter(f, fieldnames=fieldnames)
        writer.writeheader()
        for r in all_records:
            row = {k: (f"{v:.6f}" if isinstance(v, float) else v)
                   for k, v in r.items()}
            writer.writerow(row)
    print(f"Saved {len(all_records)} cluster residuals to {csv_path}")

    # ── Summary statistics ──
    methods = ["BFS", "SimpleEdgeNet", "SEN+BFS10", "CaloClusterNet", "CCN+BFS10"]
    summary_lines = []

    def add(line):
        # Mirror every summary line to stdout and to summary.txt.
        summary_lines.append(line)
        print(line)

    add(f"\n{'='*70}")
    add(f" Cluster-Level Physics Evaluation")
    add(f" {n_disk_graphs} disk-graphs, {len(root_files)} files, "
        f"{args.n_events} events/file")
    add(f"{'='*70}")

    energy_bins = [(0, 50, "<50 MeV"), (50, 200, "50-200 MeV"),
                   (200, float("inf"), ">200 MeV")]
    mult_bins = [(1, 2, "1 hit"), (2, 4, "2-3 hits"), (4, float("inf"), "4+ hits")]

    for method in methods:
        recs = [r for r in all_records if r["method"] == method]
        if not recs:
            add(f"\n {method}: no matched clusters")
            continue

        dE = np.array([r["dE"] for r in recs])
        dx = np.array([r["dx"] for r in recs])
        dy = np.array([r["dy"] for r in recs])
        dr = np.array([r["dr"] for r in recs])
        dt = np.array([r["dt"] for r in recs])
        E_ratio = np.array([r["E_ratio"] for r in recs])

        add(f"\n {method} ({len(recs)} matched clusters)")
        add(f" {'─'*60}")
        add(f" {'Metric':<25} {'Mean':>10} {'Median':>10} {'Std':>10} {'90th%':>10}")
        add(f" {'─'*60}")
        add(f" {'dE (MeV)':<25} {dE.mean():>10.3f} {np.median(dE):>10.3f} "
            f"{dE.std():>10.3f} {np.percentile(np.abs(dE), 90):>10.3f}")
        add(f" {'|dE| (MeV)':<25} {np.abs(dE).mean():>10.3f} "
            f"{np.median(np.abs(dE)):>10.3f} "
            f"{'':>10} {np.percentile(np.abs(dE), 90):>10.3f}")
        add(f" {'E_reco/E_truth':<25} {E_ratio.mean():>10.4f} "
            f"{np.median(E_ratio):>10.4f} {E_ratio.std():>10.4f} {'':>10}")
        add(f" {'dx (mm)':<25} {dx.mean():>10.3f} {np.median(dx):>10.3f} "
            f"{dx.std():>10.3f} {np.percentile(np.abs(dx), 90):>10.3f}")
        add(f" {'dy (mm)':<25} {dy.mean():>10.3f} {np.median(dy):>10.3f} "
            f"{dy.std():>10.3f} {np.percentile(np.abs(dy), 90):>10.3f}")
        add(f" {'dr (mm)':<25} {dr.mean():>10.3f} {np.median(dr):>10.3f} "
            f"{dr.std():>10.3f} {np.percentile(dr, 90):>10.3f}")
        add(f" {'dt (ns)':<25} {dt.mean():>10.3f} {np.median(dt):>10.3f} "
            f"{dt.std():>10.3f} {np.percentile(np.abs(dt), 90):>10.3f}")

        # Quality cut fractions
        n = len(recs)
        add(f"\n Quality cuts:")
        add(f" |dE| > 10 MeV: {(np.abs(dE) > 10).sum():>6d} / {n} "
            f"({(np.abs(dE) > 10).mean():.1%})")
        add(f" dr > 10 mm: {(dr > 10).sum():>6d} / {n} "
            f"({(dr > 10).mean():.1%})")
        add(f" |dt| > 1 ns: {(np.abs(dt) > 1).sum():>6d} / {n} "
            f"({(np.abs(dt) > 1).mean():.1%})")

        # Energy-binned breakdown
        add(f"\n Energy-binned |dE| and dr:")
        add(f" {'Bin':<15} {'N':>6} {'mean|dE|':>10} {'meanDr':>10} {'mean|dt|':>10}")
        for lo, hi, label in energy_bins:
            sub = [r for r in recs if lo <= r["truth_energy"] < hi]
            if not sub:
                add(f" {label:<15} {'0':>6}")
                continue
            s_dE = np.abs(np.array([r["dE"] for r in sub]))
            s_dr = np.array([r["dr"] for r in sub])
            s_dt = np.abs(np.array([r["dt"] for r in sub]))
            add(f" {label:<15} {len(sub):>6} {s_dE.mean():>10.3f} "
                f"{s_dr.mean():>10.3f} {s_dt.mean():>10.3f}")

        # Multiplicity-binned breakdown
        add(f"\n Multiplicity-binned |dE| and dr:")
        add(f" {'Bin':<15} {'N':>6} {'mean|dE|':>10} {'meanDr':>10} {'mean|dt|':>10}")
        for lo, hi, label in mult_bins:
            sub = [r for r in recs if lo <= r["truth_nhits"] < hi]
            if not sub:
                add(f" {label:<15} {'0':>6}")
                continue
            s_dE = np.abs(np.array([r["dE"] for r in sub]))
            s_dr = np.array([r["dr"] for r in sub])
            s_dt = np.abs(np.array([r["dt"] for r in sub]))
            add(f" {label:<15} {len(sub):>6} {s_dE.mean():>10.3f} "
                f"{s_dr.mean():>10.3f} {s_dt.mean():>10.3f}")

    # Save summary
    summary_path = out_dir / "summary.txt"
    with open(summary_path, "w") as f:
        f.write("\n".join(summary_lines) + "\n")
    print(f"\nSaved summary to {summary_path}")

    # ── Plots ──
    import matplotlib
    matplotlib.use("Agg")
    import matplotlib.pyplot as plt

    colors = {"BFS": "coral", "SimpleEdgeNet": "steelblue", "SEN+BFS10": "royalblue",
              "CaloClusterNet": "seagreen", "CCN+BFS10": "darkgreen"}

    fig, axes = plt.subplots(3, 3, figsize=(18, 16))
    fig.suptitle(f"Cluster-Level Physics Residuals\n"
                 f"{n_disk_graphs} disk-graphs, {len(root_files)} files",
                 fontsize=14, fontweight="bold")

    # ── Row 1: residual distributions (log y-scale) ──

    # 1. dE histogram
    ax = axes[0, 0]
    bins_dE = np.linspace(-50, 50, 200)
    for method in methods:
        recs = [r for r in all_records if r["method"] == method]
        if recs:
            vals = np.array([r["dE"] for r in recs])
            ax.hist(vals, bins=bins_dE, alpha=0.5,
                    label=f"{method} (n={len(vals):,})",
                    color=colors[method], edgecolor="none")
    ax.set_yscale("log")
    ax.set_xlabel("dE = E_reco − E_truth (MeV)")
    ax.set_ylabel("Clusters (log scale)")
    ax.set_title("Energy Residual")
    ax.legend(fontsize=8)
    ax.grid(alpha=0.3)
    ax.axvline(0, color="black", linestyle="--", alpha=0.3)

    # 2. dx histogram
    ax = axes[0, 1]
    bins_dxy = np.linspace(-80, 80, 200)
    for method in methods:
        recs = [r for r in all_records if r["method"] == method]
        if recs:
            vals = np.array([r["dx"] for r in recs])
            ax.hist(vals, bins=bins_dxy, alpha=0.5,
                    label=f"{method} (n={len(vals):,})",
                    color=colors[method], edgecolor="none")
    ax.set_yscale("log")
    ax.set_xlabel("dx = x_reco − x_truth (mm)")
    ax.set_ylabel("Clusters (log scale)")
    ax.set_title("Centroid X Residual")
    ax.legend(fontsize=8)
    ax.grid(alpha=0.3)
    ax.axvline(0, color="black", linestyle="--", alpha=0.3)

    # 3. dy histogram
    ax = axes[0, 2]
    for method in methods:
        recs = [r for r in all_records if r["method"] == method]
        if recs:
            vals = np.array([r["dy"] for r in recs])
            ax.hist(vals, bins=bins_dxy, alpha=0.5,
                    label=f"{method} (n={len(vals):,})",
                    color=colors[method], edgecolor="none")
    ax.set_yscale("log")
    ax.set_xlabel("dy = y_reco − y_truth (mm)")
    ax.set_ylabel("Clusters (log scale)")
    ax.set_title("Centroid Y Residual")
    ax.legend(fontsize=8)
    ax.grid(alpha=0.3)
    ax.axvline(0, color="black", linestyle="--", alpha=0.3)

    # ── Row 2: dr, dt, and summary bar chart ──

    # 4. dr histogram
    ax = axes[1, 0]
    bins_dr = np.linspace(0, 80, 200)
    for method in methods:
        recs = [r for r in all_records if r["method"] == method]
        if recs:
            vals = np.array([r["dr"] for r in recs])
            ax.hist(vals, bins=bins_dr, alpha=0.5,
                    label=f"{method} (n={len(vals):,})",
                    color=colors[method], edgecolor="none")
    ax.set_yscale("log")
    ax.set_xlabel("Centroid displacement dr (mm)")
    ax.set_ylabel("Clusters (log scale)")
    ax.set_title("Centroid Displacement (combined)")
    ax.legend(fontsize=8)
    ax.grid(alpha=0.3)

    # 5. dt histogram
    ax = axes[1, 1]
    bins_dt = np.linspace(-10, 10, 200)
    for method in methods:
        recs = [r for r in all_records if r["method"] == method]
        if recs:
            vals = np.array([r["dt"] for r in recs])
            ax.hist(vals, bins=bins_dt, alpha=0.5,
                    label=f"{method} (n={len(vals):,})",
                    color=colors[method], edgecolor="none")
    ax.set_yscale("log")
    ax.set_xlabel("dt = t_reco − t_truth (ns)")
    ax.set_ylabel("Clusters (log scale)")
    ax.set_title("Time Residual")
    ax.legend(fontsize=8)
    ax.grid(alpha=0.3)
    ax.axvline(0, color="black", linestyle="--", alpha=0.3)

    # 6. Summary comparison bar chart
    ax = axes[1, 2]
    metrics_labels = ["Mean |dE|\n(MeV)", "Mean |dx|\n(mm)", "Mean |dy|\n(mm)",
                      "Mean dr\n(mm)", "Mean |dt|\n(ns)"]
    x = np.arange(len(metrics_labels))
    w = 0.15
    for mi, method in enumerate(methods):
        recs = [r for r in all_records if r["method"] == method]
        if not recs:
            continue
        dE_v = np.abs(np.array([r["dE"] for r in recs]))
        dx_v = np.abs(np.array([r["dx"] for r in recs]))
        dy_v = np.abs(np.array([r["dy"] for r in recs]))
        dr_v = np.array([r["dr"] for r in recs])
        dt_v = np.abs(np.array([r["dt"] for r in recs]))
        vals = [dE_v.mean(), dx_v.mean(), dy_v.mean(), dr_v.mean(), dt_v.mean()]
        offset = (mi - 2) * w
        ax.bar(x + offset, vals, w, label=method,
               color=colors[method], alpha=0.8)
        for i, v in enumerate(vals):
            ax.text(x[i] + offset, v + max(vals) * 0.02, f"{v:.2f}",
                    ha="center", fontsize=6)
    ax.set_xticks(x)
    ax.set_xticklabels(metrics_labels, fontsize=9)
    ax.set_title("Summary Comparison")
    ax.legend(fontsize=7)
    ax.grid(alpha=0.3, axis="y")

    # ── Row 3: energy-dependent residuals ──

    # 7. |dE| vs truth energy (binned means)
    ax = axes[2, 0]
    for method in methods:
        recs = [r for r in all_records if r["method"] == method]
        if not recs:
            continue
        te = np.array([r["truth_energy"] for r in recs])
        ade = np.abs(np.array([r["dE"] for r in recs]))
        bin_edges = [0, 20, 40, 60, 80, 100, 150, 200, 300]
        centers, means = [], []
        for i in range(len(bin_edges) - 1):
            mask = (te >= bin_edges[i]) & (te < bin_edges[i+1])
            if mask.sum() >= 5:
                centers.append((bin_edges[i] + bin_edges[i+1]) / 2)
                means.append(ade[mask].mean())
        ax.plot(centers, means, 'o-', color=colors[method], label=method,
                markersize=5, linewidth=1.5)
    ax.set_xlabel("Truth cluster energy (MeV)")
    ax.set_ylabel("Mean |dE| (MeV)")
    ax.set_title("|dE| vs Truth Energy")
    ax.legend(fontsize=9)
    ax.grid(alpha=0.3)

    # 8. dr vs truth energy (binned means)
    ax = axes[2, 1]
    for method in methods:
        recs = [r for r in all_records if r["method"] == method]
        if not recs:
            continue
        te = np.array([r["truth_energy"] for r in recs])
        dr_vals = np.array([r["dr"] for r in recs])
        bin_edges = [0, 20, 40, 60, 80, 100, 150, 200, 300]
        centers, means = [], []
        for i in range(len(bin_edges) - 1):
            mask = (te >= bin_edges[i]) & (te < bin_edges[i+1])
            if mask.sum() >= 5:
                centers.append((bin_edges[i] + bin_edges[i+1]) / 2)
                means.append(dr_vals[mask].mean())
        ax.plot(centers, means, 'o-', color=colors[method], label=method,
                markersize=5, linewidth=1.5)
    ax.set_xlabel("Truth cluster energy (MeV)")
    ax.set_ylabel("Mean centroid displacement (mm)")
    ax.set_title("Centroid Displacement vs Truth Energy")
    ax.legend(fontsize=9)
    ax.grid(alpha=0.3)

    # 9. Quality cut fractions bar chart
    ax = axes[2, 2]
    metrics_labels = ["|dE|>10MeV\n(%)", "dr>10mm\n(%)", "|dx|>10mm\n(%)",
                      "|dy|>10mm\n(%)"]
    x = np.arange(len(metrics_labels))
    w = 0.15
    for mi, method in enumerate(methods):
        recs = [r for r in all_records if r["method"] == method]
        if not recs:
            continue
        n = len(recs)
        dE_v = np.abs(np.array([r["dE"] for r in recs]))
        dr_v = np.array([r["dr"] for r in recs])
        dx_v = np.abs(np.array([r["dx"] for r in recs]))
        dy_v = np.abs(np.array([r["dy"] for r in recs]))
        vals = [(dE_v > 10).sum() / n * 100, (dr_v > 10).sum() / n * 100,
                (dx_v > 10).sum() / n * 100, (dy_v > 10).sum() / n * 100]
        offset = (mi - 2) * w
        ax.bar(x + offset, vals, w, label=method,
               color=colors[method], alpha=0.8)
        for i, v in enumerate(vals):
            ax.text(x[i] + offset, v + max(vals) * 0.02, f"{v:.1f}%",
                    ha="center", fontsize=6)
    ax.set_xticks(x)
    ax.set_xticklabels(metrics_labels, fontsize=9)
    ax.set_title("Quality Cut Fractions")
    ax.legend(fontsize=7)
    ax.grid(alpha=0.3, axis="y")

    plt.tight_layout()
    plot_path = out_dir / "residual_plots.png"
    plt.savefig(plot_path, dpi=150, bbox_inches="tight")
    print(f"Saved plots to {plot_path}")


if __name__ == "__main__":
    main()
def build_mc_truth_clusters_old(simids, edeps, disks, nhits, purity_thresh=0.7):
    """Old truth definition: one cluster per (disk, dominant SimParticle).

    A hit is labelled only when its single dominant contributor carries at
    least *purity_thresh* of the hit's MC energy; otherwise it stays -1.
    """
    labels = np.full(nhits, -1, dtype=np.int64)
    seen = {}
    for hit in range(nhits):
        contributors = list(simids[hit])
        deposits = list(edeps[hit])
        if len(contributors) == 0 or sum(deposits) <= 0:
            continue
        dep_arr = np.array(deposits)
        top = int(np.argmax(dep_arr))
        if dep_arr[top] / dep_arr.sum() < purity_thresh:
            continue
        key = (int(disks[hit]), int(contributors[top]))
        # setdefault assigns labels 0,1,2,... in first-seen order.
        labels[hit] = seen.setdefault(key, len(seen))
    return labels


def build_mc_truth_clusters_new(simids, edeps, disks, nhits,
                                calo_root_map, purity_thresh=0.7):
    """New truth definition: one cluster per (disk, calo-entrant root)."""
    labels = np.full(nhits, -1, dtype=np.int64)
    seen = {}
    for hit in range(nhits):
        contributors = list(simids[hit])
        deposits = list(edeps[hit])
        if len(contributors) == 0 or sum(deposits) <= 0:
            continue
        total_e = sum(deposits)
        disk = int(disks[hit])
        # Re-attribute each contribution to its calo-entrant root particle
        # (a particle absent from the map is its own root).
        per_root = defaultdict(float)
        for pid, dep in zip(contributors, deposits):
            per_root[calo_root_map.get((int(pid), disk), int(pid))] += float(dep)
        dominant = max(per_root, key=per_root.get)
        if per_root[dominant] / total_e < purity_thresh:
            continue
        labels[hit] = seen.setdefault((disk, dominant), len(seen))
    return labels


def match_clusters(pred_labels, truth_labels, energies):
    """Energy-weighted greedy reco<->truth matching; returns aggregate counts."""
    pred_ids = sorted(set(pred_labels[pred_labels >= 0].tolist()))
    truth_ids = sorted(set(truth_labels[truth_labels >= 0].tolist()))

    truth_energy = {}
    truth_nhits = {}
    for tid in truth_ids:
        sel = truth_labels == tid
        truth_energy[tid] = float(energies[sel].sum())
        truth_nhits[tid] = int(sel.sum())

    if not pred_ids or not truth_ids:
        return {
            "n_pred": len(pred_ids), "n_truth": len(truth_ids),
            "n_matched_pred": 0, "n_matched_truth": 0,
            "purities": [], "completenesses": [],
            "n_split": 0, "n_merged": 0,
        }

    # Shared energy per (reco, truth) pair and total energy per reco cluster.
    overlap = defaultdict(lambda: defaultdict(float))
    pred_energy = defaultdict(float)
    for hit in range(len(energies)):
        e = energies[hit]
        p = pred_labels[hit]
        t = truth_labels[hit]
        if p >= 0:
            pred_energy[p] += e
            if t >= 0:
                overlap[p][t] += e

    # A pair counts as matched when purity AND completeness exceed 0.5.
    purities, completenesses = [], []
    matched_truth = set()
    for p in sorted(pred_ids):
        if p not in overlap:
            continue
        best_t = max(overlap[p], key=lambda t: overlap[p][t])
        shared = overlap[p][best_t]
        pur = shared / pred_energy[p] if pred_energy[p] > 0 else 0
        comp = shared / truth_energy[best_t] if truth_energy[best_t] > 0 else 0
        if pur > 0.5 and comp > 0.5:
            purities.append(pur)
            completenesses.append(comp)
            matched_truth.add(best_t)

    # Splits: a truth cluster claimed (>50% of reco energy) by >1 reco cluster.
    # Merges: a reco cluster fed significantly (>10%) by >1 truth cluster.
    dominant_truth = defaultdict(list)
    n_merged = 0
    for p in sorted(pred_ids):
        if p not in overlap or pred_energy[p] <= 0:
            continue
        fractions = {t: e / pred_energy[p] for t, e in overlap[p].items()}
        for t, frac in fractions.items():
            if frac > 0.5:
                dominant_truth[t].append(p)
        if sum(1 for frac in fractions.values() if frac > 0.1) > 1:
            n_merged += 1
    n_split = sum(1 for ps in dominant_truth.values() if len(ps) > 1)

    return {
        "n_pred": len(pred_ids), "n_truth": len(truth_ids),
        "n_matched_pred": len(purities), "n_matched_truth": len(matched_truth),
        "purities": purities, "completenesses": completenesses,
        "n_split": n_split, "n_merged": n_merged,
    }


def aggregate_results(results_list):
    """Pool per-graph match results into one summary dict."""
    pooled_pur = [p for r in results_list for p in r["purities"]]
    pooled_comp = [c for r in results_list for c in r["completenesses"]]
    totals = {key: sum(r[key] for r in results_list)
              for key in ("n_pred", "n_truth", "n_matched_pred",
                          "n_matched_truth", "n_split", "n_merged")}
    n_pred = totals["n_pred"]
    n_truth = totals["n_truth"]
    return {
        "n_pred": n_pred, "n_truth": n_truth,
        "n_matched_pred": totals["n_matched_pred"],
        "n_matched_truth": totals["n_matched_truth"],
        "reco_match_rate": totals["n_matched_pred"] / n_pred if n_pred > 0 else 0,
        "truth_match_rate": totals["n_matched_truth"] / n_truth if n_truth > 0 else 0,
        "mean_purity": float(np.mean(pooled_pur)) if pooled_pur else 0,
        "mean_completeness": float(np.mean(pooled_comp)) if pooled_comp else 0,
        "n_split": totals["n_split"], "n_merged": totals["n_merged"],
    }


def v1_to_v2_path(v1_name, v2_dir):
    """Map a v1 ROOT file name to its v2 equivalent, or None if absent.

    v1: nts.mu2e.FlateMinusMix1BBTriggered.MDC2025-002.001430_XXXXXXXX.root
    v2: mcs.mu2e.FlateMinusMix1BBTriggered.MDC2025af_best_v1_1.001430_XXXXXXXX.root
    """
    # The sequence suffix (e.g. "001430_00000044") is the stable join key.
    seq = Path(v1_name).stem.split(".")[-1]
    # Return the first glob hit, matching the original's behavior.
    for candidate in Path(v2_dir).glob(f"mcs.*{seq}.root"):
        return candidate
    return None


def load_model_and_config(config_path, checkpoint_path, device):
    """Load one model plus its YAML config.

    Returns (model, cfg, tau_edge, tau_node); tau_node is None unless the
    model is a CaloClusterNet trained with a non-zero node-loss weight.
    """
    with open(config_path) as fh:
        cfg = yaml.safe_load(fh)

    model = build_model(cfg)
    state = torch.load(checkpoint_path, weights_only=False, map_location=device)
    model.load_state_dict(state["model_state_dict"])
    model.to(device).eval()

    inference_cfg = cfg["inference"]
    node_head_trained = (
        cfg["model"].get("name", "SimpleEdgeNet") == "CaloClusterNet"
        and cfg.get("train", {}).get("lambda_node", 0.0) > 0
    )
    tau_node = inference_cfg.get("tau_node") if node_head_trained else None

    return model, cfg, inference_cfg["tau_edge"], tau_node
def load_model_and_config(config_path, checkpoint_path, device):
    """Load a model + config.

    Builds the model from the YAML config, restores weights from the
    checkpoint's "model_state_dict", and moves it to `device` in eval mode.

    Returns (model, cfg, tau_edge, tau_node)."""
    with open(config_path) as f:
        cfg = yaml.safe_load(f)
    model = build_model(cfg)
    # NOTE(review): weights_only=False unpickles arbitrary objects — only
    # safe because checkpoints are produced by this repo's own training.
    ckpt = torch.load(checkpoint_path, weights_only=False, map_location=device)
    model.load_state_dict(ckpt["model_state_dict"])
    model.to(device).eval()

    inf_cfg = cfg["inference"]
    tau_edge = inf_cfg["tau_edge"]
    model_name = cfg["model"].get("name", "SimpleEdgeNet")
    has_node_head = model_name == "CaloClusterNet"
    lambda_node = cfg.get("train", {}).get("lambda_node", 0.0)
    tau_node_cfg = inf_cfg.get("tau_node")
    # The node threshold only applies when the model has a node head AND
    # it was actually trained (lambda_node > 0); otherwise disabled.
    tau_node = tau_node_cfg if (has_node_head and lambda_node > 0) else None

    return model, cfg, tau_edge, tau_node


def main():
    """Compare BFS and both GNNs against old vs new truth definitions.

    Pipeline: load checkpoints -> map v1 split files to v2 ROOT files ->
    per event/disk build both truth cluster sets, match each method's
    clusters against both, then print comparison + delta tables and
    write a CSV summary.
    """
    parser = argparse.ArgumentParser(
        description="Evaluate models against old and new truth definitions")
    parser.add_argument("--v2-dir", default="/exp/mu2e/data/users/wzhou2/GNN/root_files_v2")
    parser.add_argument("--split", default="val", choices=["val", "test"])
    parser.add_argument("--max-events", type=int, default=500)
    parser.add_argument("--output-dir", default="outputs/new_truth_eval")
    parser.add_argument("--device", default=None)
    args = parser.parse_args()

    device = torch.device(args.device or ("cuda" if torch.cuda.is_available() else "cpu"))
    print(f"Device: {device}")

    crystal_map = load_crystal_map("data/crystal_geometry.csv")
    # crystal_map values are (disk, x, y); keep just the disk index here.
    crystal_disk_map = {cid: info[0] for cid, info in crystal_map.items()}

    # Load models (each is optional — skipped if its checkpoint is absent)
    models = {}
    se_ckpt = "outputs/runs/simple_edge_net_v1/checkpoints/best_model.pt"
    ccn_ckpt = "outputs/runs/calo_cluster_net_v2_stage1/checkpoints/best_model.pt"
    if Path(se_ckpt).exists():
        m, c, te, tn = load_model_and_config("configs/default.yaml", se_ckpt, device)
        models["SimpleEdgeNet"] = (m, c, te, tn)
        print(f"SimpleEdgeNet: tau_edge={te}")
    if Path(ccn_ckpt).exists():
        m, c, te, tn = load_model_and_config(
            "configs/calo_cluster_net.yaml", ccn_ckpt, device)
        models["CaloClusterNet"] = (m, c, te, tn)
        print(f"CaloClusterNet: tau_edge={te}")

    # Load normalization stats (use SimpleEdgeNet config as reference)
    ref_cfg = list(models.values())[0][1] if models else None
    if ref_cfg is None:
        print("ERROR: No model checkpoints found")
        return
    stats = load_stats(ref_cfg["data"]["normalization_stats"])
    graph_cfg = ref_cfg["graph"]

    # Find v2 files for the split
    split_file = f"splits/{args.split}_files.txt"
    with open(split_file) as f:
        v1_files = [l.strip() for l in f if l.strip()]

    v2_files = []
    for v1 in v1_files:
        v2 = v1_to_v2_path(v1, args.v2_dir)
        # NOTE(review): 1800 MiB size cutoff presumably rejects
        # partially-transferred v2 files — confirm expected file size.
        if v2 and v2.stat().st_size >= 1800 * 1024 * 1024:
            # Verify the file is readable
            try:
                uproot.open(f"{v2}:EventNtuple/ntuple")
                v2_files.append(v2)
            except Exception:
                print(f" SKIP (corrupt): {v2.name}")
    print(f"Split '{args.split}': {len(v1_files)} files, "
          f"{len(v2_files)} with complete v2 ROOT files")

    if not v2_files:
        print("ERROR: No complete v2 files available for this split")
        return

    branches = [
        "calohits.crystalId_", "calohits.eDep_", "calohits.time_",
        "calohits.clusterIdx_",
        "calohits.crystalPos_.fCoordinates.fX",
        "calohits.crystalPos_.fCoordinates.fY",
        "calohitsmc.simParticleIds", "calohitsmc.eDeps",
        "calomcsim.id", "calomcsim.ancestorSimIds",
    ]

    # Results: {method: {truth_type: [per-graph results]}}
    # methods: "BFS", "SimpleEdgeNet", "CaloClusterNet"
    # truth types: "old", "new"
    results = defaultdict(lambda: defaultdict(list))
    n_graphs = 0
    n_events_total = 0
    t0 = time.time()

    for fi, v2_path in enumerate(v2_files):
        print(f" [{fi+1}/{len(v2_files)}] {v2_path.name}...", end=" ", flush=True)

        tree = uproot.open(f"{v2_path}:EventNtuple/ntuple")
        arrays = tree.arrays(branches, entry_stop=args.max_events)
        n_events = len(arrays)
        n_events_total += n_events

        for ev in range(n_events):
            nhits = len(arrays["calohits.crystalId_"][ev])
            if nhits == 0:
                continue

            cryids = np.array(arrays["calohits.crystalId_"][ev], dtype=np.int64)
            energies = np.array(arrays["calohits.eDep_"][ev], dtype=np.float64)
            times = np.array(arrays["calohits.time_"][ev], dtype=np.float64)
            cluster_idx = np.array(arrays["calohits.clusterIdx_"][ev], dtype=np.int64)
            xs = np.array(arrays["calohits.crystalPos_.fCoordinates.fX"][ev], dtype=np.float64)
            ys = np.array(arrays["calohits.crystalPos_.fCoordinates.fY"][ev], dtype=np.float64)
            simids = arrays["calohitsmc.simParticleIds"][ev]
            edeps_mc = arrays["calohitsmc.eDeps"][ev]

            disks = np.array([crystal_disk_map.get(int(c), -1)
                              for c in cryids], dtype=np.int64)

            # Fall back to the geometry CSV when the ntuple carries no
            # crystal positions (all-zero x/y).
            if np.all(xs == 0) and np.all(ys == 0):
                for i, c in enumerate(cryids):
                    if int(c) in crystal_map:
                        _, xs[i], ys[i] = crystal_map[int(c)]

            # Build calo-root map for this event
            sim_ids_evt = arrays["calomcsim.id"][ev]
            anc_evt = arrays["calomcsim.ancestorSimIds"][ev]
            calo_root_map = build_calo_root_map(
                sim_ids_evt, anc_evt, simids, cryids, crystal_disk_map)

            for disk_id in [0, 1]:
                dm = disks == disk_id
                n_disk = dm.sum()
                if n_disk < 2:
                    continue  # need at least 2 hits to form a graph

                d_e = energies[dm]
                d_t = times[dm]
                d_x = xs[dm]
                d_y = ys[dm]
                d_pos = np.stack([d_x, d_y], axis=1)
                d_cidx = cluster_idx[dm]
                d_disks = np.full(n_disk, disk_id, dtype=np.int64)

                disk_indices = np.where(dm)[0]
                d_simids = [list(simids[i]) for i in disk_indices]
                d_edeps = [list(edeps_mc[i]) for i in disk_indices]

                # Build both truth cluster sets
                truth_old = build_mc_truth_clusters_old(
                    d_simids, d_edeps, d_disks, n_disk)
                truth_new = build_mc_truth_clusters_new(
                    d_simids, d_edeps, d_disks, n_disk, calo_root_map)

                # BFS vs both truths
                results["BFS"]["old"].append(match_clusters(d_cidx, truth_old, d_e))
                results["BFS"]["new"].append(match_clusters(d_cidx, truth_new, d_e))

                # GNN models
                edge_index, _ = build_graph(
                    d_pos, d_t,
                    r_max=graph_cfg["r_max_mm"], dt_max=graph_cfg["dt_max_ns"],
                    k_min=graph_cfg["k_min"], k_max=graph_cfg["k_max"])

                if edge_index.shape[1] == 0:
                    # No edges: every hit becomes its own cluster.
                    gnn_labels = np.arange(n_disk)
                    for mname in models:
                        results[mname]["old"].append(
                            match_clusters(gnn_labels, truth_old, d_e))
                        results[mname]["new"].append(
                            match_clusters(gnn_labels, truth_new, d_e))
                    n_graphs += 1
                    continue

                node_feat = compute_node_features(d_pos, d_t, d_e)
                edge_feat = compute_edge_features(d_pos, d_t, d_e, edge_index)

                data = Data(
                    x=torch.from_numpy(node_feat),
                    edge_index=torch.from_numpy(edge_index),
                    edge_attr=torch.from_numpy(edge_feat),
                )
                normalize_graph(data, stats)

                for mname, (model, cfg_m, tau_edge, tau_node) in models.items():
                    with torch.no_grad():
                        output = model(data.to(device))

                    # CaloClusterNet returns a dict with an optional node
                    # head; SimpleEdgeNet returns edge logits directly.
                    if isinstance(output, dict):
                        logits_np = output["edge_logits"].cpu().numpy()
                        nl = output.get("node_logits")
                        node_logits_np = nl.cpu().numpy() if nl is not None else None
                    else:
                        logits_np = output.cpu().numpy()
                        node_logits_np = None

                    gnn_labels, _ = reconstruct_clusters(
                        edge_index=edge_index,
                        edge_logits=logits_np,
                        n_nodes=n_disk,
                        energies=d_e,
                        tau_edge=tau_edge,
                        min_hits=1,
                        min_energy_mev=0.0,
                        node_logits=node_logits_np,
                        tau_node=tau_node,
                    )

                    results[mname]["old"].append(
                        match_clusters(gnn_labels, truth_old, d_e))
                    results[mname]["new"].append(
                        match_clusters(gnn_labels, truth_new, d_e))

                n_graphs += 1

        print(f"{n_events} events")

    elapsed = time.time() - t0
    print(f"\nProcessed {n_graphs} disk-graphs from {n_events_total} events "
          f"({len(v2_files)} files) in {elapsed:.1f}s")

    # Print comparison table
    print(f"\n{'='*80}")
    print(f"OLD TRUTH (SimParticle) vs NEW TRUTH (calo-entrant)")
    print(f"{'='*80}")

    header = f"{'Method':<20} {'Truth':<6} {'RecoMR':>8} {'TruthMR':>8} " \
             f"{'Purity':>8} {'Compl':>8} {'Splits':>7} {'Merges':>7} {'N_truth':>8}"
    print(header)
    print("-" * len(header))

    out_rows = []
    for mname in ["BFS"] + list(models.keys()):
        for ttype in ["old", "new"]:
            if not results[mname][ttype]:
                continue
            agg = aggregate_results(results[mname][ttype])
            line = (f"{mname:<20} {ttype:<6} "
                    f"{agg['reco_match_rate']:>7.1%} "
                    f"{agg['truth_match_rate']:>7.1%} "
                    f"{agg['mean_purity']:>8.4f} "
                    f"{agg['mean_completeness']:>8.4f} "
                    f"{agg['n_split']:>7d} "
                    f"{agg['n_merged']:>7d} "
                    f"{agg['n_truth']:>8d}")
            print(line)
            out_rows.append({
                "method": mname, "truth": ttype,
                "reco_match_rate": f"{agg['reco_match_rate']:.4f}",
                "truth_match_rate": f"{agg['truth_match_rate']:.4f}",
                "mean_purity": f"{agg['mean_purity']:.4f}",
                "mean_completeness": f"{agg['mean_completeness']:.4f}",
                "n_split": agg["n_split"], "n_merged": agg["n_merged"],
                "n_truth": agg["n_truth"], "n_pred": agg["n_pred"],
            })
        # Blank line between methods.
        print()

    # Delta table
    print(f"\n{'='*80}")
    print("DELTA (new - old truth): positive = improvement")
    print(f"{'='*80}")
    for mname in ["BFS"] + list(models.keys()):
        old_r = results[mname].get("old", [])
        new_r = results[mname].get("new", [])
        if not old_r or not new_r:
            continue
        old_a = aggregate_results(old_r)
        new_a = aggregate_results(new_r)
        d_tmr = new_a["truth_match_rate"] - old_a["truth_match_rate"]
        d_rmr = new_a["reco_match_rate"] - old_a["reco_match_rate"]
        d_pur = new_a["mean_purity"] - old_a["mean_purity"]
        d_comp = new_a["mean_completeness"] - old_a["mean_completeness"]
        d_split = new_a["n_split"] - old_a["n_split"]
        d_merge = new_a["n_merged"] - old_a["n_merged"]
        print(f" {mname:<20} TMR: {d_tmr:>+.1%} RMR: {d_rmr:>+.1%} "
              f"Pur: {d_pur:>+.4f} Comp: {d_comp:>+.4f} "
              f"Splits: {d_split:>+d} Merges: {d_merge:>+d}")

    # Save CSV
    out_dir = Path(args.output_dir)
    out_dir.mkdir(parents=True, exist_ok=True)
    csv_path = out_dir / "new_truth_comparison.csv"
    if out_rows:
        with open(csv_path, "w", newline="") as f:
            writer = csv.DictWriter(f, fieldnames=out_rows[0].keys())
            writer.writeheader()
            writer.writerows(out_rows)
        print(f"\nSaved results to {csv_path}")


if __name__ == "__main__":
    main()
#!/usr/bin/env python3
"""
Run1B (no-field) evaluation: BFS + SimpleEdgeNet + CaloClusterNet vs calo-entrant truth.

Evaluates models trained on MDC2025 (with-field) data on the Run1B (no-field)
dataset. Tests generalization to a different physics scenario where electrons
travel straight (no B-field curving).

Outputs:
  - outputs/run1b_eval/run1b_results.csv (overall comparison)
  - outputs/run1b_eval/truth_cluster_detail.csv (per-truth-cluster for binning)
  - outputs/run1b_eval/run1b_evaluation.png (comparison plots)

Usage:
  source setup_env.sh
  OMP_NUM_THREADS=4 PYTHONUNBUFFERED=1 python3 -u scripts/evaluate_run1b.py
"""

import argparse
import csv
import sys
import time
from collections import defaultdict
from pathlib import Path

# Make the package root importable when run as a script.
sys.path.insert(0, str(Path(__file__).resolve().parents[1]))

import numpy as np
import torch
import uproot
import yaml

from src.data.graph_builder import build_graph, compute_edge_features, compute_node_features
from src.data.normalization import load_stats, normalize_graph
from src.data.truth_labels_primary import build_calo_root_map
from src.geometry.crystal_geometry import load_crystal_map
from src.inference.cluster_reco import reconstruct_clusters
from src.models import build_model
from torch_geometric.data import Data


def build_mc_truth_clusters(simids, edeps, disks, nhits,
                            calo_root_map, purity_thresh=0.7):
    """Build MC truth cluster labels per hit using calo-entrant truth.

    Deposits are re-keyed by calo-entrant root (via calo_root_map, with
    the particle's own id as fallback); a hit is assigned only when the
    dominant root carries at least `purity_thresh` of its energy.
    Returns an int64 array of length nhits with -1 for unassigned hits.
    """
    truth_labels = np.full(nhits, -1, dtype=np.int64)
    cluster_map = {}  # (disk, root) -> dense truth label
    next_label = 0

    for i in range(nhits):
        sids = np.array(simids[i])
        deps = np.array(edeps[i], dtype=np.float64)
        if len(sids) == 0 or deps.sum() <= 0:
            continue  # no usable MC info
        disk = int(disks[i])

        # Sum deposited energy per calo-entrant root.
        root_edep = {}
        for pid, dep in zip(sids, deps):
            root = calo_root_map.get((int(pid), disk), int(pid))
            root_edep[root] = root_edep.get(root, 0.0) + float(dep)

        best_root = max(root_edep, key=root_edep.get)
        purity = root_edep[best_root] / deps.sum()
        if purity < purity_thresh:
            continue  # too mixed to assign
        key = (disk, best_root)
        if key not in cluster_map:
            cluster_map[key] = next_label
            next_label += 1
        truth_labels[i] = cluster_map[key]

    return truth_labels


def match_clusters_detail(pred_labels, truth_labels, energies):
    """Energy-weighted greedy matching with per-truth-cluster detail.

    Same matching rules as the aggregate matcher (>0.5 purity AND >0.5
    completeness to count a match; splits/merges at 0.5/0.1 energy-share
    thresholds), but additionally returns one record per truth cluster
    (energy, n_hits, matched flag) for binned match-rate studies.

    Returns (summary_dict, truth_detail_list).
    """
    pred_ids = sorted(set(pred_labels[pred_labels >= 0].tolist()))
    truth_ids = sorted(set(truth_labels[truth_labels >= 0].tolist()))

    # Per-truth-cluster totals, needed for both matching and detail rows.
    truth_energy = {}
    truth_nhits = {}
    for tid in truth_ids:
        tmask = truth_labels == tid
        truth_energy[tid] = float(energies[tmask].sum())
        truth_nhits[tid] = int(tmask.sum())

    if not pred_ids or not truth_ids:
        # Nothing to match: every truth cluster is unmatched.
        truth_detail = [
            {"truth_id": tid, "energy": truth_energy[tid],
             "n_hits": truth_nhits[tid], "matched": False}
            for tid in truth_ids
        ]
        return {
            "n_pred": len(pred_ids), "n_truth": len(truth_ids),
            "n_matched_pred": 0, "n_matched_truth": 0,
            "purities": [], "completenesses": [],
            "n_split": 0, "n_merged": 0,
        }, truth_detail

    # Shared energy per (pred, truth) pair + per-pred energy totals.
    overlap = defaultdict(lambda: defaultdict(float))
    pred_energy = defaultdict(float)

    for i in range(len(energies)):
        e = energies[i]
        p = pred_labels[i]
        t = truth_labels[i]
        if p >= 0:
            pred_energy[p] += e
        if p >= 0 and t >= 0:
            overlap[p][t] += e

    purities, completenesses = [], []
    matched_truth = set()
    for p in sorted(pred_ids):
        if p not in overlap:
            continue
        best_t = max(overlap[p], key=lambda t: overlap[p][t])
        shared = overlap[p][best_t]
        pur = shared / pred_energy[p] if pred_energy[p] > 0 else 0
        comp = shared / truth_energy[best_t] if truth_energy[best_t] > 0 else 0
        if pur > 0.5 and comp > 0.5:
            purities.append(pur)
            completenesses.append(comp)
            matched_truth.add(best_t)

    # Splits: truth clusters dominated by more than one predicted cluster.
    truth_to_pred = defaultdict(list)
    for p in sorted(pred_ids):
        if p not in overlap:
            continue
        for t, e in overlap[p].items():
            if pred_energy[p] > 0 and e / pred_energy[p] > 0.5:
                truth_to_pred[t].append(p)
    n_split = sum(1 for ps in truth_to_pred.values() if len(ps) > 1)

    # Merges: predicted clusters with >10% of energy in >1 truth cluster.
    n_merged = 0
    for p in sorted(pred_ids):
        if p not in overlap:
            continue
        sig = [t for t, e in overlap[p].items()
               if pred_energy[p] > 0 and e / pred_energy[p] > 0.1]
        if len(sig) > 1:
            n_merged += 1

    truth_detail = [
        {"truth_id": tid, "energy": truth_energy[tid],
         "n_hits": truth_nhits[tid], "matched": tid in matched_truth}
        for tid in truth_ids
    ]

    return {
        "n_pred": len(pred_ids), "n_truth": len(truth_ids),
        "n_matched_pred": len(purities), "n_matched_truth": len(matched_truth),
        "purities": purities, "completenesses": completenesses,
        "n_split": n_split, "n_merged": n_merged,
    }, truth_detail


def aggregate_results(results_list):
    """Aggregate per-graph match results into summary stats.

    Unlike the printing-only aggregator in the sibling script, this one
    also keeps the pooled purity/completeness lists (for histograms).
    """
    all_pur = [p for r in results_list for p in r["purities"]]
    all_comp = [c for r in results_list for c in r["completenesses"]]
    n_pred = sum(r["n_pred"] for r in results_list)
    n_truth = sum(r["n_truth"] for r in results_list)
    n_matched_pred = sum(r["n_matched_pred"] for r in results_list)
    n_matched_truth = sum(r["n_matched_truth"] for r in results_list)
    n_split = sum(r["n_split"] for r in results_list)
    n_merged = sum(r["n_merged"] for r in results_list)

    return {
        "n_pred": n_pred, "n_truth": n_truth,
        "n_matched_pred": n_matched_pred, "n_matched_truth": n_matched_truth,
        "reco_match_rate": n_matched_pred / n_pred if n_pred > 0 else 0,
        "truth_match_rate": n_matched_truth / n_truth if n_truth > 0 else 0,
        "mean_purity": float(np.mean(all_pur)) if all_pur else 0,
        "mean_completeness": float(np.mean(all_comp)) if all_comp else 0,
        "n_split": n_split, "n_merged": n_merged,
        "purities": all_pur, "completenesses": all_comp,
    }


def binned_match_rate(detail_records, key, bins, labels):
    """Compute truth match rate in bins of a given key.

    Bins are half-open [lo, hi); `labels` must have len(bins) - 1 entries.
    Returns one dict per bin with label, counts, and the match rate.
    """
    results = []
    for i in range(len(bins) - 1):
        lo, hi = bins[i], bins[i + 1]
        in_bin = [d for d in detail_records if lo <= d[key] < hi]
        n_total = len(in_bin)
        n_matched = sum(1 for d in in_bin if d["matched"])
        results.append({
            "label": labels[i],
            "n_total": n_total,
            "n_matched": n_matched,
            "match_rate": n_matched / n_total if n_total > 0 else 0,
        })
    return results


def load_model(config_path, checkpoint_path, device):
    """Load model from config and checkpoint.

    Returns (model, cfg, model_name, tau_edge, tau_node); tau_node is
    None unless the model is a CaloClusterNet trained with lambda_node > 0.
    """
    with open(config_path) as f:
        cfg = yaml.safe_load(f)
    model = build_model(cfg)
    # NOTE(review): weights_only=False unpickles arbitrary objects — only
    # safe for checkpoints produced by this repo's own training runs.
    ckpt = torch.load(checkpoint_path, weights_only=False, map_location=device)
    model.load_state_dict(ckpt["model_state_dict"])
    model.to(device).eval()

    inf_cfg = cfg["inference"]
    tau_edge = inf_cfg["tau_edge"]
    model_name = cfg["model"].get("name", "SimpleEdgeNet")
    has_node_head = model_name == "CaloClusterNet"
    tau_node_cfg = inf_cfg.get("tau_node")
    lambda_node = cfg.get("train", {}).get("lambda_node", 0.0)
    tau_node = tau_node_cfg if (has_node_head and lambda_node > 0) else None

    return model, cfg, model_name, tau_edge, tau_node


def run_gnn_inference(model, data, device, edge_index, n_disk, energies,
                      tau_edge, tau_node):
    """Run GNN inference and reconstruct clusters.

    Handles both output conventions: dict with "edge_logits" (and an
    optional "node_logits") or a bare edge-logit tensor. Returns the
    per-hit cluster labels from reconstruct_clusters.
    """
    with torch.no_grad():
        output = model(data.to(device))

    if isinstance(output, dict):
        logits_np = output["edge_logits"].cpu().numpy()
        nl = output.get("node_logits")
        node_logits_np = nl.cpu().numpy() if nl is not None else None
    else:
        logits_np = output.cpu().numpy()
        node_logits_np = None

    labels, _ = reconstruct_clusters(
        edge_index=edge_index,
        edge_logits=logits_np,
        n_nodes=n_disk,
        energies=energies,
        tau_edge=tau_edge,
        min_hits=1,
        min_energy_mev=0.0,
        node_logits=node_logits_np,
        tau_node=tau_node,
    )
    return labels
def main():
    """Run the full Run1B comparison: BFS + both GNNs vs calo-entrant truth.

    Loads both frozen checkpoints, loops over Run1B ROOT files event by
    event and disk by disk, matches each method's clusters against MC
    truth, then prints summaries, writes two CSVs, and renders a 6-panel
    comparison figure.
    """
    parser = argparse.ArgumentParser(
        description="Run1B evaluation: BFS + both GNNs vs calo-entrant truth")
    parser.add_argument("--root-dir", type=str,
                        default="/exp/mu2e/data/users/wzhou2/GNN/root_files_run1b")
    parser.add_argument("--n-events", type=int, default=500,
                        help="Max events per file (default 500)")
    parser.add_argument("--n-files", type=int, default=None,
                        help="Max number of files (default: all)")
    parser.add_argument("--output-dir", type=str, default="outputs/run1b_eval")
    args = parser.parse_args()

    # CPU-only by design (no --device flag here, unlike the sibling script).
    device = torch.device("cpu")
    print(f"Device: {device}")

    # Load both models
    models = {}
    for name, cfg_path, ckpt_path in [
        ("SimpleEdgeNet",
         "configs/default.yaml",
         "outputs/runs/simple_edge_net_v2/checkpoints/best_model.pt"),
        ("CaloClusterNet",
         "configs/calo_cluster_net.yaml",
         "outputs/runs/calo_cluster_net_v2_stage1/checkpoints/best_model.pt"),
    ]:
        model, cfg, mname, tau_edge, tau_node = load_model(
            cfg_path, ckpt_path, device)
        models[name] = {
            "model": model, "cfg": cfg, "tau_edge": tau_edge,
            "tau_node": tau_node,
        }
        print(f" {name}: tau_edge={tau_edge}, tau_node={tau_node}")

    # Use graph config from first model (same for both)
    graph_cfg = models["SimpleEdgeNet"]["cfg"]["graph"]
    stats = load_stats(
        models["SimpleEdgeNet"]["cfg"]["data"]["normalization_stats"])
    crystal_map = load_crystal_map("data/crystal_geometry.csv")
    # crystal_map values are (disk, x, y); keep just the disk index.
    crystal_disk_map = {cid: disk for cid, (disk, _, _) in crystal_map.items()}

    # Get file list
    root_dir = Path(args.root_dir)
    files = sorted(root_dir.glob("*.root"))
    if args.n_files:
        files = files[:args.n_files]
    print(f"\nRun1B files: {len(files)}")
    print(f"Max events per file: {args.n_events}")

    branches = [
        "calohits.crystalId_", "calohits.eDep_", "calohits.time_",
        "calohits.clusterIdx_",
        "calohits.crystalPos_.fCoordinates.fX",
        "calohits.crystalPos_.fCoordinates.fY",
        "calohitsmc.simParticleIds", "calohitsmc.eDeps",
        "calomcsim.id", "calomcsim.ancestorSimIds",
    ]

    # Results per method
    all_results = {name: [] for name in ["BFS", "SimpleEdgeNet", "CaloClusterNet"]}
    all_detail = {name: [] for name in ["BFS", "SimpleEdgeNet", "CaloClusterNet"]}
    n_disk_graphs = 0
    n_events_total = 0
    t0 = time.time()

    for fi, fpath in enumerate(files):
        print(f" [{fi+1}/{len(files)}] {fpath.name}...", end=" ", flush=True)

        tree = uproot.open(str(fpath) + ":EventNtuple/ntuple")
        arrays = tree.arrays(branches, entry_stop=args.n_events)
        n_events = len(arrays)
        n_events_total += n_events

        for ev in range(n_events):
            nhits = len(arrays["calohits.crystalId_"][ev])
            if nhits == 0:
                continue

            cryids = np.array(arrays["calohits.crystalId_"][ev], dtype=np.int64)
            energies = np.array(arrays["calohits.eDep_"][ev], dtype=np.float64)
            times = np.array(arrays["calohits.time_"][ev], dtype=np.float64)
            cluster_idx = np.array(arrays["calohits.clusterIdx_"][ev], dtype=np.int64)
            xs = np.array(arrays["calohits.crystalPos_.fCoordinates.fX"][ev],
                          dtype=np.float64)
            ys = np.array(arrays["calohits.crystalPos_.fCoordinates.fY"][ev],
                          dtype=np.float64)
            simids = arrays["calohitsmc.simParticleIds"][ev]
            edeps_mc = arrays["calohitsmc.eDeps"][ev]

            sim_ids_evt = arrays["calomcsim.id"][ev]
            ancestor_ids_evt = arrays["calomcsim.ancestorSimIds"][ev]
            calo_root_map = build_calo_root_map(
                sim_ids_evt, ancestor_ids_evt,
                simids, cryids, crystal_disk_map)

            disks = np.array([crystal_map[int(c)][0] if int(c) in crystal_map
                              else -1 for c in cryids], dtype=np.int64)

            # Fall back to CSV geometry if the ntuple has no positions.
            if np.all(xs == 0) and np.all(ys == 0):
                for i, c in enumerate(cryids):
                    if int(c) in crystal_map:
                        _, xs[i], ys[i] = crystal_map[int(c)]

            for disk_id in [0, 1]:
                dm = disks == disk_id
                n_disk = dm.sum()
                if n_disk < 2:
                    continue  # need at least 2 hits to form a graph

                d_e = energies[dm]
                d_t = times[dm]
                d_x = xs[dm]
                d_y = ys[dm]
                d_pos = np.stack([d_x, d_y], axis=1)
                d_cidx = cluster_idx[dm]
                d_disks = np.full(n_disk, disk_id, dtype=np.int64)

                disk_indices = np.where(dm)[0]
                d_simids = [list(simids[i]) for i in disk_indices]
                d_edeps = [list(edeps_mc[i]) for i in disk_indices]

                mc_truth = build_mc_truth_clusters(d_simids, d_edeps, d_disks,
                                                   n_disk, calo_root_map)

                # BFS
                bfs_match, bfs_td = match_clusters_detail(d_cidx, mc_truth, d_e)
                all_results["BFS"].append(bfs_match)
                all_detail["BFS"].extend(bfs_td)

                # Build graph (shared by both GNNs)
                edge_index, _ = build_graph(
                    d_pos, d_t,
                    r_max=graph_cfg["r_max_mm"], dt_max=graph_cfg["dt_max_ns"],
                    k_min=graph_cfg["k_min"], k_max=graph_cfg["k_max"])

                if edge_index.shape[1] == 0:
                    # No edges: each hit is its own predicted cluster.
                    gnn_labels = np.arange(n_disk)
                    for gnn_name in ["SimpleEdgeNet", "CaloClusterNet"]:
                        gnn_match, gnn_td = match_clusters_detail(
                            gnn_labels, mc_truth, d_e)
                        all_results[gnn_name].append(gnn_match)
                        all_detail[gnn_name].extend(gnn_td)
                    n_disk_graphs += 1
                    continue

                node_feat = compute_node_features(d_pos, d_t, d_e)
                edge_feat = compute_edge_features(d_pos, d_t, d_e, edge_index)

                data = Data(
                    x=torch.from_numpy(node_feat),
                    edge_index=torch.from_numpy(edge_index),
                    edge_attr=torch.from_numpy(edge_feat),
                )
                normalize_graph(data, stats)

                # Run both GNNs
                for gnn_name in ["SimpleEdgeNet", "CaloClusterNet"]:
                    m = models[gnn_name]
                    gnn_labels = run_gnn_inference(
                        m["model"], data, device, edge_index, n_disk, d_e,
                        m["tau_edge"], m["tau_node"])
                    gnn_match, gnn_td = match_clusters_detail(
                        gnn_labels, mc_truth, d_e)
                    all_results[gnn_name].append(gnn_match)
                    all_detail[gnn_name].extend(gnn_td)

                n_disk_graphs += 1

        print(f"{n_events} events")

    elapsed = time.time() - t0
    print(f"\nProcessed {n_disk_graphs:,} disk-graphs from {n_events_total:,} "
          f"events ({len(files)} files) in {elapsed:.1f}s")

    # Aggregate
    agg = {name: aggregate_results(results) for name, results in all_results.items()}

    def print_summary(name, a):
        # Console summary block for one method.
        tau = ""
        if name in models:
            tau = f" (tau_edge={models[name]['tau_edge']})"
        print(f"\n{'='*60}")
        print(f" {name}{tau} vs MC Truth (calo-entrant)")
        print(f"{'='*60}")
        print(f" Reco clusters: {a['n_pred']:,}")
        print(f" Truth clusters: {a['n_truth']:,}")
        print(f" Reco match rate: {a['reco_match_rate']:.1%}")
        print(f" Truth match rate: {a['truth_match_rate']:.1%}")
        print(f" Mean purity: {a['mean_purity']:.4f}")
        print(f" Mean completeness: {a['mean_completeness']:.4f}")
        print(f" Splits: {a['n_split']:,}")
        print(f" Merges: {a['n_merged']:,}")

    for name in ["BFS", "SimpleEdgeNet", "CaloClusterNet"]:
        print_summary(name, agg[name])

    # Binned metrics
    energy_bins = [0, 50, 200, float("inf")]
    energy_labels = ["<50 MeV", "50-200 MeV", ">200 MeV"]
    mult_bins = [1, 2, 4, float("inf")]
    mult_labels = ["1 hit", "2-3 hits", "4+ hits"]

    print(f"\n{'='*60}")
    print(" Energy-binned truth match rate")
    print(f"{'='*60}")
    print(f" {'Bin':<15} {'BFS':>10} {'SEN':>10} {'CCN':>10} {'N_truth':>10}")
    for i in range(len(energy_labels)):
        vals = {}
        for name in ["BFS", "SimpleEdgeNet", "CaloClusterNet"]:
            bm = binned_match_rate(all_detail[name], "energy",
                                   energy_bins, energy_labels)
            vals[name] = bm[i]
        print(f" {energy_labels[i]:<15} "
              f"{vals['BFS']['match_rate']:>9.1%} "
              f"{vals['SimpleEdgeNet']['match_rate']:>9.1%} "
              f"{vals['CaloClusterNet']['match_rate']:>9.1%} "
              f"{vals['BFS']['n_total']:>10,}")

    print(f"\n{'='*60}")
    print(" Multiplicity-binned truth match rate")
    print(f"{'='*60}")
    print(f" {'Bin':<15} {'BFS':>10} {'SEN':>10} {'CCN':>10} {'N_truth':>10}")
    for i in range(len(mult_labels)):
        vals = {}
        for name in ["BFS", "SimpleEdgeNet", "CaloClusterNet"]:
            bm = binned_match_rate(all_detail[name], "n_hits",
                                   mult_bins, mult_labels)
            vals[name] = bm[i]
        print(f" {mult_labels[i]:<15} "
              f"{vals['BFS']['match_rate']:>9.1%} "
              f"{vals['SimpleEdgeNet']['match_rate']:>9.1%} "
              f"{vals['CaloClusterNet']['match_rate']:>9.1%} "
              f"{vals['BFS']['n_total']:>10,}")

    # Save CSVs
    out_dir = Path(args.output_dir)
    out_dir.mkdir(parents=True, exist_ok=True)

    csv_path = out_dir / "run1b_results.csv"
    rows = []
    for name in ["BFS", "SimpleEdgeNet", "CaloClusterNet"]:
        a = agg[name]
        tau = "-"
        if name in models:
            tau = str(models[name]["tau_edge"])
        rows.append({
            "method": name, "tau_edge": tau,
            "n_events": n_events_total, "n_disk_graphs": n_disk_graphs,
            "reco_clusters": a["n_pred"], "truth_clusters": a["n_truth"],
            "reco_match_rate": f"{a['reco_match_rate']:.4f}",
            "truth_match_rate": f"{a['truth_match_rate']:.4f}",
            "mean_purity": f"{a['mean_purity']:.4f}",
            "mean_completeness": f"{a['mean_completeness']:.4f}",
            "n_split": a["n_split"], "n_merged": a["n_merged"],
        })
    with open(csv_path, "w", newline="") as f:
        writer = csv.DictWriter(f, fieldnames=rows[0].keys())
        writer.writeheader()
        writer.writerows(rows)
    print(f"\nSaved results to {csv_path}")

    # Per-truth-cluster detail. The three detail lists are parallel: the
    # same truth clusters appear in the same order for every method.
    detail_csv = out_dir / "truth_cluster_detail.csv"
    with open(detail_csv, "w", newline="") as f:
        writer = csv.DictWriter(
            f, fieldnames=["energy", "n_hits",
                           "bfs_matched", "sen_matched", "ccn_matched"])
        writer.writeheader()
        for bd, sd, cd in zip(all_detail["BFS"], all_detail["SimpleEdgeNet"],
                              all_detail["CaloClusterNet"]):
            writer.writerow({
                "energy": f"{bd['energy']:.4f}",
                "n_hits": bd["n_hits"],
                "bfs_matched": int(bd["matched"]),
                "sen_matched": int(sd["matched"]),
                "ccn_matched": int(cd["matched"]),
            })
    print(f"Saved truth cluster detail to {detail_csv}")

    # Plot (matplotlib imported lazily; Agg backend for headless nodes)
    import matplotlib
    matplotlib.use("Agg")
    import matplotlib.pyplot as plt

    method_colors = {"BFS": "coral", "SimpleEdgeNet": "steelblue",
                     "CaloClusterNet": "seagreen"}
    method_short = {"BFS": "BFS", "SimpleEdgeNet": "SEN", "CaloClusterNet": "CCN"}

    fig, axes = plt.subplots(2, 3, figsize=(20, 12))
    fig.suptitle(
        f"Run1B (No Field) Evaluation -- {n_disk_graphs:,} disk-graphs, "
        f"{n_events_total:,} events\n"
        f"Models trained on MDC2025 (with field), evaluated on Run1B (no field)",
        fontsize=13, fontweight="bold")

    # 1. Overall match rates
    ax = axes[0, 0]
    metrics_names = ["Reco MR", "Truth MR"]
    x = np.arange(len(metrics_names))
    w = 0.25  # bar width, reused by the other grouped-bar panels
    for i, name in enumerate(["BFS", "SimpleEdgeNet", "CaloClusterNet"]):
        a = agg[name]
        vals = [a["reco_match_rate"] * 100, a["truth_match_rate"] * 100]
        bars = ax.bar(x + (i - 1) * w, vals, w, label=method_short[name],
                      color=method_colors[name], alpha=0.8)
        for j, v in enumerate(vals):
            ax.text(x[j] + (i - 1) * w, v + 0.5, f"{v:.1f}%",
                    ha="center", fontsize=8)
    ax.set_xticks(x)
    ax.set_xticklabels(metrics_names)
    ax.set_ylabel("%")
    ax.set_title("Match Rates")
    ax.legend(fontsize=9)
    ax.grid(alpha=0.3, axis="y")
    ax.set_ylim(0, 105)

    # 2. Splits and merges
    ax = axes[0, 1]
    metrics_names = ["Splits", "Merges"]
    x = np.arange(len(metrics_names))
    for i, name in enumerate(["BFS", "SimpleEdgeNet", "CaloClusterNet"]):
        a = agg[name]
        vals = [a["n_split"], a["n_merged"]]
        ax.bar(x + (i - 1) * w, vals, w, label=method_short[name],
               color=method_colors[name], alpha=0.8)
        for j, v in enumerate(vals):
            ax.text(x[j] + (i - 1) * w, v + max(vals) * 0.02, f"{v:,}",
                    ha="center", fontsize=8)
    ax.set_xticks(x)
    ax.set_xticklabels(metrics_names)
    ax.set_title("Splits & Merges")
    ax.legend(fontsize=9)
    ax.grid(alpha=0.3, axis="y")

    # 3. Purity distribution
    ax = axes[0, 2]
    bins_hist = np.linspace(0.5, 1.0, 60)
    for name in ["BFS", "SimpleEdgeNet", "CaloClusterNet"]:
        ax.hist(agg[name]["purities"], bins=bins_hist, alpha=0.4,
                label=f"{method_short[name]} ({agg[name]['mean_purity']:.4f})",
                color=method_colors[name], edgecolor="white")
    ax.set_xlabel("Purity")
    ax.set_ylabel("Count")
    ax.set_title("Cluster Purity Distribution")
    ax.legend(fontsize=9)
    ax.grid(alpha=0.3)

    # 4. Energy-binned truth match rate
    ax = axes[1, 0]
    x_e = np.arange(len(energy_labels))
    for i, name in enumerate(["BFS", "SimpleEdgeNet", "CaloClusterNet"]):
        bm = binned_match_rate(all_detail[name], "energy",
                               energy_bins, energy_labels)
        mr = [b["match_rate"] * 100 for b in bm]
        ax.bar(x_e + (i - 1) * w, mr, w, label=method_short[name],
               color=method_colors[name], alpha=0.8)
        for j, v in enumerate(mr):
            ax.text(x_e[j] + (i - 1) * w, v + 1, f"{v:.1f}%",
                    ha="center", fontsize=7)
    ax.set_xticks(x_e)
    ax.set_xticklabels(energy_labels)
    ax.set_ylabel("Truth match rate (%)")
    ax.set_title("Truth Match Rate by Energy")
    ax.legend(fontsize=9)
    ax.grid(alpha=0.3, axis="y")
    ax.set_ylim(0, 105)

    # 5. Multiplicity-binned truth match rate
    ax = axes[1, 1]
    x_m = np.arange(len(mult_labels))
    for i, name in enumerate(["BFS", "SimpleEdgeNet", "CaloClusterNet"]):
        bm = binned_match_rate(all_detail[name], "n_hits",
                               mult_bins, mult_labels)
        mr = [b["match_rate"] * 100 for b in bm]
        ax.bar(x_m + (i - 1) * w, mr, w, label=method_short[name],
               color=method_colors[name], alpha=0.8)
        for j, v in enumerate(mr):
            ax.text(x_m[j] + (i - 1) * w, v + 1, f"{v:.1f}%",
                    ha="center", fontsize=7)
    ax.set_xticks(x_m)
    ax.set_xticklabels(mult_labels)
    ax.set_ylabel("Truth match rate (%)")
    ax.set_title("Truth Match Rate by Hit Count")
    ax.legend(fontsize=9)
    ax.grid(alpha=0.3, axis="y")
    ax.set_ylim(0, 105)

    # 6. Summary table
    ax = axes[1, 2]
    ax.axis("off")
    table_data = []
    for metric, key, fmt in [
        ("Reco match rate", "reco_match_rate", "{:.1%}"),
        ("Truth match rate", "truth_match_rate", "{:.1%}"),
        ("Mean purity", "mean_purity", "{:.4f}"),
        ("Mean completeness", "mean_completeness", "{:.4f}"),
        ("Splits", "n_split", "{:,}"),
        ("Merges", "n_merged", "{:,}"),
    ]:
        row = [metric]
        for name in ["BFS", "SimpleEdgeNet", "CaloClusterNet"]:
            row.append(fmt.format(agg[name][key]))
        table_data.append(row)

    # NOTE(review): the "(0.26)" / "(0.20)" tau values in the column
    # labels are hard-coded and may disagree with the taus actually
    # loaded from the configs — confirm or derive from `models`.
    table = ax.table(
        cellText=table_data,
        colLabels=["Metric", "BFS", "SEN (0.26)", "CCN (0.20)"],
        cellLoc="center", loc="center")
    table.auto_set_font_size(False)
    table.set_fontsize(10)
    table.scale(1, 1.5)
    ax.set_title("Run1B Summary", pad=20)

    plt.tight_layout()
    plot_path = out_dir / "run1b_evaluation.png"
    plt.savefig(plot_path, dpi=150, bbox_inches="tight")
    print(f"Saved plot to {plot_path}")


if __name__ == "__main__":
    main()
def build_mc_truth_clusters(simids, edeps, disks, nhits,
                            calo_root_map, purity_thresh=0.7):
    """Assign an MC-truth cluster label to every hit using calo-entrant truth.

    Each hit's energy deposits are grouped by their calo-entrant root
    particle (looked up in ``calo_root_map`` keyed by ``(sim_id, disk)``;
    falls back to the sim id itself).  A hit is labelled by its dominant
    ``(disk, root)`` pair only when that root carries at least
    ``purity_thresh`` of the hit's total deposited energy.

    Returns an int64 array of length ``nhits`` (-1 = ambiguous/unassigned).
    """
    labels = np.full(nhits, -1, dtype=np.int64)
    label_of_key = {}  # (disk, calo_root) -> sequential cluster label

    for hit in range(nhits):
        contrib_ids = np.asarray(simids[hit])
        contrib_e = np.asarray(edeps[hit], dtype=np.float64)
        total_e = contrib_e.sum()
        if contrib_ids.size == 0 or total_e <= 0:
            continue  # nothing deposited on this hit
        disk = int(disks[hit])

        # Accumulate deposited energy per calo-entrant root particle.
        per_root = defaultdict(float)
        for sid, dep in zip(contrib_ids, contrib_e):
            per_root[calo_root_map.get((int(sid), disk), int(sid))] += float(dep)

        dominant = max(per_root, key=per_root.get)
        if per_root[dominant] / total_e < purity_thresh:
            continue  # no single root dominates -> leave unassigned

        key = (disk, dominant)
        if key not in label_of_key:
            # len() equals the next sequential label since we only ever append.
            label_of_key[key] = len(label_of_key)
        labels[hit] = label_of_key[key]

    return labels
+ """ + pred_ids = sorted(set(pred_labels[pred_labels >= 0].tolist())) + truth_ids = sorted(set(truth_labels[truth_labels >= 0].tolist())) + + # Per-truth-cluster properties + truth_energy = {} + truth_nhits = {} + for tid in truth_ids: + tmask = truth_labels == tid + truth_energy[tid] = float(energies[tmask].sum()) + truth_nhits[tid] = int(tmask.sum()) + + if not pred_ids or not truth_ids: + truth_detail = [ + {"truth_id": tid, "energy": truth_energy[tid], + "n_hits": truth_nhits[tid], "matched": False} + for tid in truth_ids + ] + return { + "n_pred": len(pred_ids), "n_truth": len(truth_ids), + "n_matched_pred": 0, "n_matched_truth": 0, + "purities": [], "completenesses": [], + "n_split": 0, "n_merged": 0, + }, truth_detail + + # Build energy overlap + overlap = defaultdict(lambda: defaultdict(float)) + pred_energy = defaultdict(float) + + for i in range(len(energies)): + e = energies[i] + p = pred_labels[i] + t = truth_labels[i] + if p >= 0: + pred_energy[p] += e + if p >= 0 and t >= 0: + overlap[p][t] += e + + # Greedy match: for each pred, find best truth + purities, completenesses = [], [] + matched_truth = set() + for p in sorted(pred_ids): + if p not in overlap: + continue + best_t = max(overlap[p], key=lambda t: overlap[p][t]) + shared = overlap[p][best_t] + pur = shared / pred_energy[p] if pred_energy[p] > 0 else 0 + comp = shared / truth_energy[best_t] if truth_energy[best_t] > 0 else 0 + if pur > 0.5 and comp > 0.5: + purities.append(pur) + completenesses.append(comp) + matched_truth.add(best_t) + + # Splits + truth_to_pred = defaultdict(list) + for p in sorted(pred_ids): + if p not in overlap: + continue + for t, e in overlap[p].items(): + if pred_energy[p] > 0 and e / pred_energy[p] > 0.5: + truth_to_pred[t].append(p) + n_split = sum(1 for ps in truth_to_pred.values() if len(ps) > 1) + + # Merges + n_merged = 0 + for p in sorted(pred_ids): + if p not in overlap: + continue + sig = [t for t, e in overlap[p].items() + if pred_energy[p] > 0 and e / 
pred_energy[p] > 0.1] + if len(sig) > 1: + n_merged += 1 + + # Per-truth detail + truth_detail = [ + {"truth_id": tid, "energy": truth_energy[tid], + "n_hits": truth_nhits[tid], "matched": tid in matched_truth} + for tid in truth_ids + ] + + return { + "n_pred": len(pred_ids), "n_truth": len(truth_ids), + "n_matched_pred": len(purities), "n_matched_truth": len(matched_truth), + "purities": purities, "completenesses": completenesses, + "n_split": n_split, "n_merged": n_merged, + }, truth_detail + + +def aggregate_results(results_list): + """Aggregate per-graph match results into summary stats.""" + all_pur = [p for r in results_list for p in r["purities"]] + all_comp = [c for r in results_list for c in r["completenesses"]] + n_pred = sum(r["n_pred"] for r in results_list) + n_truth = sum(r["n_truth"] for r in results_list) + n_matched_pred = sum(r["n_matched_pred"] for r in results_list) + n_matched_truth = sum(r["n_matched_truth"] for r in results_list) + n_split = sum(r["n_split"] for r in results_list) + n_merged = sum(r["n_merged"] for r in results_list) + + return { + "n_pred": n_pred, "n_truth": n_truth, + "n_matched_pred": n_matched_pred, "n_matched_truth": n_matched_truth, + "reco_match_rate": n_matched_pred / n_pred if n_pred > 0 else 0, + "truth_match_rate": n_matched_truth / n_truth if n_truth > 0 else 0, + "mean_purity": float(np.mean(all_pur)) if all_pur else 0, + "median_purity": float(np.median(all_pur)) if all_pur else 0, + "mean_completeness": float(np.mean(all_comp)) if all_comp else 0, + "median_completeness": float(np.median(all_comp)) if all_comp else 0, + "n_split": n_split, "n_merged": n_merged, + "purities": all_pur, "completenesses": all_comp, + } + + +def binned_match_rate(detail_records, key, bins, labels): + """Compute truth match rate in bins of a given key. + + Returns list of dicts: [{label, n_total, n_matched, match_rate}, ...] 
+ """ + results = [] + for i in range(len(bins) - 1): + lo, hi = bins[i], bins[i + 1] + in_bin = [d for d in detail_records if lo <= d[key] < hi] + n_total = len(in_bin) + n_matched = sum(1 for d in in_bin if d["matched"]) + results.append({ + "label": labels[i], + "n_total": n_total, + "n_matched": n_matched, + "match_rate": n_matched / n_total if n_total > 0 else 0, + }) + return results + + +def main(): + parser = argparse.ArgumentParser( + description="Test set evaluation: GNN vs BFS (run once)") + parser.add_argument("--root-dir", type=str, + default="/exp/mu2e/data/users/wzhou2/GNN/root_files_v2") + parser.add_argument("--checkpoint", type=str, + default="outputs/runs/simple_edge_net_v2/checkpoints/best_model.pt") + parser.add_argument("--config", type=str, default="configs/default.yaml") + parser.add_argument("--output-dir", type=str, default=None, + help="Output directory (default: outputs/test_eval_)") + parser.add_argument("--n-events", type=int, default=None, + help="Max events per file (default: all). " + "500-1000 gives stable statistics.") + parser.add_argument("--device", type=str, default=None, + help="Force device (cpu/cuda). 
Auto-detects if omitted.") + args = parser.parse_args() + + with open(args.config) as f: + cfg = yaml.safe_load(f) + + if args.device: + device = torch.device(args.device) + elif torch.cuda.is_available(): + device = torch.device("cuda") + else: + device = torch.device("cpu") + print(f"Device: {device}") + + # Load model + model = build_model(cfg) + ckpt = torch.load(args.checkpoint, weights_only=False, map_location=device) + model.load_state_dict(ckpt["model_state_dict"]) + model.to(device).eval() + model_name = cfg["model"].get("name", "SimpleEdgeNet") + print(f"Model: {model_name}") + print(f"Loaded from epoch {ckpt['epoch']} (val F1={ckpt['val_f1']:.4f})") + + # Frozen inference parameters + inf_cfg = cfg["inference"] + tau_edge = inf_cfg["tau_edge"] + # tau_node only used when explicitly set in config AND model has node head + has_node_head = model_name == "CaloClusterNet" + tau_node_cfg = inf_cfg.get("tau_node") + # Only apply tau_node if the training config had lambda_node > 0 + lambda_node = cfg.get("train", {}).get("lambda_node", 0.0) + tau_node = tau_node_cfg if (has_node_head and lambda_node > 0) else None + print(f"Frozen tau_edge = {tau_edge}") + if tau_node is not None: + print(f"Frozen tau_node = {tau_node}") + else: + print(f"tau_node = disabled") + + # Output directory (model-specific default) + if args.output_dir is None: + suffix = model_name.lower() + args.output_dir = f"outputs/test_eval_{suffix}" + + # Load normalization stats and crystal map + stats = load_stats(cfg["data"]["normalization_stats"]) + crystal_map = load_crystal_map("data/crystal_geometry.csv") + graph_cfg = cfg["graph"] + + # Load test file list + with open(cfg["data"]["splits"]["test"]) as f: + test_files = [l.strip() for l in f if l.strip()] + print(f"Test files: {len(test_files)}") + if args.n_events: + print(f"Max events per file: {args.n_events}") + + branches = [ + "calohits.crystalId_", "calohits.eDep_", "calohits.time_", + "calohits.clusterIdx_", + 
"calohits.crystalPos_.fCoordinates.fX", + "calohits.crystalPos_.fCoordinates.fY", + "calohitsmc.simParticleIds", "calohitsmc.eDeps", + "calomcsim.id", "calomcsim.ancestorSimIds", + ] + + # Crystal disk map for calo-entrant truth + crystal_disk_map = {cid: disk for cid, (disk, _, _) in crystal_map.items()} + + bfs_results = [] + gnn_results = [] + # Per-truth-cluster detail for binned metrics + bfs_truth_detail = [] + gnn_truth_detail = [] + n_disk_graphs = 0 + t0 = time.time() + + for fi, fpath in enumerate(test_files): + fname = Path(fpath).name + local_path = str(Path(args.root_dir) / fname) + print(f" [{fi+1}/{len(test_files)}] {fname}...", end=" ", flush=True) + + tree = uproot.open(local_path + ":EventNtuple/ntuple") + arrays = tree.arrays(branches, entry_stop=args.n_events) + n_events = len(arrays) + + for ev in range(n_events): + nhits = len(arrays["calohits.crystalId_"][ev]) + if nhits == 0: + continue + + cryids = np.array(arrays["calohits.crystalId_"][ev], dtype=np.int64) + energies = np.array(arrays["calohits.eDep_"][ev], dtype=np.float64) + times = np.array(arrays["calohits.time_"][ev], dtype=np.float64) + cluster_idx = np.array(arrays["calohits.clusterIdx_"][ev], dtype=np.int64) + xs = np.array(arrays["calohits.crystalPos_.fCoordinates.fX"][ev], + dtype=np.float64) + ys = np.array(arrays["calohits.crystalPos_.fCoordinates.fY"][ev], + dtype=np.float64) + simids = arrays["calohitsmc.simParticleIds"][ev] + edeps_mc = arrays["calohitsmc.eDeps"][ev] + + # Build calo-entrant root map for this event + sim_ids_evt = arrays["calomcsim.id"][ev] + ancestor_ids_evt = arrays["calomcsim.ancestorSimIds"][ev] + calo_root_map = build_calo_root_map( + sim_ids_evt, ancestor_ids_evt, + simids, cryids, crystal_disk_map) + + disks = np.array([crystal_map[int(c)][0] if int(c) in crystal_map + else -1 for c in cryids], dtype=np.int64) + + # Fallback positions + if np.all(xs == 0) and np.all(ys == 0): + for i, c in enumerate(cryids): + if int(c) in crystal_map: + _, xs[i], 
ys[i] = crystal_map[int(c)] + + for disk_id in [0, 1]: + dm = disks == disk_id + n_disk = dm.sum() + if n_disk < 2: + continue + + d_e = energies[dm] + d_t = times[dm] + d_x = xs[dm] + d_y = ys[dm] + d_pos = np.stack([d_x, d_y], axis=1) + d_cidx = cluster_idx[dm] + d_disks = np.full(n_disk, disk_id, dtype=np.int64) + + disk_indices = np.where(dm)[0] + d_simids = [list(simids[i]) for i in disk_indices] + d_edeps = [list(edeps_mc[i]) for i in disk_indices] + + # MC truth clusters (calo-entrant truth) + mc_truth = build_mc_truth_clusters(d_simids, d_edeps, d_disks, + n_disk, calo_root_map) + + # ── BFS ── + bfs_match, bfs_td = match_clusters_detail(d_cidx, mc_truth, d_e) + bfs_results.append(bfs_match) + bfs_truth_detail.extend(bfs_td) + + # ── GNN ── + edge_index, _ = build_graph( + d_pos, d_t, + r_max=graph_cfg["r_max_mm"], dt_max=graph_cfg["dt_max_ns"], + k_min=graph_cfg["k_min"], k_max=graph_cfg["k_max"]) + + if edge_index.shape[1] == 0: + # No edges — each node is its own cluster + gnn_labels = np.arange(n_disk) + gnn_match, gnn_td = match_clusters_detail( + gnn_labels, mc_truth, d_e) + gnn_results.append(gnn_match) + gnn_truth_detail.extend(gnn_td) + n_disk_graphs += 1 + continue + + node_feat = compute_node_features(d_pos, d_t, d_e) + edge_feat = compute_edge_features(d_pos, d_t, d_e, edge_index) + + data = Data( + x=torch.from_numpy(node_feat), + edge_index=torch.from_numpy(edge_index), + edge_attr=torch.from_numpy(edge_feat), + ) + normalize_graph(data, stats) + + # Model inference + with torch.no_grad(): + output = model(data.to(device)) + + # Handle both dict (CaloClusterNet) and tensor (SimpleEdgeNet) + if isinstance(output, dict): + logits_np = output["edge_logits"].cpu().numpy() + nl = output.get("node_logits") + node_logits_np = nl.cpu().numpy() if nl is not None else None + else: + logits_np = output.cpu().numpy() + node_logits_np = None + + gnn_labels, _ = reconstruct_clusters( + edge_index=edge_index, + edge_logits=logits_np, + n_nodes=n_disk, + 
energies=d_e, + tau_edge=tau_edge, + min_hits=1, + min_energy_mev=0.0, + node_logits=node_logits_np, + tau_node=tau_node, + ) + + gnn_match, gnn_td = match_clusters_detail( + gnn_labels, mc_truth, d_e) + gnn_results.append(gnn_match) + gnn_truth_detail.extend(gnn_td) + n_disk_graphs += 1 + + print(f"{n_events} events") + + elapsed = time.time() - t0 + print(f"\nProcessed {n_disk_graphs} disk-graphs from {len(test_files)} files " + f"in {elapsed:.1f}s") + + # ── Aggregate overall metrics ── + bfs_agg = aggregate_results(bfs_results) + gnn_agg = aggregate_results(gnn_results) + + def print_summary(name, agg): + print(f"\n{'='*60}") + print(f" {name} vs MC Truth") + print(f"{'='*60}") + print(f" Reco clusters: {agg['n_pred']:,}") + print(f" Truth clusters: {agg['n_truth']:,}") + print(f" Reco match rate: {agg['reco_match_rate']:.1%}") + print(f" Truth match rate: {agg['truth_match_rate']:.1%}") + print(f" Mean purity: {agg['mean_purity']:.4f}") + print(f" Median purity: {agg['median_purity']:.4f}") + print(f" Mean completeness: {agg['mean_completeness']:.4f}") + print(f" Median completeness: {agg['median_completeness']:.4f}") + print(f" Splits: {agg['n_split']:,}") + print(f" Merges: {agg['n_merged']:,}") + + print_summary("BFS", bfs_agg) + print_summary(f"GNN (tau_edge={tau_edge})", gnn_agg) + + # ── Binned metrics ── + energy_bins = [0, 50, 200, float("inf")] + energy_labels = ["<50 MeV", "50-200 MeV", ">200 MeV"] + # Hit multiplicity bins (use n_hits directly) + mult_bins = [1, 2, 4, float("inf")] + mult_labels = ["1 hit", "2-3 hits", "4+ hits"] + + bfs_energy_bins = binned_match_rate(bfs_truth_detail, "energy", + energy_bins, energy_labels) + gnn_energy_bins = binned_match_rate(gnn_truth_detail, "energy", + energy_bins, energy_labels) + bfs_mult_bins = binned_match_rate(bfs_truth_detail, "n_hits", + mult_bins, mult_labels) + gnn_mult_bins = binned_match_rate(gnn_truth_detail, "n_hits", + mult_bins, mult_labels) + + print(f"\n{'='*60}") + print(" Energy-binned 
truth match rate") + print(f"{'='*60}") + print(f" {'Bin':<15} {'BFS':>12} {'GNN':>12} {'N_truth':>10}") + for b, g in zip(bfs_energy_bins, gnn_energy_bins): + print(f" {b['label']:<15} {b['match_rate']:>11.1%} " + f"{g['match_rate']:>11.1%} {b['n_total']:>10,}") + + print(f"\n{'='*60}") + print(" Multiplicity-binned truth match rate") + print(f"{'='*60}") + print(f" {'Bin':<15} {'BFS':>12} {'GNN':>12} {'N_truth':>10}") + for b, g in zip(bfs_mult_bins, gnn_mult_bins): + print(f" {b['label']:<15} {b['match_rate']:>11.1%} " + f"{g['match_rate']:>11.1%} {b['n_total']:>10,}") + + # ── Save CSVs ── + out_dir = Path(args.output_dir) + out_dir.mkdir(parents=True, exist_ok=True) + + # Overall results + csv_path = out_dir / "test_results.csv" + rows = [] + for name, agg in [("BFS", bfs_agg), ("GNN", gnn_agg)]: + rows.append({ + "method": name, + "tau_edge": "-" if name == "BFS" else f"{tau_edge}", + "reco_clusters": agg["n_pred"], + "truth_clusters": agg["n_truth"], + "reco_match_rate": f"{agg['reco_match_rate']:.4f}", + "truth_match_rate": f"{agg['truth_match_rate']:.4f}", + "mean_purity": f"{agg['mean_purity']:.4f}", + "median_purity": f"{agg['median_purity']:.4f}", + "mean_completeness": f"{agg['mean_completeness']:.4f}", + "median_completeness": f"{agg['median_completeness']:.4f}", + "n_split": agg["n_split"], + "n_merged": agg["n_merged"], + }) + with open(csv_path, "w", newline="") as f: + writer = csv.DictWriter(f, fieldnames=rows[0].keys()) + writer.writeheader() + writer.writerows(rows) + print(f"\nSaved overall results to {csv_path}") + + # Per-truth-cluster detail + detail_csv = out_dir / "truth_cluster_detail.csv" + with open(detail_csv, "w", newline="") as f: + writer = csv.DictWriter( + f, fieldnames=["energy", "n_hits", "bfs_matched", "gnn_matched"]) + writer.writeheader() + for bd, gd in zip(bfs_truth_detail, gnn_truth_detail): + writer.writerow({ + "energy": f"{bd['energy']:.4f}", + "n_hits": bd["n_hits"], + "bfs_matched": int(bd["matched"]), + 
"gnn_matched": int(gd["matched"]), + }) + print(f"Saved truth cluster detail to {detail_csv}") + + # ── Plot ── + import matplotlib + matplotlib.use("Agg") + import matplotlib.pyplot as plt + + fig, axes = plt.subplots(3, 3, figsize=(20, 18)) + fig.suptitle(f"Test Set Evaluation — GNN ($\\tau_{{edge}}$={tau_edge}) vs BFS\n" + f"{n_disk_graphs:,} disk-graphs from {len(test_files)} files", + fontsize=14, fontweight="bold") + + # ── Row 1: overall distributions ── + + # 1. Purity distribution + ax = axes[0, 0] + bins_hist = np.linspace(0.5, 1.0, 60) + ax.hist(bfs_agg["purities"], bins=bins_hist, alpha=0.6, label="BFS", + color="coral", edgecolor="white") + ax.hist(gnn_agg["purities"], bins=bins_hist, alpha=0.6, label="GNN", + color="steelblue", edgecolor="white") + ax.axvline(bfs_agg["mean_purity"], color="red", linestyle="--", linewidth=2) + ax.axvline(gnn_agg["mean_purity"], color="blue", linestyle="--", linewidth=2) + ax.set_xlabel("Purity") + ax.set_ylabel("Count") + ax.set_title("Cluster Purity (vs MC truth)") + ax.legend() + ax.grid(alpha=0.3) + + # 2. Completeness distribution + ax = axes[0, 1] + ax.hist(bfs_agg["completenesses"], bins=bins_hist, alpha=0.6, label="BFS", + color="coral", edgecolor="white") + ax.hist(gnn_agg["completenesses"], bins=bins_hist, alpha=0.6, label="GNN", + color="steelblue", edgecolor="white") + ax.axvline(bfs_agg["mean_completeness"], color="red", linestyle="--", + linewidth=2) + ax.axvline(gnn_agg["mean_completeness"], color="blue", linestyle="--", + linewidth=2) + ax.set_xlabel("Completeness") + ax.set_ylabel("Count") + ax.set_title("Cluster Completeness (vs MC truth)") + ax.legend() + ax.grid(alpha=0.3) + + # 3. 
Overall match rates + splits/merges + ax = axes[0, 2] + metrics = ["Reco\nmatch %", "Truth\nmatch %", "Splits", "Merges"] + bfs_vals = [bfs_agg["reco_match_rate"] * 100, + bfs_agg["truth_match_rate"] * 100, + bfs_agg["n_split"], bfs_agg["n_merged"]] + gnn_vals = [gnn_agg["reco_match_rate"] * 100, + gnn_agg["truth_match_rate"] * 100, + gnn_agg["n_split"], gnn_agg["n_merged"]] + x = np.arange(len(metrics)) + w = 0.35 + bars_bfs = ax.bar(x - w / 2, bfs_vals, w, label="BFS", color="coral", + alpha=0.8) + bars_gnn = ax.bar(x + w / 2, gnn_vals, w, label="GNN", color="steelblue", + alpha=0.8) + for i, (b, g) in enumerate(zip(bfs_vals, gnn_vals)): + fmt = f"{b:.1f}%" if i < 2 else f"{int(b):,}" + ax.text(x[i] - w / 2, b + max(bfs_vals + gnn_vals) * 0.02, fmt, + ha="center", fontsize=8) + fmt = f"{g:.1f}%" if i < 2 else f"{int(g):,}" + ax.text(x[i] + w / 2, g + max(bfs_vals + gnn_vals) * 0.02, fmt, + ha="center", fontsize=8) + ax.set_xticks(x) + ax.set_xticklabels(metrics) + ax.set_title("Match Rates & Error Modes") + ax.legend() + ax.grid(alpha=0.3, axis="y") + + # ── Row 2: energy-binned metrics ── + + x_e = np.arange(len(energy_labels)) + + # 4. Energy-binned truth match rate + ax = axes[1, 0] + bfs_mr = [b["match_rate"] * 100 for b in bfs_energy_bins] + gnn_mr = [g["match_rate"] * 100 for g in gnn_energy_bins] + ax.bar(x_e - w / 2, bfs_mr, w, label="BFS", color="coral", alpha=0.8) + ax.bar(x_e + w / 2, gnn_mr, w, label="GNN", color="steelblue", alpha=0.8) + for i, (b, g) in enumerate(zip(bfs_mr, gnn_mr)): + ax.text(x_e[i] - w / 2, b + 1, f"{b:.1f}%", ha="center", fontsize=8) + ax.text(x_e[i] + w / 2, g + 1, f"{g:.1f}%", ha="center", fontsize=8) + ax.set_xticks(x_e) + ax.set_xticklabels(energy_labels) + ax.set_ylabel("Truth match rate (%)") + ax.set_title("Truth Match Rate by Cluster Energy") + ax.legend() + ax.grid(alpha=0.3, axis="y") + ax.set_ylim(0, 105) + + # 5. 
Energy-binned counts (stacked bar showing matched vs unmatched) + ax = axes[1, 1] + for method, detail, color, offset in [ + ("BFS", bfs_energy_bins, "coral", -w / 2), + ("GNN", gnn_energy_bins, "steelblue", w / 2)]: + matched = [b["n_matched"] for b in detail] + unmatched = [b["n_total"] - b["n_matched"] for b in detail] + ax.bar(x_e + offset, matched, w, label=f"{method} matched", + color=color, alpha=0.8) + ax.bar(x_e + offset, unmatched, w, bottom=matched, + label=f"{method} unmatched", color=color, alpha=0.3, + edgecolor=color, linewidth=0.5) + ax.set_xticks(x_e) + ax.set_xticklabels(energy_labels) + ax.set_ylabel("Truth clusters") + ax.set_title("Truth Cluster Counts by Energy") + ax.legend(fontsize=8, ncol=2) + ax.grid(alpha=0.3, axis="y") + + # 6. Energy bin truth cluster size distribution + ax = axes[1, 2] + for label, lo, hi, color in [ + ("<50 MeV", 0, 50, "#1b9e77"), + ("50-200 MeV", 50, 200, "#d95f02"), + (">200 MeV", 200, float("inf"), "#7570b3")]: + nhits = [d["n_hits"] for d in gnn_truth_detail + if lo <= d["energy"] < hi] + if nhits: + ax.hist(nhits, bins=range(1, max(nhits) + 2), alpha=0.5, + label=f"{label} (n={len(nhits):,})", color=color, + edgecolor="white") + ax.set_xlabel("Hits per truth cluster") + ax.set_ylabel("Count") + ax.set_title("Truth Cluster Size by Energy Bin") + ax.legend(fontsize=9) + ax.grid(alpha=0.3) + ax.set_xlim(0.5, 15.5) + + # ── Row 3: multiplicity-binned metrics ── + + x_m = np.arange(len(mult_labels)) + + # 7. 
Multiplicity-binned truth match rate + ax = axes[2, 0] + bfs_mr_m = [b["match_rate"] * 100 for b in bfs_mult_bins] + gnn_mr_m = [g["match_rate"] * 100 for g in gnn_mult_bins] + ax.bar(x_m - w / 2, bfs_mr_m, w, label="BFS", color="coral", alpha=0.8) + ax.bar(x_m + w / 2, gnn_mr_m, w, label="GNN", color="steelblue", alpha=0.8) + for i, (b, g) in enumerate(zip(bfs_mr_m, gnn_mr_m)): + ax.text(x_m[i] - w / 2, b + 1, f"{b:.1f}%", ha="center", fontsize=8) + ax.text(x_m[i] + w / 2, g + 1, f"{g:.1f}%", ha="center", fontsize=8) + ax.set_xticks(x_m) + ax.set_xticklabels(mult_labels) + ax.set_ylabel("Truth match rate (%)") + ax.set_title("Truth Match Rate by Cluster Hit Count") + ax.legend() + ax.grid(alpha=0.3, axis="y") + ax.set_ylim(0, 105) + + # 8. Per-graph purity scatter + ax = axes[2, 1] + bfs_pg = [np.mean(r["purities"]) if r["purities"] else 1.0 + for r in bfs_results] + gnn_pg = [np.mean(r["purities"]) if r["purities"] else 1.0 + for r in gnn_results] + ax.scatter(bfs_pg, gnn_pg, alpha=0.15, s=8, color="navy") + ax.plot([0.5, 1], [0.5, 1], "r--", alpha=0.5) + ax.set_xlabel("BFS purity") + ax.set_ylabel("GNN purity") + ax.set_title("Per-Graph Purity: GNN vs BFS") + ax.set_xlim(0.5, 1.02) + ax.set_ylim(0.5, 1.02) + ax.grid(alpha=0.3) + + # 9. 
Summary table + ax = axes[2, 2] + ax.axis("off") + table_data = [ + ["Reco clusters", f"{bfs_agg['n_pred']:,}", f"{gnn_agg['n_pred']:,}"], + ["Truth clusters", f"{bfs_agg['n_truth']:,}", f"{gnn_agg['n_truth']:,}"], + ["Reco match rate", f"{bfs_agg['reco_match_rate']:.1%}", + f"{gnn_agg['reco_match_rate']:.1%}"], + ["Truth match rate", f"{bfs_agg['truth_match_rate']:.1%}", + f"{gnn_agg['truth_match_rate']:.1%}"], + ["Mean purity", f"{bfs_agg['mean_purity']:.4f}", + f"{gnn_agg['mean_purity']:.4f}"], + ["Mean completeness", f"{bfs_agg['mean_completeness']:.4f}", + f"{gnn_agg['mean_completeness']:.4f}"], + ["Splits", f"{bfs_agg['n_split']:,}", f"{gnn_agg['n_split']:,}"], + ["Merges", f"{bfs_agg['n_merged']:,}", f"{gnn_agg['n_merged']:,}"], + ] + table = ax.table(cellText=table_data, colLabels=["Metric", "BFS", "GNN"], + cellLoc="center", loc="center") + table.auto_set_font_size(False) + table.set_fontsize(11) + table.scale(1, 1.5) + # Highlight winning cells + for i, row in enumerate(table_data): + try: + bv = float(row[1].rstrip("%").replace(",", "")) + gv = float(row[2].rstrip("%").replace(",", "")) + metric = row[0].lower() + if "split" in metric or "merge" in metric: + winner = 1 if bv <= gv else 2 + else: + winner = 1 if bv >= gv else 2 + table[i + 1, winner].set_facecolor("#d4edda") + except ValueError: + pass + ax.set_title("Summary Comparison", pad=20) + + plt.tight_layout() + plot_path = out_dir / "test_evaluation.png" + plt.savefig(plot_path, dpi=150, bbox_inches="tight") + print(f"Saved plot to {plot_path}") + + +if __name__ == "__main__": + main() diff --git a/CaloClusterGNN/scripts/failure_audit.py b/CaloClusterGNN/scripts/failure_audit.py new file mode 100644 index 0000000..f60d154 --- /dev/null +++ b/CaloClusterGNN/scripts/failure_audit.py @@ -0,0 +1,561 @@ +#!/usr/bin/env python3 +""" +Failure audit for edge-classification GNN clustering. + +Answers five questions: + 1. Are merges caused by a few bridge edges, or many? + 2. 
Where do bad bridge edges live (spatial, energy, time)? + 3. How does threshold choice affect the merge/split balance? + 4. Are failures concentrated in tiny truth objects? + 5. Is the truth definition creating artificial errors? + +Usage: + source setup_env.sh + python3 scripts/failure_audit.py + python3 scripts/failure_audit.py --config configs/calo_cluster_net.yaml \ + --checkpoint outputs/runs/calo_cluster_net_v2_stage1/checkpoints/best_model.pt +""" + +import argparse +import json +import sys +from collections import defaultdict +from pathlib import Path + +sys.path.insert(0, str(Path(__file__).resolve().parents[1])) + +import numpy as np +import torch +import yaml + +from src.data.dataset import CaloGraphDataset +from src.data.normalization import load_stats, normalize_graph +from src.inference.cluster_reco import symmetrize_edge_scores +from src.models import build_model + + +def load_model_and_data(args): + """Load model, config, and val dataset.""" + with open(args.config) as f: + cfg = yaml.safe_load(f) + + device = torch.device(args.device) + + # Load val data + val_packed = Path(cfg["data"]["processed_dir"]) / "val.pt" + val_files = [l.strip() for l in open(cfg["data"]["splits"]["val"]) if l.strip()] + val_dataset = CaloGraphDataset( + cfg["data"]["processed_dir"], file_list=val_files, preload=True, + packed_path=val_packed if val_packed.exists() else None, + ) + + # Normalize + stats = load_stats(cfg["data"]["normalization_stats"]) + for data in val_dataset._cache: + normalize_graph(data, stats) + + # Load model + model = build_model(cfg) + ckpt_path = args.checkpoint + ckpt = torch.load(ckpt_path, weights_only=False, map_location=device) + model.load_state_dict(ckpt["model_state_dict"]) + model.to(device) + model.eval() + + tau_edge = cfg["inference"]["tau_edge"] + + return model, val_dataset, cfg, device, tau_edge + + +def run_inference(model, data, device): + """Run model forward pass, return edge probs and logits.""" + data_dev = 
def analyze_graph(data, logits, probs, tau_edge):
    """Analyze a single graph for merge/split failures.

    Parameters
    ----------
    data : torch_geometric Data with ``edge_index``, ``hit_truth_cluster``, ``x``.
    logits : per-edge raw scores (kept for interface compatibility; unused here).
    probs : per-edge sigmoid probabilities, aligned with ``data.edge_index``.
    tau_edge : float threshold above which an edge is predicted positive.

    Returns
    -------
    dict with per-graph audit results, or None if the graph is trivial
    (no edge survives the threshold).
    """
    edge_index = data.edge_index.numpy()
    truth = data.hit_truth_cluster.numpy()
    n_nodes = data.x.shape[0]

    # Symmetrize edges so (i, j) and (j, i) carry one agreed score.
    ei_sym, ep_sym = symmetrize_edge_scores(edge_index, probs)
    src_s, dst_s = ei_sym[0], ei_sym[1]

    from scipy.sparse import coo_matrix
    from scipy.sparse.csgraph import connected_components

    # Threshold → connected components
    keep = ep_sym >= tau_edge
    if keep.sum() == 0:
        return None

    src_k, dst_k = src_s[keep], dst_s[keep]
    src_both = np.concatenate([src_k, dst_k])
    dst_both = np.concatenate([dst_k, src_k])
    adj = coo_matrix(
        (np.ones(len(src_both)), (src_both, dst_both)),
        shape=(n_nodes, n_nodes),
    )
    _, pred_labels = connected_components(adj, directed=False)

    # Identify truth clusters (exclude unassigned = -1)
    truth_ids = np.unique(truth[truth >= 0])
    pred_ids = np.unique(pred_labels)

    # Vectorized per-edge truth lookups, shared by the merge scan and the
    # edge-level stats below (replaces the original per-edge Python loops).
    t_src = truth[src_s]
    t_dst = truth[dst_s]
    valid_e = (t_src >= 0) & (t_dst >= 0)   # both endpoints truth-assigned
    pos_e = keep                            # predicted-positive edges
    cross_e = valid_e & (t_src != t_dst)    # edges crossing truth boundaries
    same_e = valid_e & (t_src == t_dst)

    # === Find merges ===
    # A merge = one predicted cluster containing hits from 2+ truth clusters
    merges = []
    for pid in pred_ids:
        pmask = pred_labels == pid
        valid_truth = truth[pmask]
        valid_truth = valid_truth[valid_truth >= 0]
        if len(valid_truth) == 0:
            continue
        unique_truth = np.unique(valid_truth)
        if len(unique_truth) <= 1:
            continue

        # This is a merge: pred cluster `pid` fuses truth clusters `unique_truth`.
        # Bridge edges: predicted-positive edges inside `pid` that cross
        # truth boundaries. np.nonzero preserves edge order, so the list
        # order matches the original per-edge loop.
        in_pid = (pred_labels[src_s] == pid) & (pred_labels[dst_s] == pid)
        bridge_idx = np.nonzero(pos_e & in_pid & cross_e)[0]
        bridge_edges = [
            {
                "src": int(src_s[e]), "dst": int(dst_s[e]),
                "score": float(ep_sym[e]),
                "truth_src": int(t_src[e]), "truth_dst": int(t_dst[e]),
            }
            for e in bridge_idx
        ]

        # Truth cluster properties
        fused_clusters = [
            {"truth_id": int(tid), "n_hits": int((truth == tid).sum())}
            for tid in unique_truth
        ]

        merges.append({
            "pred_id": int(pid),
            "n_truth_fused": len(unique_truth),
            "n_bridge_edges": len(bridge_edges),
            "bridge_edges": bridge_edges,
            "bridge_scores": [be["score"] for be in bridge_edges],
            "fused_clusters": fused_clusters,
        })

    # === Find splits ===
    # A split = one truth cluster whose hits end up in 2+ predicted clusters
    splits = []
    for tid in truth_ids:
        tmask = truth == tid
        unique_preds = np.unique(pred_labels[tmask])
        if len(unique_preds) <= 1:
            continue
        splits.append({
            "truth_id": int(tid),
            "n_hits": int(tmask.sum()),
            "n_pred_fragments": len(unique_preds),
        })

    # === Edge-level stats for all symmetric edges ===
    # Vectorized TP/FP/FN/TN classification (edges with an unassigned
    # endpoint are excluded, as in the original loop).
    fp_mask = cross_e & pos_e
    fn_mask = same_e & ~pos_e
    n_tp = int(np.count_nonzero(same_e & pos_e))
    n_fp = int(np.count_nonzero(fp_mask))
    n_fn = int(np.count_nonzero(fn_mask))
    n_tn = int(np.count_nonzero(cross_e & ~pos_e))
    fp_scores = [float(s) for s in ep_sym[fp_mask]]
    fn_scores = [float(s) for s in ep_sym[fn_mask]]

    # === Truth cluster size distribution ===
    truth_sizes = {int(tid): int((truth == tid).sum()) for tid in truth_ids}

    return {
        "n_nodes": n_nodes,
        "n_truth_clusters": len(truth_ids),
        "n_pred_clusters": len(pred_ids),
        "merges": merges,
        "splits": splits,
        "n_tp": n_tp, "n_fp": n_fp, "n_fn": n_fn, "n_tn": n_tn,
        "fp_scores": fp_scores,
        "fn_scores": fn_scores,
        "truth_sizes": truth_sizes,
    }


def threshold_sweep(model, dataset, device, thresholds):
    """Sweep thresholds and count merges/splits at each.

    Edge probabilities are computed once per graph; per-threshold edge
    classification is vectorized (the original re-ran a Python loop over
    every edge for every threshold).
    """
    from scipy.sparse import coo_matrix
    from scipy.sparse.csgraph import connected_components

    results = []
    # Pre-compute all edge probs and per-edge truth lookups once.
    all_data = []
    for data in dataset:
        _, probs = run_inference(model, data, device)
        ei = data.edge_index.numpy()
        ei_sym, ep_sym = symmetrize_edge_scores(ei, probs)
        all_data.append((data, ei_sym, ep_sym))

    for tau in thresholds:
        total_merges = 0
        total_splits = 0
        total_fp = 0
        total_fn = 0
        total_tp = 0
        for data, ei_sym, ep_sym in all_data:
            truth = data.hit_truth_cluster.numpy()
            n_nodes = data.x.shape[0]
            keep = ep_sym >= tau
            if keep.sum() == 0:
                # All nodes isolated → every multi-hit truth cluster splits.
                # (Edge TP/FP/FN are intentionally not counted here,
                # matching the original behavior.)
                for tid in np.unique(truth[truth >= 0]):
                    if (truth == tid).sum() > 1:
                        total_splits += 1
                continue

            src_k, dst_k = ei_sym[0, keep], ei_sym[1, keep]
            sb = np.concatenate([src_k, dst_k])
            db = np.concatenate([dst_k, src_k])
            adj = coo_matrix((np.ones(len(sb)), (sb, db)), shape=(n_nodes, n_nodes))
            _, pred = connected_components(adj, directed=False)

            truth_ids = np.unique(truth[truth >= 0])
            # Merges
            for pid in np.unique(pred):
                vt = truth[pred == pid]
                vt = vt[vt >= 0]
                if len(np.unique(vt)) > 1:
                    total_merges += 1
            # Splits
            for tid in truth_ids:
                if len(np.unique(pred[truth == tid])) > 1:
                    total_splits += 1

            # FP/FN — vectorized over all symmetric edges
            t_src = truth[ei_sym[0]]
            t_dst = truth[ei_sym[1]]
            valid = (t_src >= 0) & (t_dst >= 0)
            same = valid & (t_src == t_dst)
            diff = valid & (t_src != t_dst)
            total_tp += int(np.count_nonzero(same & keep))
            total_fp += int(np.count_nonzero(diff & keep))
            total_fn += int(np.count_nonzero(same & ~keep))

        prec = total_tp / max(total_tp + total_fp, 1)
        rec = total_tp / max(total_tp + total_fn, 1)
        f1 = 2 * prec * rec / max(prec + rec, 1e-10)
        results.append({
            "tau": tau, "merges": total_merges, "splits": total_splits,
            "fp": total_fp, "fn": total_fn, "tp": total_tp,
            "precision": prec, "recall": rec, "f1": f1,
        })
        print(f"  τ={tau:.2f}: merges={total_merges}, splits={total_splits}, "
              f"P={prec:.3f} R={rec:.3f} F1={f1:.3f}")

    return results


def main():
    """Run the full failure audit (Q1–Q5) and save a JSON summary."""
    parser = argparse.ArgumentParser(description="GNN failure audit")
    parser.add_argument("--config", default="configs/calo_cluster_net.yaml")
    parser.add_argument("--checkpoint",
                        default="outputs/runs/calo_cluster_net_v2_stage1/checkpoints/best_model.pt")
    parser.add_argument("--device", default="cuda" if torch.cuda.is_available() else "cpu")
    parser.add_argument("--max-graphs", type=int, default=None,
                        help="Limit number of graphs to analyze (default: all)")
    args = parser.parse_args()

    model, val_dataset, cfg, device, tau_edge = load_model_and_data(args)
    print(f"Model: {cfg['model']['name']}, τ_edge={tau_edge}")
    print(f"Val graphs: {len(val_dataset)}")
    print(f"Device: {device}")

    out_dir = Path("outputs/failure_audit")
    out_dir.mkdir(parents=True, exist_ok=True)

    # ====================================================================
    # Run per-graph analysis
    # ====================================================================
    print(f"\n{'='*70}")
    print("Running per-graph failure analysis...")
    print(f"{'='*70}")

    all_results = []
    # Explicit None check: `args.max_graphs or len(...)` would silently
    # treat `--max-graphs 0` as "all graphs".
    n_graphs = len(val_dataset) if args.max_graphs is None else args.max_graphs
    for i in range(min(n_graphs, len(val_dataset))):
        data = val_dataset[i]
        logits, probs = run_inference(model, data, device)
        result = analyze_graph(data, logits, probs, tau_edge)
        if result is not None:
            all_results.append(result)
        if (i + 1) % 1000 == 0:
            print(f"  {i+1}/{n_graphs} graphs processed...")

    print(f"  {len(all_results)} graphs analyzed (non-trivial)")

    # ====================================================================
    # Q1: Are merges caused by a few bridge edges?
    # ====================================================================
    print(f"\n{'='*70}")
    print("Q1: Bridge edge analysis for merges")
    print(f"{'='*70}")

    all_merges = []
    for r in all_results:
        all_merges.extend(r["merges"])

    n_total_merges = len(all_merges)
    if n_total_merges > 0:
        bridge_counts = [m["n_bridge_edges"] for m in all_merges]
        bridge_scores_flat = []
        for m in all_merges:
            bridge_scores_flat.extend(m["bridge_scores"])

        print(f"Total merges: {n_total_merges}")
        print(f"Bridge edges per merge:")
        print(f"  mean={np.mean(bridge_counts):.1f}, "
              f"median={np.median(bridge_counts):.0f}, "
              f"max={np.max(bridge_counts)}")
        for n in [1, 2, 3, 4, 5]:
            pct = 100 * np.mean(np.array(bridge_counts) == n)
            print(f"  exactly {n}: {pct:.1f}%")
        pct_le2 = 100 * np.mean(np.array(bridge_counts) <= 2)
        print(f"  <=2 bridge edges: {pct_le2:.1f}%")

        if bridge_scores_flat:
            scores = np.array(bridge_scores_flat)
            print(f"\nBridge edge scores:")
            print(f"  mean={scores.mean():.3f}, median={np.median(scores):.3f}")
            print(f"  min={scores.min():.3f}, max={scores.max():.3f}")
            near_thresh = np.mean((scores >= tau_edge) & (scores < tau_edge + 0.1))
            confident = np.mean(scores >= 0.8)
            print(f"  near threshold ({tau_edge:.2f}-{tau_edge+0.1:.2f}): "
                  f"{100*near_thresh:.1f}%")
            print(f"  confident (>=0.8): {100*confident:.1f}%")
    else:
        print("No merges found.")

    # ====================================================================
    # Q2: Where do bad bridge edges live?
    # ====================================================================
    print(f"\n{'='*70}")
    print("Q2: False positive edge properties")
    print(f"{'='*70}")

    all_fp_scores = []
    for r in all_results:
        all_fp_scores.extend(r["fp_scores"])

    total_fp = sum(r["n_fp"] for r in all_results)
    total_tp = sum(r["n_tp"] for r in all_results)
    total_fn = sum(r["n_fn"] for r in all_results)
    total_tn = sum(r["n_tn"] for r in all_results)

    print(f"Edge classification (all val, τ={tau_edge}):")
    print(f"  TP={total_tp}, FP={total_fp}, FN={total_fn}, TN={total_tn}")
    prec = total_tp / max(total_tp + total_fp, 1)
    rec = total_tp / max(total_tp + total_fn, 1)
    print(f"  Precision={prec:.4f}, Recall={rec:.4f}")

    if all_fp_scores:
        fps = np.array(all_fp_scores)
        print(f"\nFalse positive edge scores (N={len(fps)}):")
        print(f"  mean={fps.mean():.3f}, median={np.median(fps):.3f}")
        for lo, hi in [(0.3, 0.4), (0.4, 0.5), (0.5, 0.6), (0.6, 0.7),
                       (0.7, 0.8), (0.8, 0.9), (0.9, 1.0)]:
            n = np.sum((fps >= lo) & (fps < hi))
            print(f"  [{lo:.1f}, {hi:.1f}): {n} ({100*n/len(fps):.1f}%)")

    # ====================================================================
    # Q3: Threshold sweep
    # ====================================================================
    print(f"\n{'='*70}")
    print("Q3: Threshold sweep (merge/split trade-off)")
    print(f"{'='*70}")

    thresholds = [0.20, 0.25, 0.30, 0.34, 0.40, 0.45, 0.50,
                  0.55, 0.60, 0.65, 0.70, 0.75, 0.80]
    sweep = threshold_sweep(model, val_dataset, device, thresholds)

    # ====================================================================
    # Q4: Are failures concentrated in tiny truth objects?
    # ====================================================================
    print(f"\n{'='*70}")
    print("Q4: Failure stratification by truth cluster size")
    print(f"{'='*70}")

    # Collect all truth clusters and whether they were part of a merge or
    # split. Keys use id(r) so equal truth ids from different graphs don't
    # collide.
    merge_truth_ids = set()
    for r in all_results:
        for m in r["merges"]:
            for fc in m["fused_clusters"]:
                merge_truth_ids.add((id(r), fc["truth_id"]))

    split_truth_ids = set()
    for r in all_results:
        for s in r["splits"]:
            split_truth_ids.add((id(r), s["truth_id"]))

    # Stratify by size
    size_stats = defaultdict(lambda: {"total": 0, "in_merge": 0, "in_split": 0})
    for r in all_results:
        for tid, sz in r["truth_sizes"].items():
            bucket = str(sz) if sz <= 5 else "6+"
            size_stats[bucket]["total"] += 1
            if (id(r), tid) in merge_truth_ids:
                size_stats[bucket]["in_merge"] += 1
            if (id(r), tid) in split_truth_ids:
                size_stats[bucket]["in_split"] += 1

    print(f"{'Size':>6} {'Total':>8} {'Merged':>8} {'%':>6} {'Split':>8} {'%':>6}")
    for sz in ["1", "2", "3", "4", "5", "6+"]:
        s = size_stats[sz]
        t = s["total"]
        m = s["in_merge"]
        sp = s["in_split"]
        mp = 100 * m / max(t, 1)
        spp = 100 * sp / max(t, 1)
        print(f"{sz:>6} {t:>8} {m:>8} {mp:>5.1f}% {sp:>8} {spp:>5.1f}%")

    # ====================================================================
    # Q5: Are "merge errors" actually ambiguous physics?
    # ====================================================================
    print(f"\n{'='*70}")
    print("Q5: Merge anatomy — are fused clusters physically close?")
    print(f"{'='*70}")

    # For merges, characterize the fused truth clusters
    fused_pair_sizes = []
    n_fused_2 = 0       # merges fusing exactly 2 truth clusters
    n_fused_3plus = 0
    n_both_small = 0    # both fused clusters have <=2 hits
    n_one_singleton = 0  # at least one is a single-hit cluster

    for m in all_merges:
        sizes = [fc["n_hits"] for fc in m["fused_clusters"]]
        fused_pair_sizes.append(sizes)
        if len(sizes) == 2:
            n_fused_2 += 1
            if max(sizes) <= 2:
                n_both_small += 1
        else:
            n_fused_3plus += 1
        if min(sizes) == 1:
            n_one_singleton += 1

    if n_total_merges > 0:
        print(f"Total merges: {n_total_merges}")
        print(f"  fusing exactly 2 truth clusters: {n_fused_2} "
              f"({100*n_fused_2/n_total_merges:.1f}%)")
        print(f"  fusing 3+ truth clusters: {n_fused_3plus} "
              f"({100*n_fused_3plus/n_total_merges:.1f}%)")
        print(f"  at least one singleton: {n_one_singleton} "
              f"({100*n_one_singleton/n_total_merges:.1f}%)")
        print(f"  both <=2 hits (pairwise): {n_both_small} "
              f"({100*n_both_small/n_total_merges:.1f}%)")

    # ====================================================================
    # Summary
    # ====================================================================
    print(f"\n{'='*70}")
    print("SUMMARY")
    print(f"{'='*70}")

    total_truth = sum(len(r["truth_sizes"]) for r in all_results)
    total_splits_count = sum(len(r["splits"]) for r in all_results)

    print(f"Graphs analyzed: {len(all_results)}")
    print(f"Total truth clusters: {total_truth}")
    print(f"Total merge events: {n_total_merges}")
    print(f"Total split events: {total_splits_count}")
    if n_total_merges > 0:
        median_bridges = int(np.median(bridge_counts))
        print(f"Median bridge edges per merge: {median_bridges}")
        le2 = np.mean(np.array(bridge_counts) <= 2)
        print(f"Merges caused by <=2 bridge edges: {100*le2:.1f}%")
        if bridge_scores_flat:
            print(f"Median bridge edge score: {np.median(bridge_scores_flat):.3f} "
                  f"(threshold: {tau_edge})")

    # Save raw results
    summary = {
        "model": cfg["model"]["name"],
        "tau_edge": tau_edge,
        "n_graphs": len(all_results),
        "total_truth_clusters": total_truth,
        "total_merges": n_total_merges,
        "total_splits": total_splits_count,
        "bridge_counts": bridge_counts if n_total_merges > 0 else [],
        "bridge_scores": bridge_scores_flat if n_total_merges > 0 else [],
        "fp_scores": all_fp_scores,
        "threshold_sweep": sweep,
        "size_stratification": dict(size_stats),
    }
    summary_path = out_dir / "audit_summary.json"
    with open(summary_path, "w") as f:
        json.dump(summary, f, indent=2, default=str)
    print(f"\nFull results saved to {summary_path}")


if __name__ == "__main__":
    main()
+""" +from __future__ import annotations + +import json +from pathlib import Path + +import matplotlib.pyplot as plt +import numpy as np +import pandas as pd + +PROJECT = Path(__file__).resolve().parents[1] +OUT = PROJECT / "outputs" / "slide_plots" +OUT.mkdir(parents=True, exist_ok=True) + +# Presentation-friendly defaults +plt.rcParams.update({ + "font.size": 14, + "axes.titlesize": 16, + "axes.labelsize": 15, + "xtick.labelsize": 13, + "ytick.labelsize": 13, + "legend.fontsize": 13, + "figure.dpi": 130, + "savefig.dpi": 150, + "savefig.bbox": "tight", + "axes.spines.top": False, + "axes.spines.right": False, + "lines.linewidth": 2.2, +}) + +METHOD_COLORS = { + "BFS": "#4C4C4C", + "SimpleEdgeNet": "#1f77b4", + "SEN+BFS10": "#5fbcd3", + "CaloClusterNet": "#d95f02", + "CCN+BFS10": "#1b7837", +} +METHOD_LABEL = { + "BFS": "BFS (baseline)", + "SimpleEdgeNet": "SimpleEdgeNet", + "SEN+BFS10": "SEN+BFS10", + "CaloClusterNet": "CaloClusterNet", + "CCN+BFS10": "CCN+BFS10 (mine)", +} + + +def training_curves() -> None: + """Single clean F1-vs-epoch plot for both models.""" + sen = json.loads((PROJECT / "outputs/runs/simple_edge_net_v2/history.json").read_text()) + ccn = json.loads((PROJECT / "outputs/runs/calo_cluster_net_v2_stage1/history.json").read_text()) + + fig, axes = plt.subplots(1, 2, figsize=(11, 4.2)) + + for ax, (h, label, color) in zip( + axes, + [ + (sen, "SimpleEdgeNet", METHOD_COLORS["SimpleEdgeNet"]), + (ccn, "CaloClusterNet (Stage 1)", METHOD_COLORS["CaloClusterNet"]), + ], + ): + ep = [r["epoch"] for r in h] + f1_train = [r["train"]["f1"] for r in h] + f1_val = [r["val"]["f1"] for r in h] + best_idx = int(np.argmax(f1_val)) + ax.plot(ep, f1_train, color=color, alpha=0.55, label="train") + ax.plot(ep, f1_val, color=color, linewidth=2.6, label="val") + ax.axvline(ep[best_idx], color="k", linestyle=":", alpha=0.5) + ax.scatter([ep[best_idx]], [f1_val[best_idx]], color="red", zorder=5, s=80, + label=f"best val: {f1_val[best_idx]:.3f}") + ax.set_title(label) 
+ ax.set_xlabel("Epoch") + ax.set_ylabel("Edge F1") + ax.set_ylim(0.85, 1.0) + ax.grid(True, alpha=0.3) + ax.legend(loc="lower right", framealpha=0.95) + + fig.suptitle("Training: validation edge F1 saturates within 10–15 epochs", y=1.02, fontsize=15) + fig.savefig(OUT / "training_curves_clean.png") + plt.close(fig) + print("wrote", OUT / "training_curves_clean.png") + + +def load_residuals() -> pd.DataFrame: + full = PROJECT / "outputs/cluster_physics_eval_test_full/cluster_residuals.csv" + if full.exists(): + return pd.read_csv(full) + return pd.read_csv(PROJECT / "outputs/cluster_physics_eval_bfs_test/cluster_residuals.csv") + + +def energy_residual_hist(df: pd.DataFrame) -> None: + """dE distribution: BFS vs CCN+BFS10, excluding exact-match cluster pairs. + + Exact matches (dE == 0) account for >90% of matched clusters and hide + the actual error distribution. Drop them so the tail comparison reads + directly on a linear scale. + """ + methods = ["BFS", "CCN+BFS10"] + # Range covers 99.99% of imperfect dE values; 1 MeV bins keep the visual clean. + bins = np.linspace(-50, 50, 101) + nonzero = df["dE"].abs() > 1e-9 + + fig, axes = plt.subplots(1, 2, figsize=(12, 4.8)) + for title, outer_mask, ax in [ + ("All non-trivial matched clusters", + np.ones(len(df), bool), axes[0]), + (r"Track-seeding ($E_\mathrm{reco}\geq50$ MeV, non-trivial)", + df["reco_energy"] >= 50, axes[1]), + ]: + for m in methods: + # Overall mean (over ALL matched clusters in the regime) --- matches + # what's reported in the summary tables. 
+ overall = df[(df["method"] == m) & outer_mask] + shown = df[(df["method"] == m) & outer_mask & nonzero] + ax.hist( + shown["dE"], bins=bins, histtype="step", linewidth=2.5, + color=METHOD_COLORS[m], + label=f"{METHOD_LABEL[m]}: mean |dE|={overall['dE'].abs().mean():.3f} MeV", + ) + ax.set_xlabel(r"$\Delta E = E_\mathrm{reco} - E_\mathrm{truth}$ (MeV)") + ax.set_ylabel("Clusters / 1 MeV") + ax.set_title(title) + ax.set_xlim(-50, 50) + ax.set_yscale("log") + ax.grid(True, which="both", alpha=0.25) + ax.legend(loc="lower center", bbox_to_anchor=(0.5, 1.06), ncol=1, + framealpha=0.0, fontsize=11.5, handlelength=1.8) + fig.suptitle("Energy residual distributions (exact matches excluded from plot)", + fontsize=16, y=1.07) + fig.subplots_adjust(top=0.78) + fig.savefig(OUT / "energy_residual_hist.png") + plt.close(fig) + print("wrote", OUT / "energy_residual_hist.png") + + +def centroid_residual_hist(df: pd.DataFrame) -> None: + """dr distribution: BFS vs CCN+BFS10, excluding exact-match (dr=0) pairs. + + Same treatment as the energy plot --- exact matches drown out the + shape of the actual error distribution. 
+ """ + methods = ["BFS", "CCN+BFS10"] + bins = np.linspace(0, 15, 76) + nonzero = df["dr"] > 1e-6 + + fig, axes = plt.subplots(1, 2, figsize=(12, 4.8)) + for title, outer_mask, ax in [ + ("All non-trivial matched clusters", + np.ones(len(df), bool), axes[0]), + (r"Track-seeding ($E_\mathrm{reco}\geq50$ MeV, non-trivial)", + df["reco_energy"] >= 50, axes[1]), + ]: + for m in methods: + overall = df[(df["method"] == m) & outer_mask] + shown = df[(df["method"] == m) & outer_mask & nonzero] + ax.hist( + shown["dr"], bins=bins, histtype="step", linewidth=2.5, + color=METHOD_COLORS[m], + label=f"{METHOD_LABEL[m]}: mean dr={overall['dr'].mean():.3f} mm", + ) + ax.set_xlabel(r"$\Delta r$ (mm)") + ax.set_ylabel("Clusters / 0.2 mm") + ax.set_title(title) + ax.set_xlim(0, 15) + ax.grid(True, alpha=0.25) + ax.legend(loc="lower center", bbox_to_anchor=(0.5, 1.06), ncol=1, + framealpha=0.0, fontsize=11.5, handlelength=1.8) + ymax = ax.get_ylim()[1] + ax.set_ylim(0, ymax * 1.05) + fig.suptitle("Centroid displacement distributions (exact matches excluded from plot)", + fontsize=16, y=1.07) + fig.subplots_adjust(top=0.78) + fig.savefig(OUT / "centroid_residual_hist.png") + plt.close(fig) + print("wrote", OUT / "centroid_residual_hist.png") + + +def energy_binned_dE(df: pd.DataFrame) -> None: + """Grouped bar chart: mean |dE| by truth energy bin, all 5 methods.""" + methods = ["BFS", "SimpleEdgeNet", "SEN+BFS10", "CaloClusterNet", "CCN+BFS10"] + edges = [0, 25, 50, 75, 100, 125] + centers = [(a + b) / 2 for a, b in zip(edges[:-1], edges[1:])] + width = 4.0 # MeV per bar + + fig, ax = plt.subplots(figsize=(11, 4.8)) + n_methods = len(methods) + offsets = (np.arange(n_methods) - (n_methods - 1) / 2) * width + + for i, m in enumerate(methods): + sub = df[df["method"] == m] + means = [] + for lo, hi in zip(edges[:-1], edges[1:]): + sel = sub[(sub["truth_energy"] >= lo) & (sub["truth_energy"] < hi)] + means.append(sel["dE"].abs().mean() if len(sel) else 0.0) + ax.bar(np.array(centers) + 
offsets[i], means, width=width, + color=METHOD_COLORS[m], label=METHOD_LABEL[m], edgecolor="white", linewidth=0.5) + + ax.set_xticks(centers) + ax.set_xticklabels([f"{a}–{b}" for a, b in zip(edges[:-1], edges[1:])]) + ax.set_xlabel("Truth cluster energy (MeV)") + ax.set_ylabel(r"Mean $|\Delta E|$ (MeV)") + # Legend above the bars, title above the legend + ax.legend(loc="lower center", bbox_to_anchor=(0.5, 1.04), + ncol=5, framealpha=0.0, fontsize=11) + ax.grid(True, axis="y", alpha=0.3) + ymax = ax.get_ylim()[1] + ax.set_ylim(0, ymax * 1.05) + fig.suptitle("Mean energy error vs truth cluster energy (test set)", + fontsize=15, y=1.02) + fig.subplots_adjust(top=0.82) + fig.savefig(OUT / "energy_binned_dE.png") + plt.close(fig) + print("wrote", OUT / "energy_binned_dE.png") + + +def improvement_bars() -> None: + """Horizontal bar chart of CCN+BFS10 % improvement vs BFS.""" + metrics = [ + ("Splits", 38.0, "Standard"), + ("Merges", 8.4, "Standard"), + (r"Mean $|\Delta E|$ (all)", 19.0, "All clusters"), + (r"Std $\Delta E$ (all)", 15.3, "All clusters"), + (r"Mean $\Delta r$ (all)", 17.2, "All clusters"), + (r"Mean $|\Delta E|$ ($E\geq$50 MeV)", 26.5, "Track-seeding"), + (r"95th $|\Delta E|$ tail", 33.6, "Track-seeding"), + (r"95th $\Delta r$ tail", 36.4, "Track-seeding"), + (r"Frac $|\Delta E|>10$ MeV", 30.1, "Track-seeding"), + (r"Mean $|\Delta E|$ (signal 95–110)", 43.0, "Signal region"), + (r"Mean $\Delta r$ (signal 95–110)", 18.0, "Signal region"), + ] + cat_color = { + "Standard": "#4C4C4C", + "All clusters": "#1f77b4", + "Track-seeding": "#1b7837", + "Signal region": "#d62728", + } + + labels = [m[0] for m in metrics] + vals = [m[1] for m in metrics] + colors = [cat_color[m[2]] for m in metrics] + y = np.arange(len(metrics))[::-1] # top-to-bottom order + + fig, ax = plt.subplots(figsize=(10.5, 6)) + ax.barh(y, vals, color=colors, edgecolor="white", linewidth=0.6) + for yi, v in zip(y, vals): + ax.text(v + 0.6, yi, f"-{v:g}%", va="center", ha="left", fontsize=12.5, 
fontweight="bold") + ax.set_yticks(y) + ax.set_yticklabels(labels) + ax.set_xlabel("Reduction vs BFS (%)") + ax.set_xlim(0, max(vals) + 10) + ax.grid(True, axis="x", alpha=0.3) + + handles = [plt.Rectangle((0, 0), 1, 1, color=c) for c in cat_color.values()] + ax.legend(handles, list(cat_color.keys()), + loc="lower center", bbox_to_anchor=(0.5, 1.04), + ncol=len(cat_color), framealpha=0.0, fontsize=11.5) + fig.suptitle("CCN+BFS10 improvement over BFS (test set, 276,688 events)", + fontsize=15, y=1.02) + fig.subplots_adjust(top=0.84) + + fig.savefig(OUT / "improvement_bars.png") + plt.close(fig) + print("wrote", OUT / "improvement_bars.png") + + +def signal_region_focus(df: pd.DataFrame) -> None: + """Mean |dE| in the conversion-electron signal region: BFS vs methods.""" + sub = df[(df["truth_energy"] >= 95) & (df["truth_energy"] <= 110)] + methods = ["BFS", "SimpleEdgeNet", "SEN+BFS10", "CaloClusterNet", "CCN+BFS10"] + means = [sub[sub["method"] == m]["dE"].abs().mean() for m in methods] + drs = [sub[sub["method"] == m]["dr"].mean() for m in methods] + counts = [(sub["method"] == m).sum() for m in methods] + bfs_mean = means[0] + bfs_dr = drs[0] + + fig, axes = plt.subplots(1, 2, figsize=(12, 4.6)) + + # |dE| bars + ax = axes[0] + bars = ax.bar(range(len(methods)), means, + color=[METHOD_COLORS[m] for m in methods], + edgecolor="white", linewidth=0.6) + ax.axhline(bfs_mean, color="k", linestyle="--", alpha=0.5, label=f"BFS = {bfs_mean:.3f}") + for i, (b, v) in enumerate(zip(bars, means)): + delta = (v - bfs_mean) / bfs_mean * 100 if i > 0 else 0 + label = f"{v:.3f}" + if i > 0: + label += f"\n({delta:+.0f}%)" + ax.text(b.get_x() + b.get_width() / 2, v + 0.005, label, + ha="center", va="bottom", fontsize=11.5, fontweight="bold") + ax.set_xticks(range(len(methods))) + ax.set_xticklabels(methods, rotation=20, ha="right") + ax.set_ylabel(r"Mean $|\Delta E|$ (MeV)") + ax.set_title(f"Energy error in signal region (95–110 MeV, N={counts[0]})") + ax.set_ylim(0, max(means) * 
1.25) + ax.grid(True, axis="y", alpha=0.3) + ax.legend(loc="lower right", fontsize=11, framealpha=0.95) + + # Δr bars + ax = axes[1] + bars = ax.bar(range(len(methods)), drs, + color=[METHOD_COLORS[m] for m in methods], + edgecolor="white", linewidth=0.6) + ax.axhline(bfs_dr, color="k", linestyle="--", alpha=0.5, label=f"BFS = {bfs_dr:.3f}") + for i, (b, v) in enumerate(zip(bars, drs)): + delta = (v - bfs_dr) / bfs_dr * 100 if i > 0 else 0 + label = f"{v:.3f}" + if i > 0: + label += f"\n({delta:+.0f}%)" + ax.text(b.get_x() + b.get_width() / 2, v + 0.012, label, + ha="center", va="bottom", fontsize=11.5, fontweight="bold") + ax.set_xticks(range(len(methods))) + ax.set_xticklabels(methods, rotation=20, ha="right") + ax.set_ylabel(r"Mean $\Delta r$ (mm)") + ax.set_title("Centroid displacement in signal region") + ax.set_ylim(0, max(drs) * 1.25) + ax.grid(True, axis="y", alpha=0.3) + ax.legend(loc="lower right", fontsize=11, framealpha=0.95) + + fig.savefig(OUT / "signal_region_focus.png") + plt.close(fig) + print("wrote", OUT / "signal_region_focus.png") + + +def cluster_size_distribution(df: pd.DataFrame) -> None: + """Histogram of truth cluster size (singleton problem visualization).""" + sub = df[df["method"] == "BFS"] # truth cluster sizes are method-independent at top-K + sizes = sub["truth_nhits"].values + + fig, ax = plt.subplots(figsize=(9.5, 4.5)) + bins = np.arange(0.5, 15.5, 1) + ax.hist(sizes, bins=bins, color="#888888", edgecolor="white") + n_singleton = int((sizes == 1).sum()) + pct = n_singleton / len(sizes) * 100 + ax.axvline(1, color="red", linestyle="--", linewidth=2) + # Annotate far from the tall singleton bar + ax.text(5.5, ax.get_ylim()[1] * 0.72, + f"singletons: {n_singleton:,}\n({pct:.1f}% of all clusters)", + color="red", fontsize=13, fontweight="bold", + ha="left", va="center", + bbox=dict(facecolor="white", edgecolor="red", boxstyle="round,pad=0.4")) + ax.set_xlabel("Truth cluster size (hits)") + ax.set_ylabel("Number of clusters") + 
ax.set_title("Truth cluster size distribution (calo-entrant truth, test set)") + ax.set_xlim(0.5, 14.5) + ax.grid(True, axis="y", alpha=0.3) + fig.savefig(OUT / "cluster_size_distribution.png") + plt.close(fig) + print("wrote", OUT / "cluster_size_distribution.png") + + +def main() -> None: + training_curves() + df = load_residuals() + print(f"loaded {len(df):,} cluster residuals across {df['method'].nunique()} methods") + energy_residual_hist(df) + centroid_residual_hist(df) + energy_binned_dE(df) + improvement_bars() + signal_region_focus(df) + cluster_size_distribution(df) + print("\nAll plots written to", OUT) + + +if __name__ == "__main__": + main() diff --git a/CaloClusterGNN/scripts/pack_graphs.py b/CaloClusterGNN/scripts/pack_graphs.py new file mode 100644 index 0000000..f58de91 --- /dev/null +++ b/CaloClusterGNN/scripts/pack_graphs.py @@ -0,0 +1,70 @@ +#!/usr/bin/env python3 +""" +Pack individual .pt graph files into one file per split. + +Reduces ~29K torch.load() calls to 1, eliminating NFS I/O bottleneck. 
+ +Usage: + python3 scripts/pack_graphs.py + python3 scripts/pack_graphs.py --splits train val # specific splits + +Output: + data/processed/train.pt (list of Data objects) + data/processed/val.pt + data/processed/test.pt +""" + +import argparse +import sys +import time +from pathlib import Path + +sys.path.insert(0, str(Path(__file__).resolve().parents[1])) + +import torch +import yaml + +from src.data.dataset import CaloGraphDataset + + +def pack_split(processed_dir, split_files, split_name): + """Load all graphs for a split and save as a single list.""" + t0 = time.time() + ds = CaloGraphDataset(processed_dir, file_list=split_files) + n = len(ds) + print(f" {split_name}: {n} graphs, loading...", end=" ", flush=True) + + graphs = [ds.get(i) for i in range(n)] + elapsed_load = time.time() - t0 + print(f"loaded in {elapsed_load:.1f}s,", end=" ", flush=True) + + out_path = Path(processed_dir) / f"{split_name}.pt" + torch.save(graphs, out_path) + size_mb = out_path.stat().st_size / 1e6 + print(f"saved {size_mb:.1f} MB to {out_path} ({time.time() - t0:.1f}s total)") + return n + + +def main(): + parser = argparse.ArgumentParser(description="Pack graph files into split bundles") + parser.add_argument("--config", default="configs/default.yaml") + parser.add_argument("--splits", nargs="+", default=["train", "val", "test"]) + args = parser.parse_args() + + with open(args.config) as f: + cfg = yaml.safe_load(f) + + processed_dir = cfg["data"]["processed_dir"] + + for split in args.splits: + split_path = cfg["data"]["splits"][split] + with open(split_path) as f: + file_list = [line.strip() for line in f if line.strip()] + pack_split(processed_dir, file_list, split) + + print("\nDone. 
Update train_gnn.py to use packed=True or load directly:") + print(" graphs = torch.load('data/processed/train.pt', weights_only=False)") + + +if __name__ == "__main__": + main() diff --git a/CaloClusterGNN/scripts/plot_crystal_map.py b/CaloClusterGNN/scripts/plot_crystal_map.py new file mode 100644 index 0000000..4f878fe --- /dev/null +++ b/CaloClusterGNN/scripts/plot_crystal_map.py @@ -0,0 +1,168 @@ +""" +Plot crystal map for both calorimeter disks with crystal IDs labeled. + +Draws each crystal as a square patch (pitch ~34.3 mm) at its true +position, colored by type (CsI vs CAPHRI). + +Usage: + source setup_env.sh + python3 scripts/plot_crystal_map.py +""" + +import sys +from pathlib import Path + +import matplotlib +matplotlib.use("Agg") +import matplotlib.pyplot as plt +from matplotlib.patches import Rectangle, FancyBboxPatch +from matplotlib.collections import PatchCollection +import matplotlib.colors as mcolors +import numpy as np +import csv + +sys.path.insert(0, str(Path(__file__).resolve().parents[1])) + +CRYSTAL_PITCH = 34.3 # mm — measured nearest-neighbor distance +CRYSTAL_SIZE = CRYSTAL_PITCH * 0.92 # slight gap between crystals +CAPHRI_IDS = {582, 609, 610, 637} + + +def load_geometry(csv_path): + """Load crystal geometry CSV into arrays.""" + ids, disks, xs, ys = [], [], [], [] + with open(csv_path) as f: + reader = csv.DictReader(f) + for row in reader: + ids.append(int(row["crystalId"])) + disks.append(int(row["diskId"])) + xs.append(float(row["x_mm"])) + ys.append(float(row["y_mm"])) + return np.array(ids), np.array(disks), np.array(xs), np.array(ys) + + +def plot_disk(ax, crystal_ids, xs, ys, disk_id, label_fontsize=3.0, + xlim=None, ylim=None): + """Draw one calorimeter disk: crystal patches colored by type (CsI/CAPHRI), labeled with IDs.""" + half = CRYSTAL_SIZE / 2 + + csi_patches = [] + caphri_patches = [] + + for cid, x, y in zip(crystal_ids, xs, ys): + if xlim and (x < xlim[0] - CRYSTAL_PITCH or x > xlim[1] + CRYSTAL_PITCH): + continue + 
def plot_disk(ax, crystal_ids, xs, ys, disk_id, label_fontsize=3.0,
              xlim=None, ylim=None):
    """Draw one calorimeter disk: crystal patches colored by type (CsI/CAPHRI), labeled with IDs.

    When xlim/ylim are given, crystals more than one pitch outside the
    window are skipped entirely (patch and label) to keep zoomed views fast.
    """
    half = CRYSTAL_SIZE / 2

    csi_patches = []
    caphri_patches = []

    for cid, x, y in zip(crystal_ids, xs, ys):
        if xlim and (x < xlim[0] - CRYSTAL_PITCH or x > xlim[1] + CRYSTAL_PITCH):
            continue
        if ylim and (y < ylim[0] - CRYSTAL_PITCH or y > ylim[1] + CRYSTAL_PITCH):
            continue

        rect = Rectangle((x - half, y - half), CRYSTAL_SIZE, CRYSTAL_SIZE)
        if cid in CAPHRI_IDS:
            caphri_patches.append(rect)
        else:
            csi_patches.append(rect)

        ax.text(x, y, str(cid), fontsize=label_fontsize, ha="center",
                va="center", color="black", zorder=3, fontweight="bold",
                clip_on=True)

    if csi_patches:
        pc = PatchCollection(csi_patches, facecolor="#6baed6", edgecolor="white",
                             linewidth=0.4, zorder=2)
        ax.add_collection(pc)
    if caphri_patches:
        pc = PatchCollection(caphri_patches, facecolor="#fc8d59", edgecolor="white",
                             linewidth=0.4, zorder=2)
        ax.add_collection(pc)

    ax.set_aspect("equal")
    ax.autoscale_view()
    if xlim:
        ax.set_xlim(xlim)
    if ylim:
        ax.set_ylim(ylim)
    ax.set_xlabel("x (mm)", fontsize=11)
    ax.set_ylabel("y (mm)", fontsize=11)
    ax.grid(True, linewidth=0.3, alpha=0.3, zorder=0)
    ax.set_facecolor("#f7f7f7")


def make_legend():
    """Create legend handles for CsI and CAPHRI."""
    from matplotlib.patches import Patch
    return [
        Patch(facecolor="#6baed6", edgecolor="white", label="CsI crystal"),
        Patch(facecolor="#fc8d59", edgecolor="white", label="CAPHRI crystal"),
    ]


def main():
    """Render combined, per-disk, and zoomed-quadrant crystal maps."""
    # Anchor input/output on the package root (this script lives in
    # <root>/scripts/), matching the other scripts in this repo; the bare
    # relative paths used previously broke when run from any other CWD.
    project_root = Path(__file__).resolve().parents[1]
    csv_path = project_root / "data" / "crystal_geometry.csv"
    out_dir = project_root / "outputs" / "crystal_map"
    out_dir.mkdir(parents=True, exist_ok=True)

    ids, disks, xs, ys = load_geometry(csv_path)
    legend_elements = make_legend()

    # ── Combined figure (both disks side by side) ────────────────
    fig, axes = plt.subplots(1, 2, figsize=(28, 14))
    for di, ax in enumerate(axes):
        mask = disks == di
        plot_disk(ax, ids[mask], xs[mask], ys[mask], di, label_fontsize=3.5)
        ax.set_title(f"Disk {di} ({mask.sum()} crystals)", fontsize=13)

    fig.legend(handles=legend_elements, loc="upper center", ncol=2,
               fontsize=11, frameon=True)
    fig.suptitle("Mu2e Calorimeter Crystal Map", fontsize=16, y=0.98)
    fig.tight_layout(rect=[0, 0, 1, 0.95])
    combined_path = out_dir / "crystal_map_both_disks.png"
    fig.savefig(combined_path, dpi=200, bbox_inches="tight")
    plt.close(fig)
    print(f"Saved: {combined_path}")

    # ── Individual full-disk figures ─────────────────────────────
    for di in [0, 1]:
        mask = disks == di
        fig, ax = plt.subplots(figsize=(22, 22))
        plot_disk(ax, ids[mask], xs[mask], ys[mask], di, label_fontsize=6.0)
        ax.set_title(f"Disk {di} ({mask.sum()} crystals)", fontsize=14)
        fig.legend(handles=legend_elements, loc="upper right", fontsize=11,
                   frameon=True)
        fig.suptitle(f"Mu2e Calorimeter — Disk {di}", fontsize=16)
        fig.tight_layout(rect=[0, 0, 1, 0.97])
        disk_path = out_dir / f"crystal_map_disk{di}.png"
        fig.savefig(disk_path, dpi=250, bbox_inches="tight")
        plt.close(fig)
        print(f"Saved: {disk_path}")

    # ── Zoomed quadrant views (IDs clearly legible) ──────────────
    x_lo, x_hi = xs.min(), xs.max()
    y_lo, y_hi = ys.min(), ys.max()
    x_mid, y_mid = (x_lo + x_hi) / 2, (y_lo + y_hi) / 2
    pad = 30  # mm of overlap between adjacent quadrant views

    quadrants = [
        ("top_left", (x_lo - pad, x_mid + pad), (y_mid - pad, y_hi + pad)),
        ("top_right", (x_mid - pad, x_hi + pad), (y_mid - pad, y_hi + pad)),
        ("bottom_left", (x_lo - pad, x_mid + pad), (y_lo - pad, y_mid + pad)),
        ("bottom_right", (x_mid - pad, x_hi + pad), (y_lo - pad, y_mid + pad)),
    ]

    for di in [0, 1]:
        mask = disks == di
        for qname, xlim, ylim in quadrants:
            fig, ax = plt.subplots(figsize=(18, 18))
            plot_disk(ax, ids[mask], xs[mask], ys[mask], di,
                      label_fontsize=9.0, xlim=xlim, ylim=ylim)
            ax.set_title(f"Disk {di} — {qname.replace('_', ' ')}",
                         fontsize=14)
            fig.legend(handles=legend_elements, loc="upper right",
                       fontsize=11, frameon=True)
            fig.tight_layout()
            qpath = out_dir / f"crystal_map_disk{di}_{qname}.png"
            fig.savefig(qpath, dpi=200, bbox_inches="tight")
            plt.close(fig)
            print(f"Saved: {qpath}")


if __name__ == "__main__":
    main()
100644 index 0000000..edab192 --- /dev/null +++ b/CaloClusterGNN/scripts/plot_gnn_clusters.py @@ -0,0 +1,777 @@ +#!/usr/bin/env python3 +""" +GNN cluster visualization: 3-panel crystal-map event displays. + +For each selected event/disk, plots side-by-side: + Panel 1: MC truth clusters + Panel 2: BFS reco clusters (from EventNtuple) + Panel 3: GNN predicted clusters (with edge probability gradient) + +Covers plan tasks 7e (debug visualization) and 7f (GNN cluster display). + +Usage: + source setup_env.sh + + # Plot first 6 events from val split + python3 scripts/plot_gnn_clusters.py + + # Specific events from test split + python3 scripts/plot_gnn_clusters.py --split test --event-indices 0 5 10 + + # Auto-find failure cases (merges/splits) + python3 scripts/plot_gnn_clusters.py --find-failures --n-scan 200 +""" + +import argparse +import csv +import sys +import time +from collections import defaultdict +from pathlib import Path + +sys.path.insert(0, str(Path(__file__).resolve().parents[1])) + +import numpy as np +import torch +import yaml + +import matplotlib +matplotlib.use("Agg") +import matplotlib.pyplot as plt +from matplotlib.patches import Rectangle, Patch +from matplotlib.collections import PatchCollection, LineCollection +from matplotlib.lines import Line2D +import matplotlib.colors as mcolors + +from src.data.graph_builder import build_graph, compute_edge_features, compute_node_features +from src.data.normalization import load_stats, normalize_graph +from src.data.truth_labels_primary import build_calo_root_map +from src.geometry.crystal_geometry import load_crystal_map +from src.inference.cluster_reco import reconstruct_clusters +from src.models import build_model +from torch_geometric.data import Data + +# ── Plot style ────────────────────────────────────────────────────── +plt.rcParams.update({ + "font.family": "sans-serif", + "font.sans-serif": ["DejaVu Sans", "Helvetica", "Arial"], + "font.size": 12, + "axes.titlesize": 14, + "axes.labelsize": 12, + 
"xtick.labelsize": 10, + "ytick.labelsize": 10, + "legend.fontsize": 11, + "figure.titlesize": 15, + "axes.linewidth": 0.8, + "xtick.major.width": 0.6, + "ytick.major.width": 0.6, +}) + +CRYSTAL_PITCH = 34.3 +CRYSTAL_SIZE = CRYSTAL_PITCH * 0.92 + +# Perceptually distinct, colorblind-friendly palette (tab20 inspired) +CLUSTER_COLORS = [ + "#1f77b4", "#d62728", "#2ca02c", "#9467bd", "#ff7f0e", + "#8c564b", "#e377c2", "#17becf", "#bcbd22", "#7f7f7f", + "#aec7e8", "#ffbb78", "#98df8a", "#c5b0d5", "#ff9896", + "#c49c94", "#f7b6d2", "#dbdb8d", "#9edae5", "#c7c7c7", +] +UNCLUSTERED_COLOR = "#d9d9d9" +BG_CRYSTAL_FACE = "#f0f0f0" +BG_CRYSTAL_EDGE = "#d5d5d5" +PANEL_BG = "#fafafa" + +# Red-to-green colormap for edge probabilities +EDGE_CMAP = plt.cm.RdYlGn + + +def build_mc_truth_clusters(simids, edeps, disks, nhits, + calo_root_map, purity_thresh=0.7): + """Build MC truth cluster labels per hit using calo-entrant truth.""" + truth_labels = np.full(nhits, -1, dtype=np.int64) + cluster_map = {} + next_label = 0 + for i in range(nhits): + sids = np.array(simids[i]) + deps = np.array(edeps[i], dtype=np.float64) + if len(sids) == 0 or deps.sum() <= 0: + continue + disk = int(disks[i]) + root_edep = {} + for pid, dep in zip(sids, deps): + root = calo_root_map.get((int(pid), disk), int(pid)) + root_edep[root] = root_edep.get(root, 0.0) + float(dep) + best_root = max(root_edep, key=root_edep.get) + purity = root_edep[best_root] / deps.sum() + if purity < purity_thresh: + continue + key = (disk, best_root) + if key not in cluster_map: + cluster_map[key] = next_label + next_label += 1 + truth_labels[i] = cluster_map[key] + return truth_labels + + +def detect_failures(pred_labels, truth_labels, energies): + """Detect merges and splits between pred and truth clusters.""" + pred_ids = sorted(set(pred_labels[pred_labels >= 0].tolist())) + truth_ids = sorted(set(truth_labels[truth_labels >= 0].tolist())) + if not pred_ids or not truth_ids: + return [], [] + + overlap = defaultdict(lambda: 
defaultdict(float)) + pred_energy = defaultdict(float) + for i in range(len(energies)): + p, t, e = pred_labels[i], truth_labels[i], energies[i] + if p >= 0: + pred_energy[p] += e + if p >= 0 and t >= 0: + overlap[p][t] += e + + # Merges: pred cluster overlaps >1 truth cluster significantly + merged_preds = [] + for p in pred_ids: + if p not in overlap: + continue + sig = [t for t, e in overlap[p].items() + if pred_energy[p] > 0 and e / pred_energy[p] > 0.1] + if len(sig) > 1: + merged_preds.append(p) + + # Splits: truth cluster covered by >1 pred cluster + truth_to_pred = defaultdict(list) + for p in pred_ids: + if p not in overlap: + continue + for t, e in overlap[p].items(): + if pred_energy[p] > 0 and e / pred_energy[p] > 0.5: + truth_to_pred[t].append(p) + split_truths = [t for t, ps in truth_to_pred.items() if len(ps) > 1] + + return merged_preds, split_truths + + +def assign_colors(labels): + """Map cluster labels to colors. -1 gets UNCLUSTERED_COLOR.""" + unique = sorted(set(labels[labels >= 0].tolist())) + cmap = {cid: CLUSTER_COLORS[i % len(CLUSTER_COLORS)] + for i, cid in enumerate(unique)} + return cmap + + +def draw_panel(ax, hit_x, hit_y, hit_energy, labels, disk_id, crystal_map, + title, edge_index=None, edge_probs=None, + merged_clusters=None, split_clusters=None, truth_labels=None, + focus_hits=None): + """Draw one panel of the 3-panel display. + + If *focus_hits* is provided (set of hit indices), non-focus hits are + drawn very faintly so the failing clusters stand out. 
+ """ + half = CRYSTAL_SIZE / 2 + n_hits = len(hit_x) + e_max = hit_energy.max() if hit_energy.max() > 0 else 1.0 + + # Background crystals + bg_patches = [] + for cid, (did, cx, cy) in crystal_map.items(): + if did != disk_id: + continue + bg_patches.append(Rectangle((cx - half, cy - half), + CRYSTAL_SIZE, CRYSTAL_SIZE)) + pc_bg = PatchCollection(bg_patches, facecolor=BG_CRYSTAL_FACE, + edgecolor=BG_CRYSTAL_EDGE, linewidth=0.3, zorder=1) + ax.add_collection(pc_bg) + + # Draw edges (only on GNN panel, colored by probability) + if edge_index is not None and edge_probs is not None: + lines = [] + colors = [] + seen = set() + for ei in range(edge_index.shape[1]): + s, d = edge_index[0, ei], edge_index[1, ei] + key = (min(s, d), max(s, d)) + if key in seen: + continue + seen.add(key) + # Dim edges not involving focus hits + ea = 0.6 + if focus_hits is not None and s not in focus_hits and d not in focus_hits: + ea = 0.08 + lines.append([(hit_x[s], hit_y[s]), (hit_x[d], hit_y[d])]) + c = list(EDGE_CMAP(edge_probs[ei])) + c[3] = ea + colors.append(c) + if lines: + lc = LineCollection(lines, colors=colors, linewidths=1.2, + zorder=2) + ax.add_collection(lc) + + # Color map for this panel's clusters + cmap = assign_colors(labels) + + # Group hits by crystal position for multi-hit handling + pos_hits = defaultdict(list) # (rounded x, y) -> [hit indices] + for i in range(n_hits): + pos_key = (round(hit_x[i], 1), round(hit_y[i], 1)) + pos_hits[pos_key].append(i) + + # Draw hit crystals + for pos_key, hit_indices in pos_hits.items(): + n_at_pos = len(hit_indices) + + for slot, i in enumerate(hit_indices): + in_focus = (focus_hits is None) or (i in focus_hits) + cid = labels[i] + if cid >= 0: + color = cmap[cid] + alpha = 0.5 + 0.5 * (hit_energy[i] / e_max) + else: + color = UNCLUSTERED_COLOR + alpha = 0.5 + + # Dim non-focus hits + if not in_focus: + alpha = 0.1 + + # Mark merged/split clusters with thick border + lw = 0.8 + ec = "#333333" + if merged_clusters and cid in 
merged_clusters: + lw = 3.0 + ec = "#cc0000" + if split_clusters and truth_labels is not None: + tc = truth_labels[i] + if tc in split_clusters: + lw = 3.0 + ec = "#e67300" + + cx, cy = hit_x[i], hit_y[i] + + if n_at_pos == 1: + # Single hit — full crystal + rect = Rectangle((cx - half, cy - half), + CRYSTAL_SIZE, CRYSTAL_SIZE, + facecolor=color, edgecolor=ec, + linewidth=lw, alpha=alpha, zorder=4) + ax.add_patch(rect) + else: + # Multi-hit — split crystal into horizontal bands + band_h = CRYSTAL_SIZE / n_at_pos + band_y = cy - half + slot * band_h + rect = Rectangle((cx - half, band_y), + CRYSTAL_SIZE, band_h, + facecolor=color, edgecolor=ec, + linewidth=lw, alpha=alpha, + zorder=4 + slot) + ax.add_patch(rect) + + # Energy label + text_alpha = 1.0 if in_focus else 0.12 + fs = 8 if in_focus else 5 + + if n_at_pos == 1: + ty = cy + else: + ty = cy - half + (slot + 0.5) * (CRYSTAL_SIZE / n_at_pos) + fs = max(5, fs - 1) # slightly smaller for stacked labels + + ax.text(cx, ty, f"{hit_energy[i]:.1f}", + fontsize=fs, ha="center", va="center", color="#1a1a1a", + fontweight="semibold" if in_focus else "normal", + zorder=5 + slot, alpha=text_alpha) + + # Count clusters + unique_ids = sorted(set(labels[labels >= 0].tolist())) + n_clust = len(unique_ids) + n_unclust = (labels == -1).sum() + total_e = hit_energy.sum() + + subtitle = f"{n_clust} clusters, {n_hits} hits, E = {total_e:.0f} MeV" + if n_unclust > 0: + subtitle += f", {n_unclust} unclustered" + ax.set_title(f"{title}\n{subtitle}", fontsize=13, fontweight="bold", + pad=10) + + ax.set_aspect("equal") + ax.set_facecolor(PANEL_BG) + ax.set_xlabel("x (mm)", fontsize=11) + ax.set_ylabel("y (mm)", fontsize=11) + ax.tick_params(axis="both", which="major", labelsize=9) + ax.grid(False) + + +def _get_failure_focus(gnn_labels, truth_labels, merged_preds, split_truths): + """Return set of hit indices involved in any merge or split.""" + focus = set() + for i in range(len(gnn_labels)): + if gnn_labels[i] in merged_preds: + 
focus.add(i) + if truth_labels[i] in split_truths: + focus.add(i) + return focus + + +def plot_event_3panel(hit_x, hit_y, hit_energy, disk_id, + truth_labels, bfs_labels, gnn_labels, + edge_index, edge_probs, + crystal_map, out_path, event_label="", + zoomed=False, focus_override=None): + """Plot 3-panel display: Truth | BFS | GNN. + + If *zoomed* is True, zoom into the failing clusters, dim irrelevant + hits, and label panels as "zoomed". + + *focus_override* — explicit set of hit indices to zoom into (e.g. + BFS failure hits for success displays). + """ + fig, axes = plt.subplots(1, 3, figsize=(36, 12)) + + # Detect GNN failure modes + merged_preds, split_truths = detect_failures( + gnn_labels, truth_labels, hit_energy) + + merged_set = set(merged_preds) + split_set = set(split_truths) + + # Focus hits and zoom limits + focus_hits = None + if focus_override: + focus_hits = focus_override + elif zoomed and (merged_preds or split_truths): + focus_hits = _get_failure_focus( + gnn_labels, truth_labels, merged_set, split_set) + + if focus_hits: + fx = hit_x[list(focus_hits)] + fy = hit_y[list(focus_hits)] + pad = 120 + xlim = (fx.min() - pad, fx.max() + pad) + ylim = (fy.min() - pad, fy.max() + pad) + else: + pad = 80 + xlim = (hit_x.min() - pad, hit_x.max() + pad) + ylim = (hit_y.min() - pad, hit_y.max() + pad) + + zoom_tag = " (zoomed)" if zoomed else "" + + # Panel 1: MC Truth + draw_panel(axes[0], hit_x, hit_y, hit_energy, truth_labels, + disk_id, crystal_map, f"MC Truth{zoom_tag}", + focus_hits=focus_hits) + + # Panel 2: BFS + draw_panel(axes[1], hit_x, hit_y, hit_energy, bfs_labels, + disk_id, crystal_map, f"BFS Reco{zoom_tag}", + focus_hits=focus_hits) + + # Panel 3: GNN + gnn_title = "GNN Predicted" + annotations = [] + if merged_preds: + annotations.append(f"{len(merged_preds)} merge") + if split_truths: + annotations.append(f"{len(split_truths)} split") + if annotations: + gnn_title += f" ({', '.join(annotations)})" + gnn_title += zoom_tag + + 
draw_panel(axes[2], hit_x, hit_y, hit_energy, gnn_labels, + disk_id, crystal_map, gnn_title, + edge_index=edge_index, edge_probs=edge_probs, + merged_clusters=merged_set, + split_clusters=split_set, + truth_labels=truth_labels, + focus_hits=focus_hits) + + # Set consistent limits + for ax in axes: + ax.set_xlim(*xlim) + ax.set_ylim(*ylim) + + # Legend — clean, compact + legend_handles = [ + Line2D([0], [0], color=EDGE_CMAP(0.0), linewidth=2.5, + label="Edge prob \u2248 0"), + Line2D([0], [0], color=EDGE_CMAP(1.0), linewidth=2.5, + label="Edge prob \u2248 1"), + Patch(facecolor=UNCLUSTERED_COLOR, edgecolor="#666666", linewidth=0.5, + label="Unclustered"), + Patch(facecolor="white", edgecolor="#cc0000", linewidth=2.5, + label="Merged cluster"), + Patch(facecolor="white", edgecolor="#e67300", linewidth=2.5, + label="Split cluster"), + ] + fig.legend(handles=legend_handles, loc="lower center", ncol=5, + fontsize=11, frameon=True, framealpha=0.9, + edgecolor="#cccccc", bbox_to_anchor=(0.5, -0.01)) + + fig.suptitle(event_label, fontsize=15, fontweight="bold", y=1.02) + fig.tight_layout(w_pad=3) + fig.savefig(out_path, dpi=180, bbox_inches="tight", + facecolor="white", edgecolor="none") + plt.close(fig) + + +def process_event_disk(arrays, ev, disk_id, crystal_map, graph_cfg, + model, stats, device, tau_edge, tau_node=None, + calo_root_map=None): + """Extract one event-disk, run GNN, return all data for plotting. + + Returns None if the disk has < 2 hits. 
+ """ + nhits = len(arrays["calohits.crystalId_"][ev]) + if nhits == 0: + return None + + cryids = np.array(arrays["calohits.crystalId_"][ev], dtype=np.int64) + energies = np.array(arrays["calohits.eDep_"][ev], dtype=np.float64) + times = np.array(arrays["calohits.time_"][ev], dtype=np.float64) + cluster_idx = np.array(arrays["calohits.clusterIdx_"][ev], dtype=np.int64) + xs = np.array(arrays["calohits.crystalPos_.fCoordinates.fX"][ev], + dtype=np.float64) + ys = np.array(arrays["calohits.crystalPos_.fCoordinates.fY"][ev], + dtype=np.float64) + simids = arrays["calohitsmc.simParticleIds"][ev] + edeps_mc = arrays["calohitsmc.eDeps"][ev] + + disks = np.array([crystal_map[int(c)][0] if int(c) in crystal_map + else -1 for c in cryids], dtype=np.int64) + + if np.all(xs == 0) and np.all(ys == 0): + for i, c in enumerate(cryids): + if int(c) in crystal_map: + _, xs[i], ys[i] = crystal_map[int(c)] + + dm = disks == disk_id + n_disk = dm.sum() + if n_disk < 2: + return None + + d_e = energies[dm] + d_t = times[dm] + d_x = xs[dm] + d_y = ys[dm] + d_pos = np.stack([d_x, d_y], axis=1) + d_cidx = cluster_idx[dm] + d_disks = np.full(n_disk, disk_id, dtype=np.int64) + + disk_indices = np.where(dm)[0] + d_simids = [list(simids[i]) for i in disk_indices] + d_edeps = [list(edeps_mc[i]) for i in disk_indices] + + mc_truth = build_mc_truth_clusters(d_simids, d_edeps, d_disks, n_disk, + calo_root_map) + + # Build graph and run GNN + edge_index, _ = build_graph( + d_pos, d_t, + r_max=graph_cfg["r_max_mm"], dt_max=graph_cfg["dt_max_ns"], + k_min=graph_cfg["k_min"], k_max=graph_cfg["k_max"]) + + if edge_index.shape[1] == 0: + gnn_labels = np.arange(n_disk) + edge_probs = np.array([]) + return { + "hit_x": d_x, "hit_y": d_y, "energies": d_e, + "truth_labels": mc_truth, "bfs_labels": d_cidx, + "gnn_labels": gnn_labels, "edge_index": edge_index, + "edge_probs": edge_probs, "disk_id": disk_id, + } + + node_feat = compute_node_features(d_pos, d_t, d_e) + edge_feat = 
def _load_first_file(file_list, root_dir, branches, entry_stop):
    """Read *branches* of the split's first ROOT file into memory.

    The file is looked up by bare name under *root_dir* (split lists
    store original paths; local copies are used).  Returns the awkward
    arrays object with at most *entry_stop* events.
    """
    import uproot  # heavy, deferred import (as in the original flow)

    fname = Path(file_list[0]).name
    local_path = str(Path(root_dir) / fname)
    tree = uproot.open(local_path + ":EventNtuple/ntuple")
    return tree.arrays(branches, entry_stop=entry_stop)


def _event_root_map(arrays, ev, crystal_disk_map):
    """Build the calo-entrant root map for one event (truth ancestry)."""
    return build_calo_root_map(
        arrays["calomcsim.id"][ev],
        arrays["calomcsim.ancestorSimIds"][ev],
        arrays["calohitsmc.simParticleIds"][ev],
        arrays["calohits.crystalId_"][ev],
        crystal_disk_map)


def _plot_result(result, crystal_map, out_path, label, zoomed=False,
                 focus=None):
    """Forward one process_event_disk() result to plot_event_3panel()."""
    plot_event_3panel(
        result["hit_x"], result["hit_y"], result["energies"],
        result["disk_id"], result["truth_labels"],
        result["bfs_labels"], result["gnn_labels"],
        result["edge_index"], result["edge_probs"],
        crystal_map, out_path, event_label=label,
        zoomed=zoomed, focus_override=focus)


def main():
    """CLI entry point: render 3-panel Truth | BFS | GNN event displays."""
    parser = argparse.ArgumentParser(
        description="3-panel event display: MC Truth | BFS | GNN")
    parser.add_argument("--root-dir", type=str,
                        default="/exp/mu2e/data/users/wzhou2/GNN/root_files_v2")
    parser.add_argument("--checkpoint", type=str,
                        default="outputs/runs/simple_edge_net_v2/checkpoints/best_model.pt")
    parser.add_argument("--config", type=str, default="configs/default.yaml")
    parser.add_argument("--split", type=str, default="val",
                        choices=["val", "test"],
                        help="Which split to draw events from")
    parser.add_argument("--n-events", type=int, default=6,
                        help="Number of event-disk graphs to plot")
    parser.add_argument("--event-indices", type=int, nargs="*", default=None,
                        help="Specific event indices within the first file")
    parser.add_argument("--tau-edge", type=float, default=None,
                        help="Override tau_edge (default: from config)")
    parser.add_argument("--find-failures", action="store_true",
                        help="Scan events to find and plot failure cases "
                             "(merges/splits)")
    parser.add_argument("--find-successes", action="store_true",
                        help="Scan events to find and plot cases where "
                             "GNN clustering was very good (no failures, "
                             "many clusters)")
    parser.add_argument("--n-scan", type=int, default=200,
                        help="Events to scan in --find-failures/successes mode")
    parser.add_argument("--output-dir", type=str, default=None,
                        help="Output directory (default: outputs/gnn_cluster_display_)")
    parser.add_argument("--device", type=str, default=None)
    args = parser.parse_args()

    with open(args.config) as f:
        cfg = yaml.safe_load(f)

    if args.device:
        device = torch.device(args.device)
    elif torch.cuda.is_available():
        device = torch.device("cuda")
    else:
        device = torch.device("cpu")

    # BUGFIX: `args.tau_edge or cfg[...]` silently discarded an explicit
    # `--tau-edge 0.0` (0.0 is falsy); compare against None instead.
    tau_edge = (args.tau_edge if args.tau_edge is not None
                else cfg["inference"]["tau_edge"])
    graph_cfg = cfg["graph"]
    model_name = cfg["model"].get("name", "SimpleEdgeNet")
    has_node_head = model_name == "CaloClusterNet"
    # Only apply tau_node if the node head was actually trained (lambda_node > 0)
    lambda_node = cfg.get("train", {}).get("lambda_node", 0.0)
    tau_node = (cfg["inference"].get("tau_node")
                if (has_node_head and lambda_node > 0) else None)

    # Load model checkpoint
    model = build_model(cfg)
    ckpt = torch.load(args.checkpoint, weights_only=False,
                      map_location=device)
    model.load_state_dict(ckpt["model_state_dict"])
    model.to(device).eval()
    print(f"Model: {model_name}, epoch {ckpt['epoch']}, val F1={ckpt['val_f1']:.4f}")
    print(f"tau_edge = {tau_edge}, tau_node = {tau_node}, device = {device}")

    stats = load_stats(cfg["data"]["normalization_stats"])
    crystal_map = load_crystal_map("data/crystal_geometry.csv")

    # Load split file list
    split_key = args.split
    with open(cfg["data"]["splits"][split_key]) as f:
        file_list = [l.strip() for l in f if l.strip()]
    print(f"Split '{split_key}': {len(file_list)} files")

    # BUGFIX: the --find-successes default directory used to be assigned
    # *after* args.output_dir had already been defaulted and out_dir
    # created (dead code, so `outputs/success_...` never applied).
    # Pick the per-mode default once, up front.
    if args.output_dir is None:
        prefix = "success" if args.find_successes else "gnn_cluster_display"
        args.output_dir = f"outputs/{prefix}_{model_name.lower()}"
    out_dir = Path(args.output_dir)
    out_dir.mkdir(parents=True, exist_ok=True)

    branches = [
        "calohits.crystalId_", "calohits.eDep_", "calohits.time_",
        "calohits.clusterIdx_",
        "calohits.crystalPos_.fCoordinates.fX",
        "calohits.crystalPos_.fCoordinates.fY",
        "calohitsmc.simParticleIds", "calohitsmc.eDeps",
        "calomcsim.id", "calomcsim.ancestorSimIds",
    ]

    crystal_disk_map = {cid: disk for cid, (disk, _, _) in crystal_map.items()}
    first_name = Path(file_list[0]).name

    if args.find_failures:
        # Scan events to find interesting failure cases
        print(f"Scanning {args.n_scan} events for failure cases...")
        arrays = _load_first_file(file_list, args.root_dir, branches,
                                  args.n_scan)
        failure_events = []
        for ev in range(len(arrays)):
            crm = _event_root_map(arrays, ev, crystal_disk_map)
            for disk_id in [0, 1]:
                result = process_event_disk(
                    arrays, ev, disk_id, crystal_map, graph_cfg,
                    model, stats, device, tau_edge, tau_node=tau_node,
                    calo_root_map=crm)
                if result is None:
                    continue
                merged, split = detect_failures(
                    result["gnn_labels"], result["truth_labels"],
                    result["energies"])
                n_fail = len(merged) + len(split)
                if n_fail > 0:
                    failure_events.append((ev, disk_id, n_fail, result))

        # Worst offenders first
        failure_events.sort(key=lambda x: -x[2])
        print(f"Found {len(failure_events)} event-disks with failures")

        n_plot = min(args.n_events, len(failure_events))
        for idx in range(n_plot):
            ev, disk_id, n_fail, result = failure_events[idx]
            label = (f"Event {ev}, Disk {disk_id} — "
                     f"{n_fail} failure(s) [file: {first_name}]")
            out_path = out_dir / f"debug_{idx:03d}_evt{ev}_disk{disk_id}.png"
            _plot_result(result, crystal_map, out_path, label, zoomed=True)
            print(f"  [{idx+1}/{n_plot}] {label} -> {out_path.name}")

    elif args.find_successes:
        # Scan events where the GNN is clean but BFS fails
        print(f"Scanning {args.n_scan} events for success cases...")
        arrays = _load_first_file(file_list, args.root_dir, branches,
                                  args.n_scan)
        success_events = []
        for ev in range(len(arrays)):
            crm = _event_root_map(arrays, ev, crystal_disk_map)
            for disk_id in [0, 1]:
                result = process_event_disk(
                    arrays, ev, disk_id, crystal_map, graph_cfg,
                    model, stats, device, tau_edge, tau_node=tau_node,
                    calo_root_map=crm)
                if result is None:
                    continue
                gnn_merged, gnn_split = detect_failures(
                    result["gnn_labels"], result["truth_labels"],
                    result["energies"])
                bfs_merged, bfs_split = detect_failures(
                    result["bfs_labels"], result["truth_labels"],
                    result["energies"])
                n_gnn_fail = len(gnn_merged) + len(gnn_split)
                n_bfs_fail = len(bfs_merged) + len(bfs_split)
                # GNN succeeds where BFS fails
                if n_gnn_fail == 0 and n_bfs_fail > 0:
                    # Store BFS failure hit indices for zooming
                    bfs_focus = _get_failure_focus(
                        result["bfs_labels"], result["truth_labels"],
                        set(bfs_merged), set(bfs_split))
                    success_events.append(
                        (ev, disk_id, n_bfs_fail, result, bfs_focus))

        # Sort by most BFS failures (biggest GNN advantage)
        success_events.sort(key=lambda x: -x[2])
        print(f"Found {len(success_events)} event-disks where "
              f"GNN perfect but BFS has failures")

        n_plot = min(args.n_events, len(success_events))
        for idx in range(n_plot):
            ev, disk_id, n_bfs_fail, result, bfs_focus = success_events[idx]
            label = (f"Event {ev}, Disk {disk_id} — "
                     f"GNN: 0 failures, BFS: {n_bfs_fail} failure(s) "
                     f"[file: {first_name}]")
            out_path = out_dir / f"success_{idx:03d}_evt{ev}_disk{disk_id}.png"
            _plot_result(result, crystal_map, out_path, label,
                         zoomed=True, focus=bfs_focus)
            print(f"  [{idx+1}/{n_plot}] {label} -> {out_path.name}")

    else:
        # Plot specific events, or the first N event-disks with >= 2 hits
        if args.event_indices:
            n_read = max(args.event_indices) + 1
        else:
            n_read = args.n_events * 3  # read extra since some disks are skipped
        arrays = _load_first_file(file_list, args.root_dir, branches, n_read)

        plotted = 0
        target = args.n_events
        ev_iter = (args.event_indices if args.event_indices
                   else range(len(arrays)))

        for ev in ev_iter:
            if plotted >= target:
                break
            if ev >= len(arrays):
                continue
            crm = _event_root_map(arrays, ev, crystal_disk_map)
            for disk_id in [0, 1]:
                if plotted >= target:
                    break
                result = process_event_disk(
                    arrays, ev, disk_id, crystal_map, graph_cfg,
                    model, stats, device, tau_edge, tau_node=tau_node,
                    calo_root_map=crm)
                if result is None:
                    continue

                label = (f"Event {ev}, Disk {disk_id} "
                         f"[{split_key} split, {first_name}]")
                out_path = out_dir / f"display_{plotted:03d}_evt{ev}_disk{disk_id}.png"
                _plot_result(result, crystal_map, out_path, label)
                print(f"  [{plotted+1}/{target}] {label} -> {out_path.name}")
                plotted += 1

    print(f"\nDone. Plots saved to {out_dir}/")
CRYSTAL_PITCH = 34.3
CRYSTAL_SIZE = CRYSTAL_PITCH * 0.92

CLUSTER_COLORS = [
    "#e41a1c", "#377eb8", "#4daf4a", "#984ea3", "#ff7f00",
    "#a65628", "#f781bf", "#999999", "#66c2a5", "#fc8d62",
    "#8da0cb", "#e78ac3", "#a6d854", "#ffd92f", "#e5c494",
    "#b3b3b3", "#1b9e77", "#d95f02", "#7570b3", "#e7298a",
]
UNCLUSTERED_COLOR = "#cccccc"


def build_truth_clusters_new(simids, edeps, disks, nhits,
                             calo_root_map, purity_thresh=0.7):
    """Label each hit with its calo-entrant truth cluster (or -1).

    Per-hit MC energy is pooled by calo-entrant root particle (looked
    up in *calo_root_map*, keyed ``(simid, disk)``; unknown particles
    are their own root).  The hit joins the cluster of its dominant
    root only when that root holds at least *purity_thresh* of the
    hit's total deposited energy.  Returns an int64 array of labels.
    """
    labels = np.full(nhits, -1, dtype=np.int64)
    label_of = {}  # (disk, root simid) -> sequential cluster label
    for hit in range(nhits):
        pids = list(simids[hit])
        deps = list(edeps[hit])
        total = sum(deps)
        if len(pids) == 0 or total <= 0:
            continue  # no usable MC information
        disk = int(disks[hit])
        pooled = defaultdict(float)
        for pid, dep in zip(pids, deps):
            pooled[calo_root_map.get((int(pid), disk), int(pid))] += float(dep)
        top = max(pooled, key=pooled.get)
        if pooled[top] / total < purity_thresh:
            continue  # ambiguous hit — fails the purity cut
        labels[hit] = label_of.setdefault((disk, top), len(label_of))
    return labels


def detect_failures(pred_labels, truth_labels, energies):
    """Detect merged and split clusters by energy overlap.

    Returns (merged_pred_ids, split_truth_ids, involved_truth_ids,
    involved_pred_ids).  The last two sets contain all truth/pred
    cluster IDs participating in any failure.
    """
    pred_ids = sorted(set(pred_labels[pred_labels >= 0].tolist()))
    truth_ids = sorted(set(truth_labels[truth_labels >= 0].tolist()))
    if not pred_ids or not truth_ids:
        return [], [], set(), set()

    # Energy overlap matrix: overlap[pred][truth] -> shared energy
    overlap = defaultdict(lambda: defaultdict(float))
    pred_energy = defaultdict(float)
    for p, t, e in zip(pred_labels, truth_labels, energies):
        if p >= 0:
            pred_energy[p] += e
            if t >= 0:
                overlap[p][t] += e

    # Merges: pred cluster with >1 significant (>10%) truth contributor
    merged_preds = []
    involved_truth, involved_pred = set(), set()
    for p in pred_ids:
        tot = pred_energy[p]
        if p not in overlap or tot <= 0:
            continue
        donors = [t for t, e in overlap[p].items() if e / tot > 0.1]
        if len(donors) > 1:
            merged_preds.append(p)
            involved_pred.add(p)
            involved_truth.update(donors)

    # Splits: truth cluster claimed (>50% of pred energy) by >1 pred
    truth_to_pred = defaultdict(list)
    for p in pred_ids:
        tot = pred_energy[p]
        if p not in overlap or tot <= 0:
            continue
        for t, e in overlap[p].items():
            if e / tot > 0.5:
                truth_to_pred[t].append(p)
    split_truths = [t for t, ps in truth_to_pred.items() if len(ps) > 1]
    for t in split_truths:
        involved_truth.add(t)
        involved_pred.update(truth_to_pred[t])

    return merged_preds, split_truths, involved_truth, involved_pred


def assign_colors(labels):
    """Map each non-negative cluster id to a palette color (cycling)."""
    present = sorted(set(labels[labels >= 0].tolist()))
    return {cid: CLUSTER_COLORS[i % len(CLUSTER_COLORS)]
            for i, cid in enumerate(present)}
+ """ + half = CRYSTAL_SIZE / 2 + n_hits = len(hit_x) + e_max = hit_energy.max() if hit_energy.max() > 0 else 1.0 + + # Jitter overlapping hits (same crystal, multiple time windows) + draw_x = hit_x.copy() + draw_y = hit_y.copy() + pos_count = {} + jitter = CRYSTAL_SIZE * 0.25 + for i in range(n_hits): + key = (round(hit_x[i], 1), round(hit_y[i], 1)) + idx = pos_count.get(key, 0) + pos_count[key] = idx + 1 + if idx > 0: + # Offset: first duplicate goes up-right, second down-left, etc. + dx = jitter * (1 if idx % 2 == 1 else -1) + dy = jitter * (1 if idx <= 2 else -1) + draw_x[i] += dx + draw_y[i] += dy + + # Background crystals + bg_patches = [] + for cid, (did, cx, cy) in crystal_map.items(): + if did != disk_id: + continue + bg_patches.append(Rectangle((cx - half, cy - half), + CRYSTAL_SIZE, CRYSTAL_SIZE)) + pc_bg = PatchCollection(bg_patches, facecolor="#e8e8e8", + edgecolor="#cccccc", linewidth=0.3, zorder=1) + ax.add_collection(pc_bg) + + # Edges — only draw edges touching focused hits in focus mode + if edge_index is not None and edge_probs is not None: + lines, colors = [], [] + seen = set() + for ei in range(edge_index.shape[1]): + s, d = edge_index[0, ei], edge_index[1, ei] + key = (min(s, d), max(s, d)) + if key in seen: + continue + seen.add(key) + # In focus mode, only draw edges where at least one endpoint + # is in a focused cluster + if focus_labels is not None: + s_in = labels[s] in focus_labels + d_in = labels[d] in focus_labels + if not (s_in or d_in): + continue + lines.append([(draw_x[s], draw_y[s]), (draw_x[d], draw_y[d])]) + colors.append(EDGE_CMAP(edge_probs[ei])) + if lines: + lw = 1.5 if focus_labels else 0.8 + lc = LineCollection(lines, colors=colors, linewidths=lw, + zorder=2, alpha=0.7) + ax.add_collection(lc) + + # Only color focused clusters; dim the rest + if focus_labels is not None: + focused_ids = sorted(focus_labels) + cmap = {cid: CLUSTER_COLORS[i % len(CLUSTER_COLORS)] + for i, cid in enumerate(focused_ids)} + else: + cmap = 
assign_colors(labels) + + for i in range(n_hits): + cid = labels[i] + is_focused = focus_labels is None or cid in focus_labels + + if not is_focused: + # Dim non-focused hits + color = "#e0e0e0" + alpha = 0.3 + lw, ec = 0.3, "#d0d0d0" + fontsize = 0 # hide energy text + elif cid >= 0: + color = cmap.get(cid, UNCLUSTERED_COLOR) + alpha = 0.6 + 0.4 * (hit_energy[i] / e_max) + lw, ec = 1.5, "black" + fontsize = 7 if focus_labels else 5 + else: + color = UNCLUSTERED_COLOR + alpha = 0.5 + lw, ec = 1.0, "black" + fontsize = 5 + + if is_focused: + if merged_clusters and cid in merged_clusters: + lw, ec = 3.0, "red" + if split_clusters and truth_labels is not None: + tc = truth_labels[i] + if tc in split_clusters: + lw, ec = 3.0, "darkorange" + + rect = Rectangle((draw_x[i] - half, draw_y[i] - half), + CRYSTAL_SIZE, CRYSTAL_SIZE, + facecolor=color, edgecolor=ec, + linewidth=lw, alpha=alpha, zorder=4 if is_focused else 2) + ax.add_patch(rect) + if fontsize > 0: + ax.text(draw_x[i], draw_y[i], f"{hit_energy[i]:.1f}", + fontsize=fontsize, ha="center", va="center", + color="black", fontweight="bold" if focus_labels else "normal", + zorder=5, style="italic") + + unique_ids = sorted(set(labels[labels >= 0].tolist())) + n_clust = len(unique_ids) + n_unclust = (labels == -1).sum() + total_e = hit_energy.sum() + + subtitle = f"{n_clust} clusters, {n_hits} hits, E={total_e:.0f} MeV" + if n_unclust > 0: + subtitle += f", {n_unclust} unclustered" + if focus_labels is not None: + n_shown = len(focus_labels) + subtitle += f"\nShowing {n_shown}/{n_clust} clusters involved in failures (rest dimmed)" + ax.set_title(f"{title}\n{subtitle}", fontsize=10) + ax.set_aspect("equal") + ax.set_facecolor("#f7f7f7") + ax.grid(True, linewidth=0.2, alpha=0.3, zorder=0) + + +def plot_event_3panel(hit_x, hit_y, hit_energy, disk_id, + truth_labels, bfs_labels, gnn_labels, + edge_index, edge_probs, + crystal_map, out_path, event_label="", + debug=False): + """Plot 3-panel display. 
    If debug=True, identifies the failure region, zooms in, and dims
    all clusters not involved in the failure.
    """
    fig, axes = plt.subplots(1, 3, figsize=(36, 12))

    merged_gnn, split_gnn, inv_truth_gnn, inv_pred_gnn = detect_failures(
        gnn_labels, truth_labels, hit_energy)
    merged_bfs, split_bfs, inv_truth_bfs, inv_pred_bfs = detect_failures(
        bfs_labels, truth_labels, hit_energy)

    # Combine all truth clusters involved in any failure (GNN or BFS)
    all_inv_truth = inv_truth_gnn | inv_truth_bfs

    # Focus sets per panel (which cluster IDs to highlight)
    focus_truth = all_inv_truth if debug else None
    focus_bfs = inv_pred_bfs if debug else None
    focus_gnn = inv_pred_gnn if debug else None

    if debug and all_inv_truth:
        # Zoom to the region containing involved hits (+ padding)
        inv_hits = np.array([i for i in range(len(truth_labels))
                             if truth_labels[i] in all_inv_truth])
        if len(inv_hits) > 0:
            pad = 120
            xlim = (hit_x[inv_hits].min() - pad, hit_x[inv_hits].max() + pad)
            ylim = (hit_y[inv_hits].min() - pad, hit_y[inv_hits].max() + pad)
        else:
            pad = 80
            xlim = (hit_x.min() - pad, hit_x.max() + pad)
            ylim = (hit_y.min() - pad, hit_y.max() + pad)
    else:
        pad = 80
        xlim = (hit_x.min() - pad, hit_x.max() + pad)
        ylim = (hit_y.min() - pad, hit_y.max() + pad)

    # Panel 1: Calo-entrant truth
    draw_panel(axes[0], hit_x, hit_y, hit_energy, truth_labels,
               disk_id, crystal_map, "MC Truth (calo-entrant)",
               focus_labels=focus_truth)

    # Panel 2: BFS
    bfs_title = "BFS Reco"
    bfs_ann = []
    if merged_bfs:
        bfs_ann.append(f"{len(merged_bfs)} merge(s)")
    if split_bfs:
        bfs_ann.append(f"{len(split_bfs)} split(s)")
    if bfs_ann:
        bfs_title += f" [{', '.join(bfs_ann)}]"
    draw_panel(axes[1], hit_x, hit_y, hit_energy, bfs_labels,
               disk_id, crystal_map, bfs_title,
               merged_clusters=set(merged_bfs),
               split_clusters=set(split_bfs),
               truth_labels=truth_labels,
               focus_labels=focus_bfs)

    # Panel 3: GNN — the only panel that also draws the edge-probability graph
    gnn_title = "GNN Predicted"
    gnn_ann = []
    if merged_gnn:
        gnn_ann.append(f"{len(merged_gnn)} merge(s)")
    if split_gnn:
        gnn_ann.append(f"{len(split_gnn)} split(s)")
    if gnn_ann:
        gnn_title += f" [{', '.join(gnn_ann)}]"
    draw_panel(axes[2], hit_x, hit_y, hit_energy, gnn_labels,
               disk_id, crystal_map, gnn_title,
               edge_index=edge_index, edge_probs=edge_probs,
               merged_clusters=set(merged_gnn),
               split_clusters=set(split_gnn),
               truth_labels=truth_labels,
               focus_labels=focus_gnn)

    for ax in axes:
        ax.set_xlim(*xlim)
        ax.set_ylim(*ylim)

    legend_handles = [
        Line2D([0], [0], color=EDGE_CMAP(0.0), linewidth=2,
               label="Edge prob ~0 (different cluster)"),
        Line2D([0], [0], color=EDGE_CMAP(1.0), linewidth=2,
               label="Edge prob ~1 (same cluster)"),
        Patch(facecolor="#e0e0e0", edgecolor="#d0d0d0", linewidth=0.5,
              label="Not involved (dimmed)"),
        Patch(facecolor="white", edgecolor="red", linewidth=2.5,
              label="Merged cluster"),
        Patch(facecolor="white", edgecolor="darkorange", linewidth=2.5,
              label="Split cluster"),
    ]
    fig.legend(handles=legend_handles, loc="lower center", ncol=5,
               fontsize=9, frameon=True, bbox_to_anchor=(0.5, -0.02))

    fig.suptitle(event_label, fontsize=12, fontweight="bold", y=1.01)
    fig.tight_layout()
    fig.savefig(out_path, dpi=150, bbox_inches="tight")
    plt.close(fig)


def v1_to_v2_path(v1_name, v2_dir):
    """Find the v2 ROOT file matching a v1 file's trailing sequence number.

    Returns a Path or None if no mcs.*<seq>.root match exists in v2_dir.
    """
    stem = Path(v1_name).stem
    seq = stem.split(".")[-1]
    matches = list(Path(v2_dir).glob(f"mcs.*{seq}.root"))
    return matches[0] if matches else None


def process_event_disk(arrays, ev, disk_id, crystal_map, crystal_disk_map,
                       graph_cfg, model, stats, device, tau_edge,
                       tau_node=None):
    """Extract one event-disk from v2 arrays, run GNN, return plot data.

    Returns None when the event has no hits or fewer than 2 hits on the
    requested disk; otherwise a dict of per-hit arrays, truth/BFS/GNN
    labels, and edge probabilities for plotting.
    """
    import uproot

    nhits = len(arrays["calohits.crystalId_"][ev])
    if nhits == 0:
        return None

    cryids = np.array(arrays["calohits.crystalId_"][ev], dtype=np.int64)
    energies = np.array(arrays["calohits.eDep_"][ev], dtype=np.float64)
    times = np.array(arrays["calohits.time_"][ev], dtype=np.float64)
    cluster_idx = np.array(arrays["calohits.clusterIdx_"][ev], dtype=np.int64)
    xs = np.array(arrays["calohits.crystalPos_.fCoordinates.fX"][ev], dtype=np.float64)
    ys = np.array(arrays["calohits.crystalPos_.fCoordinates.fY"][ev], dtype=np.float64)
    simids = arrays["calohitsmc.simParticleIds"][ev]
    edeps_mc = arrays["calohitsmc.eDeps"][ev]

    # Unknown crystal IDs map to disk -1 and are excluded by the disk mask below.
    disks = np.array([crystal_map[int(c)][0] if int(c) in crystal_map
                      else -1 for c in cryids], dtype=np.int64)

    # Fallback: some files store zeroed positions; recover them from geometry.
    if np.all(xs == 0) and np.all(ys == 0):
        for i, c in enumerate(cryids):
            if int(c) in crystal_map:
                _, xs[i], ys[i] = crystal_map[int(c)]

    dm = disks == disk_id
    n_disk = dm.sum()
    if n_disk < 2:
        return None

    d_e = energies[dm]
    d_t = times[dm]
    d_x = xs[dm]
    d_y = ys[dm]
    d_pos = np.stack([d_x, d_y], axis=1)
    d_cidx = cluster_idx[dm]
    d_disks = np.full(n_disk, disk_id, dtype=np.int64)

    disk_indices = np.where(dm)[0]
    d_simids = [list(simids[i]) for i in disk_indices]
    d_edeps = [list(edeps_mc[i]) for i in disk_indices]

    # Build calo-root map for this event
    sim_ids_evt = arrays["calomcsim.id"][ev]
    anc_evt = arrays["calomcsim.ancestorSimIds"][ev]
    calo_root_map = build_calo_root_map(
        sim_ids_evt, anc_evt, simids, cryids, crystal_disk_map)

    mc_truth = build_truth_clusters_new(
        d_simids, d_edeps, d_disks, n_disk, calo_root_map)

    # Build graph and run GNN
    edge_index, _ = build_graph(
        d_pos, d_t,
        r_max=graph_cfg["r_max_mm"], dt_max=graph_cfg["dt_max_ns"],
        k_min=graph_cfg["k_min"], k_max=graph_cfg["k_max"])

    # No edges -> every hit becomes its own singleton GNN "cluster".
    if edge_index.shape[1] == 0:
        return {
            "hit_x": d_x, "hit_y": d_y, "energies": d_e,
            "truth_labels": mc_truth, "bfs_labels": d_cidx,
            "gnn_labels": np.arange(n_disk),
            "edge_index": edge_index, "edge_probs": np.array([]),
            "disk_id": disk_id,
        }

    node_feat = compute_node_features(d_pos, d_t, d_e)
    edge_feat = compute_edge_features(d_pos, d_t, d_e, edge_index)

    data = Data(
        x=torch.from_numpy(node_feat),
        edge_index=torch.from_numpy(edge_index),
        edge_attr=torch.from_numpy(edge_feat),
    )
    normalize_graph(data, stats)

    with torch.no_grad():
        output = model(data.to(device))

    # CaloClusterNet returns a dict with optional node head; SimpleEdgeNet
    # returns bare edge logits.
    if isinstance(output, dict):
        logits_np = output["edge_logits"].cpu().numpy()
        nl = output.get("node_logits")
        node_logits_np = nl.cpu().numpy() if nl is not None else None
    else:
        logits_np = output.cpu().numpy()
        node_logits_np = None

    # Sigmoid in float64 for numerically stable display probabilities.
    edge_probs = 1.0 / (1.0 + np.exp(-logits_np.astype(np.float64)))

    gnn_labels, _ = reconstruct_clusters(
        edge_index=edge_index, edge_logits=logits_np,
        n_nodes=n_disk, energies=d_e,
        tau_edge=tau_edge, min_hits=1, min_energy_mev=0.0,
        node_logits=node_logits_np, tau_node=tau_node)

    return {
        "hit_x": d_x, "hit_y": d_y, "energies": d_e,
        "truth_labels": mc_truth, "bfs_labels": d_cidx,
        "gnn_labels": gnn_labels, "edge_index": edge_index,
        "edge_probs": edge_probs, "disk_id": disk_id,
    }


def main():
    parser = argparse.ArgumentParser(
        description="3-panel display with calo-entrant truth (v2 ROOT files)")
    parser.add_argument("--v2-dir", default="/exp/mu2e/data/users/wzhou2/GNN/root_files_v2")
    parser.add_argument("--checkpoint", default="outputs/runs/simple_edge_net_v1/checkpoints/best_model.pt")
    parser.add_argument("--config", default="configs/default.yaml")
    parser.add_argument("--split", default="val", choices=["val", "test", "train"])
    parser.add_argument("--n-events", type=int, default=6)
    parser.add_argument("--find-failures", action="store_true")
    parser.add_argument("--n-scan", type=int, default=200)
    parser.add_argument("--output-dir", default=None)
    parser.add_argument("--device", default=None)
    args = parser.parse_args()

    with open(args.config) as f:
        cfg = yaml.safe_load(f)

    device = torch.device(args.device or ("cuda" if torch.cuda.is_available() else "cpu"))
    tau_edge = cfg["inference"]["tau_edge"]
    graph_cfg = cfg["graph"]
    model_name = cfg["model"].get("name", "SimpleEdgeNet")
    # Node saliency threshold only applies when the model has a trained node head.
    has_node_head = model_name == "CaloClusterNet"
    lambda_node = cfg.get("train", {}).get("lambda_node", 0.0)
    tau_node = cfg["inference"].get("tau_node") if (has_node_head and lambda_node > 0) else None

    model = build_model(cfg)
    ckpt = torch.load(args.checkpoint, weights_only=False, map_location=device)
    model.load_state_dict(ckpt["model_state_dict"])
    model.to(device).eval()
    print(f"Model: {model_name}, epoch {ckpt['epoch']}, val F1={ckpt['val_f1']:.4f}")
    print(f"tau_edge={tau_edge}, tau_node={tau_node}, device={device}")

    stats = load_stats(cfg["data"]["normalization_stats"])
    crystal_map = load_crystal_map("data/crystal_geometry.csv")
    crystal_disk_map = {cid: info[0] for cid, info in crystal_map.items()}

    import uproot

    # Find available v2 files for this split
    with open(cfg["data"]["splits"][args.split]) as f:
        v1_files = [l.strip() for l in f if l.strip()]

    v2_files = []
    for v1 in v1_files:
        v2 = v1_to_v2_path(v1, args.v2_dir)
        # Size cut (>= ~1.8 GB) skips partially transferred files; the
        # uproot.open probe skips corrupt ones.
        if v2 and v2.stat().st_size >= 1800 * 1024 * 1024:
            try:
                uproot.open(f"{v2}:EventNtuple/ntuple")
                v2_files.append(v2)
            except Exception:
                pass
    print(f"Split '{args.split}': {len(v2_files)} v2 files available")
    if not v2_files:
        print("ERROR: No valid v2 files for this split")
        return

    if args.output_dir is None:
        suffix = "debug_newtruth" if args.find_failures else "display_newtruth"
        args.output_dir = f"outputs/{suffix}_{model_name.lower()}"
    out_dir = Path(args.output_dir)
    out_dir.mkdir(parents=True, exist_ok=True)

    branches = [
        "calohits.crystalId_", "calohits.eDep_", "calohits.time_",
        "calohits.clusterIdx_",
        "calohits.crystalPos_.fCoordinates.fX",
        "calohits.crystalPos_.fCoordinates.fY",
        "calohitsmc.simParticleIds", "calohitsmc.eDeps",
        "calomcsim.id", "calomcsim.ancestorSimIds",
    ]

    if args.find_failures:
        print(f"Scanning {args.n_scan} events for failure cases (new truth)...")
        failure_events = []

        for v2_path in v2_files:
            tree = uproot.open(f"{v2_path}:EventNtuple/ntuple")
            arrays = tree.arrays(branches, entry_stop=args.n_scan)
            fname = v2_path.name

            for ev in range(len(arrays)):
                for disk_id in [0, 1]:
                    result = process_event_disk(
                        arrays, ev, disk_id, crystal_map, crystal_disk_map,
                        graph_cfg, model, stats, device, tau_edge, tau_node)
                    if result is None:
                        continue
                    merged_gnn, split_gnn, _, _ = detect_failures(
                        result["gnn_labels"], result["truth_labels"],
                        result["energies"])
                    merged_bfs, split_bfs, _, _ = detect_failures(
                        result["bfs_labels"], result["truth_labels"],
                        result["energies"])
                    n_fail = len(merged_gnn) + len(split_gnn)
                    n_fail_bfs = len(merged_bfs) + len(split_bfs)
                    if n_fail > 0 or n_fail_bfs > 0:
                        failure_events.append(
                            (ev, disk_id, n_fail, n_fail_bfs, result, fname))

            # Collect a few times more candidates than requested before ranking.
            if len(failure_events) >= args.n_events * 3:
                break

        # Sort by GNN failures first, then BFS
        failure_events.sort(key=lambda x: -(x[2] + x[3]))
        print(f"Found {len(failure_events)} event-disks with failures")

        n_plot = min(args.n_events, len(failure_events))
        for idx in range(n_plot):
            ev, disk_id, nf_gnn, nf_bfs, result, fname = failure_events[idx]
            label = (f"Event {ev}, Disk {disk_id} — "
                     f"GNN: {nf_gnn} fail, BFS: {nf_bfs} fail "
                     f"[new truth, {fname}]")
            out_path = out_dir / f"debug_{idx:03d}_evt{ev}_disk{disk_id}.png"
            plot_event_3panel(
                result["hit_x"], result["hit_y"], result["energies"],
                result["disk_id"], result["truth_labels"],
                result["bfs_labels"], result["gnn_labels"],
                result["edge_index"], result["edge_probs"],
                crystal_map, out_path, event_label=label,
                debug=True)
            print(f" [{idx+1}/{n_plot}] {label}")

    else:
        plotted = 0
        for v2_path in v2_files:
            if plotted >= args.n_events:
                break
            tree = uproot.open(f"{v2_path}:EventNtuple/ntuple")
            # Read extra events since some event-disks are skipped (no/few hits).
            n_read = args.n_events * 3
            arrays = tree.arrays(branches, entry_stop=n_read)
            fname = v2_path.name

            for ev in range(len(arrays)):
                if plotted >= args.n_events:
                    break
                for disk_id in [0, 1]:
                    if plotted >= args.n_events:
                        break
                    result = process_event_disk(
                        arrays, ev, disk_id, crystal_map, crystal_disk_map,
                        graph_cfg, model, stats, device, tau_edge, tau_node)
                    if result is None:
                        continue

                    label = (f"Event {ev}, Disk {disk_id} "
                             f"[new truth, {args.split}, {fname}]")
                    out_path = out_dir / f"display_{plotted:03d}_evt{ev}_disk{disk_id}.png"
                    plot_event_3panel(
                        result["hit_x"], result["hit_y"], result["energies"],
                        result["disk_id"], result["truth_labels"],
                        result["bfs_labels"], result["gnn_labels"],
                        result["edge_index"], result["edge_probs"],
                        crystal_map, out_path, event_label=label)
                    print(f" [{plotted+1}/{args.n_events}] {label}")
                    plotted += 1

    print(f"\nDone. Plots saved to {out_dir}/")


if __name__ == "__main__":
    main()
diff --git a/CaloClusterGNN/scripts/plot_training.py b/CaloClusterGNN/scripts/plot_training.py
new file mode 100644
index 0000000..e757307
--- /dev/null
+++ b/CaloClusterGNN/scripts/plot_training.py
@@ -0,0 +1,194 @@
#!/usr/bin/env python3
"""
Plot training curves from a completed run's history.json.
"""Plot training curves from a completed run's history.json.

Usage:
    python3 scripts/plot_training.py --run-dir outputs/runs/simple_edge_net_v1
    python3 scripts/plot_training.py                 # auto-finds latest run
"""

import argparse
import json
import sys
from pathlib import Path

import matplotlib
matplotlib.use("Agg")  # headless backend: plots are written to disk only
import matplotlib.pyplot as plt
import numpy as np


def find_latest_run(base="outputs/runs"):
    """Return the run directory under *base* with the newest history.json.

    A directory counts as a run only if it directly contains history.json.
    Returns None when *base* does not exist or holds no runs.
    """
    base = Path(base)
    if not base.exists():
        return None
    runs = [d for d in base.iterdir() if d.is_dir() and (d / "history.json").exists()]
    if not runs:
        return None
    return max(runs, key=lambda d: (d / "history.json").stat().st_mtime)


def load_history(run_dir):
    """Load and return the parsed history.json from *run_dir*.

    Exits the process with status 1 if the file is missing (CLI behavior).
    """
    path = Path(run_dir) / "history.json"
    if not path.exists():
        print(f"ERROR: {path} not found.")
        sys.exit(1)
    with open(path) as f:
        return json.load(f)


def plot_loss(history, ax):
    """Train vs val loss."""
    epochs = [h["epoch"] for h in history]
    ax.plot(epochs, [h["train"]["loss"] for h in history], label="train", linewidth=1.5)
    ax.plot(epochs, [h["val"]["loss"] for h in history], label="val", linewidth=1.5)
    ax.set_xlabel("Epoch")
    ax.set_ylabel("Loss (weighted BCE)")
    ax.set_title("Loss")
    ax.legend()
    ax.grid(True, alpha=0.3)


def plot_f1(history, ax):
    """Train vs val edge F1, with the best val epoch annotated."""
    epochs = [h["epoch"] for h in history]
    ax.plot(epochs, [h["train"]["f1"] for h in history], label="train", linewidth=1.5)
    ax.plot(epochs, [h["val"]["f1"] for h in history], label="val", linewidth=1.5)

    # Mark best val F1
    val_f1 = [h["val"]["f1"] for h in history]
    best_idx = int(np.argmax(val_f1))
    ax.axvline(epochs[best_idx], color="gray", linestyle="--", alpha=0.5)
    ax.annotate(f"best={val_f1[best_idx]:.3f}\nepoch {epochs[best_idx]}",
                xy=(epochs[best_idx], val_f1[best_idx]),
                xytext=(10, -20), textcoords="offset points", fontsize=8,
                arrowprops=dict(arrowstyle="->", color="gray"))

    ax.set_xlabel("Epoch")
    ax.set_ylabel("F1")
    ax.set_title("Edge F1 (positive class)")
    ax.legend()
    ax.grid(True, alpha=0.3)


def plot_precision_recall(history, ax):
    """Val precision/recall (solid) with train curves dashed for reference."""
    epochs = [h["epoch"] for h in history]
    ax.plot(epochs, [h["val"]["precision"] for h in history],
            label="precision", linewidth=1.5)
    ax.plot(epochs, [h["val"]["recall"] for h in history],
            label="recall", linewidth=1.5)
    ax.plot(epochs, [h["train"]["precision"] for h in history],
            label="train P", linewidth=1, linestyle="--", alpha=0.5)
    ax.plot(epochs, [h["train"]["recall"] for h in history],
            label="train R", linewidth=1, linestyle="--", alpha=0.5)
    ax.set_xlabel("Epoch")
    ax.set_ylabel("Score")
    ax.set_title("Precision / Recall")
    ax.legend(fontsize=8)
    ax.grid(True, alpha=0.3)


def plot_auc(history, ax):
    """Val ROC-AUC and PR-AUC over epochs; placeholder text if absent."""
    epochs = [h["epoch"] for h in history]
    roc = [h["val"].get("roc_auc", 0) for h in history]
    pr = [h["val"].get("pr_auc", 0) for h in history]

    # Older history files have no AUC columns; show a placeholder panel.
    if max(roc) == 0 and max(pr) == 0:
        ax.text(0.5, 0.5, "No AUC data", transform=ax.transAxes,
                ha="center", va="center", fontsize=12, color="gray")
        ax.set_title("Val AUC")
        return

    ax.plot(epochs, roc, label="ROC AUC", linewidth=1.5)
    ax.plot(epochs, pr, label="PR AUC", linewidth=1.5)
    ax.set_xlabel("Epoch")
    ax.set_ylabel("AUC")
    ax.set_title("Val AUC")
    ax.legend()
    ax.grid(True, alpha=0.3)


def plot_lr(history, ax):
    """Learning-rate schedule on a log scale."""
    epochs = [h["epoch"] for h in history]
    lrs = [h["lr"] for h in history]
    ax.plot(epochs, lrs, color="tab:orange", linewidth=1.5)
    ax.set_xlabel("Epoch")
    ax.set_ylabel("Learning Rate")
    ax.set_title("LR Schedule")
    ax.set_yscale("log")
    ax.grid(True, alpha=0.3)


def plot_overview(history, out_path, title="SimpleEdgeNet Training"):
    """Generate the 5-panel overview figure plus a text summary panel.

    Parameters
    ----------
    history : list of per-epoch dicts with "epoch", "lr", "train", "val" keys.
    out_path : destination PNG path.
    title : figure suptitle; default kept for backward compatibility
        (previously hard-coded).
    """
    fig, axes = plt.subplots(2, 3, figsize=(16, 9))

    plot_loss(history, axes[0, 0])
    plot_f1(history, axes[0, 1])
    plot_precision_recall(history, axes[0, 2])
    plot_auc(history, axes[1, 0])
    plot_lr(history, axes[1, 1])

    # Summary text in bottom-right panel
    ax_text = axes[1, 2]
    ax_text.axis("off")

    val_f1 = [h["val"]["f1"] for h in history]
    best_idx = int(np.argmax(val_f1))
    best = history[best_idx]

    lines = [
        f"Total epochs: {len(history)}",
        f"Best epoch: {best['epoch']}",
        "",
        f"Best val F1: {best['val']['f1']:.4f}",
        f"Best val precision: {best['val']['precision']:.4f}",
        f"Best val recall: {best['val']['recall']:.4f}",
        f"Best val ROC AUC: {best['val'].get('roc_auc', 0):.4f}",
        f"Best val PR AUC: {best['val'].get('pr_auc', 0):.4f}",
        "",
        f"Final train loss: {history[-1]['train']['loss']:.4f}",
        f"Final val loss: {history[-1]['val']['loss']:.4f}",
        f"Final LR: {history[-1]['lr']:.1e}",
    ]
    ax_text.text(0.1, 0.95, "\n".join(lines), transform=ax_text.transAxes,
                 fontsize=10, verticalalignment="top", fontfamily="monospace",
                 bbox=dict(boxstyle="round,pad=0.5", facecolor="lightyellow", alpha=0.8))
    ax_text.set_title("Summary")

    fig.suptitle(title, fontsize=14, fontweight="bold")
    plt.tight_layout()
    plt.savefig(out_path, dpi=150, bbox_inches="tight")
    print(f"Saved: {out_path}")
    plt.close()


def main():
    """CLI entry point: pick a run directory, load history, write the PNG."""
    parser = argparse.ArgumentParser(description="Plot training curves")
    parser.add_argument("--run-dir", type=str, default=None,
                        help="Run directory (auto-finds latest if omitted)")
    args = parser.parse_args()

    if args.run_dir:
        run_dir = Path(args.run_dir)
    else:
        run_dir = find_latest_run()
        if run_dir is None:
            print("ERROR: No completed runs found in outputs/runs/")
            sys.exit(1)
        print(f"Using latest run: {run_dir}")

    history = load_history(run_dir)
    print(f"Loaded {len(history)} epochs from {run_dir / 'history.json'}")

    out_path = run_dir / "training_curves.png"
    plot_overview(history, out_path)


if __name__ == "__main__":
    main()
a/CaloClusterGNN/scripts/smoke_test_env.py b/CaloClusterGNN/scripts/smoke_test_env.py
new file mode 100644
index 0000000..4a1cbda
--- /dev/null
+++ b/CaloClusterGNN/scripts/smoke_test_env.py
@@ -0,0 +1,85 @@
"""
Smoke test: verify the environment has all required packages and that a
minimal GNN forward pass works. Run after sourcing setup_env.sh.

    python3 scripts/smoke_test_env.py
"""

import sys

def check(name, fn):
    """Run fn() and print its result under *name*; return False on any failure."""
    try:
        result = fn()
        print(f" {name}: {result}")
        return True
    except Exception as e:
        print(f" {name}: FAILED — {e}")
        return False

print("=== Package versions ===")
ok = True
# ok &= relies on bool being an int subtype: bitwise AND keeps ok False
# once any single check has failed.
ok &= check("python", lambda: sys.version.split()[0])
ok &= check("torch", lambda: __import__("torch").__version__)
ok &= check("torch_geometric", lambda: __import__("torch_geometric").__version__)
ok &= check("uproot", lambda: __import__("uproot").__version__)
ok &= check("numpy", lambda: __import__("numpy").__version__)
ok &= check("scipy", lambda: __import__("scipy").__version__)
ok &= check("sklearn", lambda: __import__("sklearn").__version__)
ok &= check("matplotlib", lambda: __import__("matplotlib").__version__)

print("\n=== CUDA ===")
import torch
print(f" CUDA available: {torch.cuda.is_available()}")
if torch.cuda.is_available():
    print(f" Device: {torch.cuda.get_device_name(0)}")
else:
    print(" (expected on login node — use GPU node for training)")

print("\n=== Minimal GNN forward pass ===")
try:
    import torch
    import numpy as np
    from scipy.spatial import cKDTree
    from torch_geometric.data import Data

    # 5 hits, 6 node features (log_E, t, x, y, r, E_rel)
    x = torch.randn(5, 6)
    pos = np.random.randn(5, 2)  # (x, y) positions

    # Build radius graph with scipy (no torch-cluster needed)
    tree = cKDTree(pos)
    pairs = tree.query_pairs(r=2.0)
    # Duplicate each undirected pair in both directions to get a directed
    # edge_index, as torch_geometric expects.
    src = torch.tensor([i for i, j in pairs] + [j for i, j in pairs], dtype=torch.long)
    dst = torch.tensor([j for i, j in pairs] + [i for i, j in pairs], dtype=torch.long)
    edge_index = torch.stack([src, dst], dim=0) if len(pairs) > 0 else torch.zeros(2, 0, dtype=torch.long)
    n_edges = edge_index.shape[1]
    edge_attr = torch.randn(n_edges, 10)  # 10 edge features

    data = Data(x=x, edge_index=edge_index, edge_attr=edge_attr)
    print(f" Graph: {data.num_nodes} nodes, {data.num_edges} edges")

    # Tiny linear message-passing step
    from torch_geometric.nn import MessagePassing
    class TinyMP(MessagePassing):
        def __init__(self):
            super().__init__(aggr="sum")
            self.lin = torch.nn.Linear(6, 6)
        def forward(self, x, edge_index):
            return self.propagate(edge_index, x=x)
        def message(self, x_j):
            return self.lin(x_j)

    model = TinyMP()
    out = model(data.x, data.edge_index)
    assert out.shape == (5, 6)
    print(f" Forward pass output shape: {tuple(out.shape)} — OK")
except Exception as e:
    print(f" FAILED — {e}")
    ok = False

print()
# Non-zero exit lets CI / shell wrappers detect a broken environment.
if ok:
    print("All checks passed.")
else:
    print("Some checks FAILED. See above.")
    sys.exit(1)
diff --git a/CaloClusterGNN/scripts/train_gnn.py b/CaloClusterGNN/scripts/train_gnn.py
new file mode 100644
index 0000000..15e0795
--- /dev/null
+++ b/CaloClusterGNN/scripts/train_gnn.py
@@ -0,0 +1,170 @@
#!/usr/bin/env python3
"""
Train a GNN for calorimeter edge classification.
Usage:
    python3 scripts/train_gnn.py --config configs/default.yaml
    python3 scripts/train_gnn.py --config configs/default.yaml --epochs 20 --device cpu
"""

import argparse
import json
import subprocess
import sys
import time
from datetime import datetime
from pathlib import Path

# Make the package root (parent of scripts/) importable as `src.*`.
sys.path.insert(0, str(Path(__file__).resolve().parents[1]))

import torch
import yaml

from src.data.dataset import CaloGraphDataset
from src.data.normalization import load_stats, normalize_graph
from src.models import build_model
from src.training.losses import compute_class_weights
from src.training.trainer import Trainer


def load_split_files(split_path):
    """Load file stems from a split file."""
    with open(split_path) as f:
        return [line.strip() for line in f if line.strip()]


def get_git_hash():
    """Get current git hash, or 'unknown'."""
    # Broad except is deliberate: not-a-repo, git missing, etc. all fall
    # back to the sentinel string rather than failing the training run.
    try:
        return subprocess.check_output(
            ["git", "rev-parse", "--short", "HEAD"],
            stderr=subprocess.DEVNULL,
        ).decode().strip()
    except Exception:
        return "unknown"


def main():
    parser = argparse.ArgumentParser(description="Train GNN for calorimeter clustering")
    parser.add_argument("--config", type=str, default="configs/default.yaml")
    parser.add_argument("--device", type=str, default=None,
                        help="Device: 'cpu', 'cuda', or 'cuda:0'. Auto-detects if omitted.")
    parser.add_argument("--epochs", type=int, default=None, help="Override max epochs")
    parser.add_argument("--batch-size", type=int, default=None, help="Override batch size")
    parser.add_argument("--run-name", type=str, default=None, help="Run directory name")
    parser.add_argument("--resume", type=str, default=None,
                        help="Path to checkpoint to resume from (loads model weights only, "
                             "resets optimizer for staged training)")
    args = parser.parse_args()

    # Load config
    with open(args.config) as f:
        cfg = yaml.safe_load(f)

    # CLI overrides mutate cfg BEFORE it is saved below, so the archived
    # config.yaml reflects what was actually run.
    train_cfg = cfg["train"]
    if args.epochs is not None:
        train_cfg["epochs"] = args.epochs
    if args.batch_size is not None:
        train_cfg["batch_size"] = args.batch_size

    # Device
    if args.device:
        device = torch.device(args.device)
    elif torch.cuda.is_available():
        device = torch.device("cuda")
    else:
        device = torch.device("cpu")
    print(f"Device: {device}")

    # Run directory
    run_name = args.run_name or datetime.now().strftime("%Y%m%d_%H%M%S")
    run_dir = Path(cfg["output"]["run_dir"]) / run_name
    run_dir.mkdir(parents=True, exist_ok=True)

    # Save config + metadata
    meta = {
        "git_hash": get_git_hash(),
        "config_path": args.config,
        "device": str(device),
        "timestamp": datetime.now().isoformat(),
    }
    with open(run_dir / "config.yaml", "w") as f:
        yaml.dump(cfg, f, default_flow_style=False)
    with open(run_dir / "metadata.json", "w") as f:
        json.dump(meta, f, indent=2)

    # Load datasets — preload into memory to avoid per-file I/O bottleneck
    processed_dir = cfg["data"]["processed_dir"]
    train_files = load_split_files(cfg["data"]["splits"]["train"])
    val_files = load_split_files(cfg["data"]["splits"]["val"])

    # Use packed files if available (single torch.load vs 29K individual loads)
    train_packed = Path(processed_dir) / "train.pt"
    val_packed = Path(processed_dir) / "val.pt"

    print(f"Loading train dataset from {processed_dir}")
    train_dataset = CaloGraphDataset(
        processed_dir, file_list=train_files, preload=True,
        packed_path=train_packed if train_packed.exists() else None,
    )
    print(f"Loading val dataset from {processed_dir}")
    val_dataset = CaloGraphDataset(
        processed_dir, file_list=val_files, preload=True,
        packed_path=val_packed if val_packed.exists() else None,
    )

    print(f" Train: {len(train_dataset)} graphs")
    print(f" Val: {len(val_dataset)} graphs")

    if len(train_dataset) == 0:
        print("ERROR: No training graphs found. Run build_graphs.py first.")
        sys.exit(1)

    # Compute class weights before normalization
    cw = compute_class_weights(train_dataset)
    pos_weight = cw["pos_weight"]
    print(f" Class balance: {cw['n_pos']} pos, {cw['n_neg']} neg "
          f"(pos_weight={pos_weight.item():.3f})")

    # Apply normalization in-place to cached data
    # NOTE(review): reaches into the dataset's private _cache attribute;
    # relies on preload=True having populated it.
    stats_path = cfg["data"]["normalization_stats"]
    print(f"Loading normalization stats from {stats_path}")
    stats = load_stats(stats_path)
    for data in train_dataset._cache:
        normalize_graph(data, stats)
    for data in val_dataset._cache:
        normalize_graph(data, stats)

    # Build model
    model = build_model(cfg)
    n_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
    print(f"\nModel: {cfg['model']['name']}")
    print(f" Parameters: {n_params:,}")
    print(f" Hidden dim: {cfg['model']['hidden_dim']}")
    print(f" MP layers: {cfg['model']['n_mp_layers']}")

    # Resume from checkpoint (model weights only — optimizer resets for staged training)
    if args.resume:
        ckpt = torch.load(args.resume, weights_only=False, map_location=device)
        model.load_state_dict(ckpt["model_state_dict"])
        print(f" Resumed weights from {args.resume} "
              f"(epoch {ckpt['epoch']}, val F1={ckpt['val_f1']:.4f})")

    # Train
    trainer = Trainer(
        model=model,
        train_dataset=train_dataset,
        val_dataset=val_dataset,
        cfg=train_cfg,
        pos_weight=pos_weight,
        device=device,
        run_dir=run_dir,
    )

    print("\n" + "=" * 70)
    trainer.fit()
    print("=" * 70)


if __name__ == "__main__":
    main()
__name__ == "__main__": + main() diff --git a/CaloClusterGNN/scripts/tune_threshold.py b/CaloClusterGNN/scripts/tune_threshold.py new file mode 100644 index 0000000..1900d7f --- /dev/null +++ b/CaloClusterGNN/scripts/tune_threshold.py @@ -0,0 +1,506 @@ +#!/usr/bin/env python3 +""" +Threshold tuning for GNN edge classification on the validation set. + +Sweeps tau_edge to find the optimal edge probability threshold: + 1. Coarse grid: 0.1 to 0.9 in steps of 0.1 + 2. Fine grid: +/-0.1 around coarse optimum in steps of 0.02 + +For each threshold, computes: + - Edge-level pairwise F1 (precision, recall) + - Cluster-level purity, completeness (energy-weighted matching) + - Truth match rate, reco match rate + - Number of merges and splits + +Saves results to outputs/threshold_sweep/sweep_results.csv and generates plot. + +Usage: + source setup_env.sh + python3 scripts/tune_threshold.py + python3 scripts/tune_threshold.py --checkpoint path/to/best_model.pt +""" + +import argparse +import csv +import sys +import time +from collections import defaultdict +from pathlib import Path + +sys.path.insert(0, str(Path(__file__).resolve().parents[1])) + +import numpy as np +import torch +import yaml + +from src.data.normalization import load_stats, normalize_graph +from src.inference.cluster_reco import reconstruct_clusters +from src.models import build_model + + +def match_clusters_energy(pred_labels, truth_labels, energies): + """Energy-weighted greedy cluster matching (same logic as head-to-head).""" + pred_ids = set(pred_labels[pred_labels >= 0].tolist()) + truth_ids = set(truth_labels[truth_labels >= 0].tolist()) + + if not pred_ids or not truth_ids: + return { + "n_pred": len(pred_ids), "n_truth": len(truth_ids), + "n_matched_pred": 0, "n_matched_truth": 0, + "purities": [], "completenesses": [], + "n_split": 0, "n_merged": 0, + } + + overlap = defaultdict(lambda: defaultdict(float)) + pred_energy = defaultdict(float) + truth_energy = defaultdict(float) + + for i in 
range(len(energies)): + e = energies[i] + p = pred_labels[i] + t = truth_labels[i] + if p >= 0: + pred_energy[p] += e + if t >= 0: + truth_energy[t] += e + if p >= 0 and t >= 0: + overlap[p][t] += e + + purities, completenesses = [], [] + matched_truth = set() + for p in sorted(pred_ids): + if p not in overlap: + continue + best_t = max(overlap[p], key=lambda t: overlap[p][t]) + shared = overlap[p][best_t] + pur = shared / pred_energy[p] if pred_energy[p] > 0 else 0 + comp = shared / truth_energy[best_t] if truth_energy[best_t] > 0 else 0 + if pur > 0.5 and comp > 0.5: + purities.append(pur) + completenesses.append(comp) + matched_truth.add(best_t) + + # Splits: truth cluster matched by >1 pred cluster + truth_to_pred = defaultdict(list) + for p in sorted(pred_ids): + if p not in overlap: + continue + for t, e in overlap[p].items(): + if pred_energy[p] > 0 and e / pred_energy[p] > 0.5: + truth_to_pred[t].append(p) + n_split = sum(1 for ps in truth_to_pred.values() if len(ps) > 1) + + # Merges: pred cluster overlapping >1 truth cluster significantly + n_merged = 0 + for p in sorted(pred_ids): + if p not in overlap: + continue + sig = [t for t, e in overlap[p].items() + if pred_energy[p] > 0 and e / pred_energy[p] > 0.1] + if len(sig) > 1: + n_merged += 1 + + return { + "n_pred": len(pred_ids), "n_truth": len(truth_ids), + "n_matched_pred": len(purities), "n_matched_truth": len(matched_truth), + "purities": purities, "completenesses": completenesses, + "n_split": n_split, "n_merged": n_merged, + } + + +def evaluate_threshold(graphs_info, threshold, min_hits=2, min_energy_mev=10.0, + tau_node=None): + """Evaluate a single threshold across all precomputed graph data.""" + cluster_results = [] + # Accumulators for edge-level pairwise metrics + total_tp, total_fp, total_fn = 0, 0, 0 + + for g in graphs_info: + # Reconstruct clusters at this threshold + cluster_labels, _ = reconstruct_clusters( + edge_index=g["edge_index"], + edge_logits=g["logits"], + 
n_nodes=g["n_nodes"], + energies=g["energies"], + tau_edge=threshold, + min_hits=min_hits, + min_energy_mev=min_energy_mev, + node_logits=g.get("node_logits"), + tau_node=tau_node, + ) + cluster_results.append( + match_clusters_energy(cluster_labels, g["truth_labels"], g["energies"]) + ) + + # Edge-level pairwise metrics (on masked edges only) + probs = g["edge_probs"] + y = g["y_masked"] + preds = (probs >= threshold).astype(np.int32) + total_tp += ((preds == 1) & (y == 1)).sum() + total_fp += ((preds == 1) & (y == 0)).sum() + total_fn += ((preds == 0) & (y == 1)).sum() + + # Aggregate pairwise metrics + pw_prec = total_tp / (total_tp + total_fp) if (total_tp + total_fp) > 0 else 0.0 + pw_rec = total_tp / (total_tp + total_fn) if (total_tp + total_fn) > 0 else 0.0 + pw_f1 = 2 * pw_prec * pw_rec / (pw_prec + pw_rec) if (pw_prec + pw_rec) > 0 else 0.0 + + # Aggregate cluster metrics + all_pur = [p for r in cluster_results for p in r["purities"]] + all_comp = [c for r in cluster_results for c in r["completenesses"]] + n_pred = sum(r["n_pred"] for r in cluster_results) + n_truth = sum(r["n_truth"] for r in cluster_results) + n_matched_pred = sum(r["n_matched_pred"] for r in cluster_results) + n_matched_truth = sum(r["n_matched_truth"] for r in cluster_results) + n_split = sum(r["n_split"] for r in cluster_results) + n_merged = sum(r["n_merged"] for r in cluster_results) + + return { + "tau_edge": threshold, + "pairwise_precision": float(pw_prec), + "pairwise_recall": float(pw_rec), + "pairwise_f1": float(pw_f1), + "mean_purity": float(np.mean(all_pur)) if all_pur else 0.0, + "mean_completeness": float(np.mean(all_comp)) if all_comp else 0.0, + "reco_match_rate": n_matched_pred / n_pred if n_pred > 0 else 0.0, + "truth_match_rate": n_matched_truth / n_truth if n_truth > 0 else 0.0, + "n_pred": n_pred, + "n_truth": n_truth, + "n_matched_pred": n_matched_pred, + "n_matched_truth": n_matched_truth, + "n_split": n_split, + "n_merged": n_merged, + } + + +def main(): + 
def main():
    """Sweep the edge threshold on the validation set and report the optimum.

    Workflow:
      1. Load config, checkpoint and the packed (un-normalized) val graphs.
      2. Run model inference ONCE per graph, caching logits/probs/truth so
         every threshold evaluation is pure post-processing.
      3. Coarse sweep (0.1..0.9, step 0.1), then fine sweep (step 0.02)
         around the coarse optimum; pick the tau maximizing pairwise F1.
      4. Re-evaluate the optimum with production cleanup cuts, save a CSV
         of all sweep points, and render a 2x3 summary figure.
    """
    parser = argparse.ArgumentParser(description="Tune edge threshold on validation set")
    parser.add_argument("--checkpoint", type=str,
                        default="outputs/runs/simple_edge_net_v1/checkpoints/best_model.pt")
    parser.add_argument("--config", type=str, default="configs/default.yaml")
    parser.add_argument("--output-dir", type=str, default=None,
                        help="Output directory (default: outputs/threshold_sweep_)")
    parser.add_argument("--tau-node", type=float, default=None,
                        help="Fixed node saliency threshold (default: from config if model "
                             "supports it)")
    parser.add_argument("--min-hits", type=int, default=1,
                        help="Min hits per cluster (default 1 for tuning; "
                             "production uses 2)")
    parser.add_argument("--min-energy", type=float, default=0.0,
                        help="Min energy per cluster in MeV (default 0 for tuning; "
                             "production uses 10)")
    args = parser.parse_args()

    with open(args.config) as f:
        cfg = yaml.safe_load(f)

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print(f"Device: {device}")

    # Load model
    model = build_model(cfg)
    ckpt = torch.load(args.checkpoint, weights_only=False, map_location=device)
    model.load_state_dict(ckpt["model_state_dict"])
    model.to(device).eval()
    model_name = cfg["model"].get("name", "SimpleEdgeNet")
    print(f"Model: {model_name}")
    print(f"Loaded from epoch {ckpt['epoch']} (val F1={ckpt['val_f1']:.4f})")

    # Node saliency threshold (only when explicitly requested via --tau-node,
    # since the node head may be untrained in early stages)
    tau_node = args.tau_node
    if tau_node is not None:
        print(f"tau_node = {tau_node}")
    else:
        print("tau_node = disabled (pass --tau-node to enable)")

    # Output directory (model-specific default)
    if args.output_dir is None:
        suffix = model_name.lower()
        args.output_dir = f"outputs/threshold_sweep_{suffix}"

    # Load val graphs (un-normalized packed file)
    val_packed = Path(cfg["data"]["processed_dir"]) / "val.pt"
    if not val_packed.exists():
        print(f"ERROR: {val_packed} not found. Run scripts/pack_graphs.py first.")
        sys.exit(1)
    print(f"Loading val graphs from {val_packed}...")
    val_graphs = torch.load(val_packed, weights_only=False)
    print(f" {len(val_graphs)} graphs loaded")

    # Load normalization stats
    stats = load_stats(cfg["data"]["normalization_stats"])

    # Run inference on all val graphs once; precompute everything needed for sweeps
    print("Running model inference on all val graphs...")
    t0 = time.time()
    graphs_info = []

    for idx, data in enumerate(val_graphs):
        # Extract raw energies BEFORE normalization (x[:,0] = log(1+E))
        log_e = data.x[:, 0].numpy()
        energies = np.exp(log_e) - 1.0
        truth_labels = data.hit_truth_cluster.numpy()

        # Normalize a clone for model input
        # (keeps the raw graph intact for energy/truth extraction above)
        data_norm = data.clone()
        normalize_graph(data_norm, stats)

        # Forward pass
        with torch.no_grad():
            output = model(data_norm.to(device))

        # Handle both dict (CaloClusterNet) and tensor (SimpleEdgeNet) output
        if isinstance(output, dict):
            logits_np = output["edge_logits"].cpu().numpy()
            nl = output.get("node_logits")
            node_logits_np = nl.cpu().numpy() if nl is not None else None
        else:
            logits_np = output.cpu().numpy()
            node_logits_np = None

        # Precompute sigmoid probs on masked edges for pairwise metrics
        # (float64 to avoid overflow in exp for large negative logits)
        mask = data.edge_mask.bool().numpy()
        all_probs = 1.0 / (1.0 + np.exp(-logits_np.astype(np.float64)))

        graphs_info.append({
            "logits": logits_np,
            "node_logits": node_logits_np,
            "edge_index": data.edge_index.numpy(),
            "n_nodes": data.x.shape[0],
            "energies": energies,
            "truth_labels": truth_labels,
            "edge_probs": all_probs[mask],
            "y_masked": data.y_edge[mask].numpy().astype(np.int32),
        })

        if (idx + 1) % 1000 == 0:
            print(f" {idx + 1}/{len(val_graphs)}...")

    elapsed = time.time() - t0
    print(f"Inference done: {len(graphs_info)} graphs in {elapsed:.1f}s")

    # --- Coarse sweep ---
    coarse_thresholds = list(np.arange(0.1, 0.95, 0.1))
    min_hits = args.min_hits
    min_energy = args.min_energy
    print(f"\nCluster cleanup: min_hits={min_hits}, min_energy={min_energy} MeV")

    print(f"\nCoarse sweep ({len(coarse_thresholds)} thresholds):")
    coarse_results = []
    for tau in coarse_thresholds:
        t1 = time.time()
        result = evaluate_threshold(graphs_info, tau, min_hits, min_energy,
                                    tau_node=tau_node)
        dt = time.time() - t1
        coarse_results.append(result)
        print(f" tau={tau:.2f} F1={result['pairwise_f1']:.4f} "
              f"truth_match={result['truth_match_rate']:.4f} "
              f"purity={result['mean_purity']:.4f} "
              f"compl={result['mean_completeness']:.4f} "
              f"splits={result['n_split']:>5d} merges={result['n_merged']:>5d} "
              f"({dt:.1f}s)")

    best_coarse = max(coarse_results, key=lambda r: r["pairwise_f1"])
    print(f"\nBest coarse: tau={best_coarse['tau_edge']:.2f} "
          f"(F1={best_coarse['pairwise_f1']:.4f}, "
          f"truth_match={best_coarse['truth_match_rate']:.4f})")

    # --- Fine sweep around optimum ---
    fine_lo = max(0.02, best_coarse["tau_edge"] - 0.1)
    fine_hi = min(0.98, best_coarse["tau_edge"] + 0.1)
    fine_thresholds = list(np.arange(fine_lo, fine_hi + 0.005, 0.02))
    # Remove values already covered by coarse sweep
    # (0.005 tolerance absorbs np.arange floating-point step error)
    fine_thresholds = [t for t in fine_thresholds
                       if not any(abs(t - c) < 0.005 for c in coarse_thresholds)]

    print(f"\nFine sweep ({len(fine_thresholds)} thresholds "
          f"in [{fine_lo:.2f}, {fine_hi:.2f}]):")
    fine_results = []
    for tau in fine_thresholds:
        t1 = time.time()
        result = evaluate_threshold(graphs_info, tau, min_hits, min_energy,
                                    tau_node=tau_node)
        dt = time.time() - t1
        fine_results.append(result)
        print(f" tau={tau:.2f} F1={result['pairwise_f1']:.4f} "
              f"truth_match={result['truth_match_rate']:.4f} "
              f"purity={result['mean_purity']:.4f} "
              f"compl={result['mean_completeness']:.4f} "
              f"splits={result['n_split']:>5d} merges={result['n_merged']:>5d} "
              f"({dt:.1f}s)")

    # --- Combine, find best, report ---
    all_results = coarse_results + fine_results
    all_results.sort(key=lambda r: r["tau_edge"])

    best = max(all_results, key=lambda r: r["pairwise_f1"])
    best_tau = best["tau_edge"]

    print(f"\n{'='*70}")
    print(f" OPTIMAL tau_edge = {best_tau:.2f} (maximizes pairwise F1)")
    print(f"{'='*70}")
    print(f" Pairwise F1: {best['pairwise_f1']:.4f} "
          f"(P={best['pairwise_precision']:.4f}, R={best['pairwise_recall']:.4f})")
    print(f" Truth match rate: {best['truth_match_rate']:.4f}")
    print(f" Reco match rate: {best['reco_match_rate']:.4f}")
    print(f" Mean purity: {best['mean_purity']:.4f}")
    print(f" Mean completeness: {best['mean_completeness']:.4f}")
    print(f" Reco clusters: {best['n_pred']} (truth: {best['n_truth']})")
    print(f" Splits: {best['n_split']} Merges: {best['n_merged']}")
    print(f"{'='*70}")

    # Also report metrics with production cleanup at the optimal threshold
    inf_cfg = cfg["inference"]
    prod_min_hits = inf_cfg.get("min_hits", 2)
    prod_min_energy = inf_cfg.get("min_energy_mev", 10.0)
    if min_hits != prod_min_hits or min_energy != prod_min_energy:
        print(f"\nWith production cleanup (min_hits={prod_min_hits}, "
              f"min_energy={prod_min_energy} MeV):")
        prod = evaluate_threshold(graphs_info, best_tau,
                                  prod_min_hits, prod_min_energy,
                                  tau_node=tau_node)
        print(f" Truth match rate: {prod['truth_match_rate']:.4f}")
        print(f" Reco match rate: {prod['reco_match_rate']:.4f}")
        print(f" Mean purity: {prod['mean_purity']:.4f}")
        print(f" Mean completeness: {prod['mean_completeness']:.4f}")
        print(f" Reco clusters: {prod['n_pred']} (truth: {prod['n_truth']})")
        print(f" Splits: {prod['n_split']} Merges: {prod['n_merged']}")

    # --- Save CSV ---
    out_dir = Path(args.output_dir)
    out_dir.mkdir(parents=True, exist_ok=True)

    csv_path = out_dir / "sweep_results.csv"
    fieldnames = list(all_results[0].keys())
    with open(csv_path, "w", newline="") as f:
        writer = csv.DictWriter(f, fieldnames=fieldnames)
        writer.writeheader()
        for r in all_results:
            # Fixed 6-decimal formatting for floats keeps the CSV diffable.
            row = {}
            for k, v in r.items():
                row[k] = f"{v:.6f}" if isinstance(v, float) else v
            writer.writerow(row)
    print(f"\nSaved sweep results to {csv_path}")

    # --- Plot ---
    # Import lazily with the Agg backend so the script works headless.
    import matplotlib
    matplotlib.use("Agg")
    import matplotlib.pyplot as plt

    taus = [r["tau_edge"] for r in all_results]

    fig, axes = plt.subplots(2, 3, figsize=(18, 11))
    fig.suptitle(f"Threshold Sweep on Validation Set — "
                 f"Optimal $\\tau_{{edge}}$ = {best_tau:.2f}",
                 fontsize=14, fontweight="bold")

    # 1. Pairwise edge metrics
    ax = axes[0, 0]
    ax.plot(taus, [r["pairwise_f1"] for r in all_results],
            "o-", label="F1", color="steelblue", linewidth=2, markersize=5)
    ax.plot(taus, [r["pairwise_precision"] for r in all_results],
            "s--", label="Precision", color="coral", alpha=0.7, markersize=4)
    ax.plot(taus, [r["pairwise_recall"] for r in all_results],
            "^--", label="Recall", color="green", alpha=0.7, markersize=4)
    ax.axvline(best_tau, color="red", linestyle=":", alpha=0.5,
               label=f"Optimal ({best_tau:.2f})")
    ax.set_xlabel(r"$\tau_{edge}$")
    ax.set_ylabel("Score")
    ax.set_title("Pairwise Edge Metrics")
    ax.legend(fontsize=9)
    ax.grid(alpha=0.3)
    ax.set_xlim(0, 1)

    # 2. Cluster match rates
    ax = axes[0, 1]
    ax.plot(taus, [r["truth_match_rate"] * 100 for r in all_results],
            "o-", label="Truth match rate", color="steelblue", linewidth=2, markersize=5)
    ax.plot(taus, [r["reco_match_rate"] * 100 for r in all_results],
            "s-", label="Reco match rate", color="coral", linewidth=2, markersize=5)
    ax.axvline(best_tau, color="red", linestyle=":", alpha=0.5)
    ax.set_xlabel(r"$\tau_{edge}$")
    ax.set_ylabel("Match rate (%)")
    ax.set_title("Cluster Match Rates")
    ax.legend(fontsize=9)
    ax.grid(alpha=0.3)
    ax.set_xlim(0, 1)

    # 3. Purity & completeness
    ax = axes[0, 2]
    ax.plot(taus, [r["mean_purity"] for r in all_results],
            "o-", label="Purity", color="steelblue", linewidth=2, markersize=5)
    ax.plot(taus, [r["mean_completeness"] for r in all_results],
            "s-", label="Completeness", color="coral", linewidth=2, markersize=5)
    ax.axvline(best_tau, color="red", linestyle=":", alpha=0.5)
    ax.set_xlabel(r"$\tau_{edge}$")
    ax.set_ylabel("Score")
    ax.set_title("Mean Purity & Completeness")
    ax.legend(fontsize=9)
    ax.grid(alpha=0.3)
    ax.set_xlim(0, 1)

    # 4. Splits & merges
    ax = axes[1, 0]
    ax.plot(taus, [r["n_split"] for r in all_results],
            "o-", label="Splits", color="orange", linewidth=2, markersize=5)
    ax.plot(taus, [r["n_merged"] for r in all_results],
            "s-", label="Merges", color="purple", linewidth=2, markersize=5)
    ax.axvline(best_tau, color="red", linestyle=":", alpha=0.5)
    ax.set_xlabel(r"$\tau_{edge}$")
    ax.set_ylabel("Count")
    ax.set_title("Cluster Splits & Merges")
    ax.legend(fontsize=9)
    ax.grid(alpha=0.3)
    ax.set_xlim(0, 1)

    # 5. Number of predicted clusters vs truth
    ax = axes[1, 1]
    ax.plot(taus, [r["n_pred"] for r in all_results],
            "o-", color="steelblue", linewidth=2, markersize=5, label="Predicted")
    n_truth = all_results[0]["n_truth"]
    ax.axhline(n_truth, color="red", linestyle="--", alpha=0.7,
               label=f"Truth ({n_truth:,})")
    ax.axvline(best_tau, color="red", linestyle=":", alpha=0.5)
    ax.set_xlabel(r"$\tau_{edge}$")
    ax.set_ylabel("Number of clusters")
    ax.set_title("Predicted vs Truth Clusters")
    ax.legend(fontsize=9)
    ax.grid(alpha=0.3)
    ax.set_xlim(0, 1)

    # 6. Summary table
    ax = axes[1, 2]
    ax.axis("off")
    table_data = [
        [r"Optimal $\tau_{edge}$", f"{best_tau:.2f}"],
        ["Pairwise F1", f"{best['pairwise_f1']:.4f}"],
        ["Pairwise P / R",
         f"{best['pairwise_precision']:.4f} / {best['pairwise_recall']:.4f}"],
        ["Truth match rate", f"{best['truth_match_rate']:.1%}"],
        ["Reco match rate", f"{best['reco_match_rate']:.1%}"],
        ["Mean purity", f"{best['mean_purity']:.4f}"],
        ["Mean completeness", f"{best['mean_completeness']:.4f}"],
        ["Splits", f"{best['n_split']:,}"],
        ["Merges", f"{best['n_merged']:,}"],
        ["Pred / Truth clusters", f"{best['n_pred']:,} / {best['n_truth']:,}"],
    ]
    table = ax.table(cellText=table_data, colLabels=["Metric", "Value"],
                     cellLoc="center", loc="center")
    table.auto_set_font_size(False)
    table.set_fontsize(11)
    table.scale(1, 1.5)
    ax.set_title("Optimal Threshold Summary", pad=20)

    plt.tight_layout()
    plot_path = out_dir / "threshold_sweep.png"
    plt.savefig(plot_path, dpi=150, bbox_inches="tight")
    print(f"Saved plot to {plot_path}")

    print(f"\nTo freeze this threshold, update configs/default.yaml:")
    print(f" inference:")
    print(f" tau_edge: {best_tau:.2f}")
+ +Reads ``calomcsim.ancestorSimIds`` from reprocessed ROOT files and checks: +- Chain completeness (non-empty, ends at a root with no parent) +- Calo-entrant identification per disk +- Chain length distribution +- Comparison of old vs new truth cluster statistics +""" + +import argparse +import csv +import sys +from collections import Counter +from pathlib import Path + +import numpy as np + +sys.path.insert(0, str(Path(__file__).resolve().parents[1])) + +from src.data.truth_labels import assign_mc_truth +from src.data.truth_labels_primary import ( + assign_mc_truth_primary, + build_calo_root_map, +) + + +def load_crystal_disk_map(csv_path=None): + """Load crystalId -> diskId mapping from geometry CSV.""" + if csv_path is None: + csv_path = Path(__file__).resolve().parents[1] / "data" / "crystal_geometry.csv" + disk_map = {} + with open(csv_path) as f: + for row in csv.DictReader(f): + disk_map[int(row["crystalId"])] = int(row["diskId"]) + return disk_map + + +def validate_file(filepath, crystal_disk_map, max_events=None, verbose=False): + """Validate ancestry data in one v2 ROOT file. + + Returns a dict with aggregate statistics. 
+ """ + import uproot + + tree = uproot.open(f"{filepath}:EventNtuple/ntuple") + branches = [ + "calomcsim.id", "calomcsim.pdg", "calomcsim.startCode", + "calomcsim.ancestorSimIds", + "calohitsmc.simParticleIds", "calohitsmc.eDeps", + "calohits.crystalId_", + ] + arrays = tree.arrays(branches, entry_stop=max_events) + n_events = len(arrays["calomcsim.id"]) + + stats = { + "n_events": n_events, + "total_simps": 0, + "empty_chains": 0, + "chain_lengths": [], + "startcode_dist": Counter(), + # Per-disk calo-entrant stats + "n_calo_entrants": [], # unique calo-entrants per event + # Truth comparison + "old_clusters": 0, "new_clusters": 0, + "old_singletons": 0, "new_singletons": 0, + "old_ambiguous": 0, "new_ambiguous": 0, + "total_hits": 0, + } + + for evt in range(n_events): + sim_ids_evt = arrays["calomcsim.id"][evt] + anc_evt = arrays["calomcsim.ancestorSimIds"][evt] + pdg_evt = arrays["calomcsim.pdg"][evt] + start_evt = arrays["calomcsim.startCode"][evt] + + hit_simids = arrays["calohitsmc.simParticleIds"][evt] + hit_edeps = arrays["calohitsmc.eDeps"][evt] + hit_cryids = arrays["calohits.crystalId_"][evt] + n_hits = len(hit_simids) + + # --- Validate chains --- + n_simps = len(sim_ids_evt) + stats["total_simps"] += n_simps + + for j in range(n_simps): + chain = list(anc_evt[j]) + stats["chain_lengths"].append(len(chain)) + if len(chain) == 0: + stats["empty_chains"] += 1 + stats["startcode_dist"][int(start_evt[j])] += 1 + + # --- Build calo-root map --- + calo_root_map = build_calo_root_map( + sim_ids_evt, anc_evt, hit_simids, hit_cryids, crystal_disk_map + ) + + # Count unique calo-entrants + entrants = set(calo_root_map.values()) + stats["n_calo_entrants"].append(len(entrants)) + + # --- Compare old vs new truth --- + if n_hits < 2: + continue + + hit_disks = np.array([crystal_disk_map.get(int(c), -1) + for c in hit_cryids], dtype=np.int64) + + for disk_id in [0, 1]: + mask_d = hit_disks == disk_id + n_disk = mask_d.sum() + if n_disk < 2: + continue + idx = 
np.where(mask_d)[0] + d_sim = [list(hit_simids[i]) for i in idx] + d_edeps = [list(hit_edeps[i]) for i in idx] + d_disks = np.full(n_disk, disk_id, dtype=np.int64) + # dummy edge_index (not needed for cluster counting) + edge_index = np.empty((2, 0), dtype=np.int64) + + _, _, tc_old, amb_old = assign_mc_truth( + d_sim, d_edeps, d_disks, edge_index) + _, _, tc_new, amb_new = assign_mc_truth_primary( + d_sim, d_edeps, d_disks, edge_index, calo_root_map) + + stats["total_hits"] += n_disk + stats["old_ambiguous"] += amb_old.sum() + stats["new_ambiguous"] += amb_new.sum() + + old_ids = tc_old[tc_old >= 0] + new_ids = tc_new[tc_new >= 0] + if len(old_ids) > 0: + old_counts = Counter(old_ids.tolist()) + stats["old_clusters"] += len(old_counts) + stats["old_singletons"] += sum(1 for v in old_counts.values() if v == 1) + if len(new_ids) > 0: + new_counts = Counter(new_ids.tolist()) + stats["new_clusters"] += len(new_counts) + stats["new_singletons"] += sum(1 for v in new_counts.values() if v == 1) + + return stats + + +def main(): + parser = argparse.ArgumentParser(description="Validate ancestry in v2 ROOT files") + parser.add_argument("--root-dir", + default="/exp/mu2e/data/users/wzhou2/GNN/root_files_v2", + help="Directory with v2 ROOT files") + parser.add_argument("--max-files", type=int, default=None, + help="Limit number of files to process") + parser.add_argument("--max-events", type=int, default=500, + help="Max events per file") + parser.add_argument("-v", "--verbose", action="store_true") + args = parser.parse_args() + + root_dir = Path(args.root_dir) + files = sorted(root_dir.glob("mcs.*.root")) + # Filter to complete files (>= 1800 MB) + files = [f for f in files if f.stat().st_size >= 1800 * 1024 * 1024] + # Filter to valid files (have EventNtuple tree) + import uproot as _uproot + valid_files = [] + for f in files: + try: + _uproot.open(f"{f}:EventNtuple/ntuple") + valid_files.append(f) + except Exception: + print(f" SKIP (corrupt): {f.name}") + files = 
def main():
    """Run the ancestry validation over a directory of v2 ROOT files.

    Discovers files, filters out incomplete/corrupt ones, validates each
    with validate_file(), aggregates the statistics, and prints a summary
    report comparing old (SimParticle) vs new (calo-entrant) truth.
    """
    parser = argparse.ArgumentParser(description="Validate ancestry in v2 ROOT files")
    parser.add_argument("--root-dir",
                        default="/exp/mu2e/data/users/wzhou2/GNN/root_files_v2",
                        help="Directory with v2 ROOT files")
    parser.add_argument("--max-files", type=int, default=None,
                        help="Limit number of files to process")
    parser.add_argument("--max-events", type=int, default=500,
                        help="Max events per file")
    parser.add_argument("-v", "--verbose", action="store_true")
    args = parser.parse_args()

    root_dir = Path(args.root_dir)
    files = sorted(root_dir.glob("mcs.*.root"))
    # Filter to complete files (>= 1800 MB)
    files = [f for f in files if f.stat().st_size >= 1800 * 1024 * 1024]
    # Filter to valid files (have EventNtuple tree)
    import uproot as _uproot
    valid_files = []
    for f in files:
        try:
            _uproot.open(f"{f}:EventNtuple/ntuple")
            valid_files.append(f)
        except Exception:
            # Best-effort scan: report and skip unreadable files.
            print(f" SKIP (corrupt): {f.name}")
    files = valid_files
    if args.max_files:
        files = files[:args.max_files]

    print(f"Validating {len(files)} v2 ROOT files "
          f"({args.max_events or 'all'} events/file)")

    crystal_disk_map = load_crystal_disk_map()
    print(f"Crystal disk map: {len(crystal_disk_map)} crystals")

    # Aggregate accumulator; keys mirror the per-file stats dict.
    total = {
        "n_events": 0, "total_simps": 0, "empty_chains": 0,
        "chain_lengths": [], "startcode_dist": Counter(),
        "n_calo_entrants": [],
        "old_clusters": 0, "new_clusters": 0,
        "old_singletons": 0, "new_singletons": 0,
        "old_ambiguous": 0, "new_ambiguous": 0, "total_hits": 0,
    }

    for i, f in enumerate(files):
        print(f" [{i+1}/{len(files)}] {f.name}...", end=" ", flush=True)
        s = validate_file(f, crystal_disk_map, args.max_events, args.verbose)
        total["n_events"] += s["n_events"]
        total["total_simps"] += s["total_simps"]
        total["empty_chains"] += s["empty_chains"]
        total["chain_lengths"].extend(s["chain_lengths"])
        total["startcode_dist"] += s["startcode_dist"]
        total["n_calo_entrants"].extend(s["n_calo_entrants"])
        for key in ["old_clusters", "new_clusters", "old_singletons",
                    "new_singletons", "old_ambiguous", "new_ambiguous",
                    "total_hits"]:
            total[key] += s[key]
        print(f"{s['n_events']} events, {s['total_simps']} simPs")

    print("\n" + "=" * 60)
    print("ANCESTRY VALIDATION SUMMARY")
    print("=" * 60)
    print(f"Events: {total['n_events']}")
    print(f"SimParticles: {total['total_simps']}")

    # Chain-length distribution; max(...,1) guards division by zero.
    chains = total["chain_lengths"]
    print(f"\nChain lengths:")
    print(f" Empty (no ancestors): {total['empty_chains']} "
          f"({100*total['empty_chains']/max(total['total_simps'],1):.1f}%)")
    if chains:
        arr = np.array(chains)
        print(f" Mean: {arr.mean():.2f}, Median: {np.median(arr):.0f}, "
              f"Max: {arr.max()}")
        for length in [0, 1, 2, 3, 4, 5]:
            pct = 100 * (arr == length).sum() / len(arr)
            print(f" Length {length}: {pct:.1f}%")
        pct_long = 100 * (arr > 5).sum() / len(arr)
        print(f" Length >5: {pct_long:.1f}%")

    if total["n_calo_entrants"]:
        ce = np.array(total["n_calo_entrants"])
        print(f"\nCalo-entrants per event: mean={ce.mean():.1f}, "
              f"median={np.median(ce):.0f}, max={ce.max()}")

    print(f"\nStartCode distribution (top 10):")
    for code, count in total["startcode_dist"].most_common(10):
        pct = 100 * count / total["total_simps"]
        print(f" {code:>4d}: {count:>8d} ({pct:.1f}%)")

    print(f"\n{'='*60}")
    print("TRUTH COMPARISON (old SimParticle vs new calo-entrant)")
    print(f"{'='*60}")
    nh = total["total_hits"]
    print(f"Total hits: {nh}")
    print(f"Old ambiguous: {total['old_ambiguous']} ({100*total['old_ambiguous']/max(nh,1):.2f}%)")
    print(f"New ambiguous: {total['new_ambiguous']} ({100*total['new_ambiguous']/max(nh,1):.2f}%)")
    oa = total["old_ambiguous"]
    na = total["new_ambiguous"]
    if oa > 0:
        print(f"Ambiguity reduction: {oa-na} ({100*(oa-na)/oa:.1f}%)")

    oc = total["old_clusters"]
    nc = total["new_clusters"]
    os_ = total["old_singletons"]
    ns = total["new_singletons"]
    print(f"\nOld clusters: {oc} (singletons: {os_}, {100*os_/max(oc,1):.1f}%)")
    print(f"New clusters: {nc} (singletons: {ns}, {100*ns/max(nc,1):.1f}%)")
    print(f"Cluster reduction: {oc-nc} ({100*(oc-nc)/max(oc,1):.1f}%)")
    if os_ > 0:
        print(f"Singleton reduction: {os_-ns} ({100*(os_-ns)/os_:.1f}%)")

    # Sanity check: new clusters should only merge old ones, never split
    if nc > oc:
        print("\n*** WARNING: new truth has MORE clusters than old — "
              "this should not happen! ***")
+# Source this file, do not execute it: source setup_env.sh + +source /cvmfs/mu2e.opensciencegrid.org/setupmu2e-art.sh +# pyenv is a shell function defined by setupmu2e-art.sh; in non-interactive +# shells it may not be exported, so fall back to sourcing activate directly. +if declare -f pyenv &>/dev/null; then + pyenv ana 2.6.1 +else + source /cvmfs/mu2e.opensciencegrid.org/env/ana/2.6.1/bin/activate +fi + +# User-installed packages (torch_geometric lives here) +export PYTHONPATH="/nashome/w/wzhou2/.local/lib/python3.12/site-packages:${PYTHONPATH}" + +# Add project src to Python path so imports like "from data.dataset import ..." work +export PYTHONPATH="$(dirname "$(realpath "${BASH_SOURCE[0]}")")/src:${PYTHONPATH}" + +echo "Environment ready: Python $(python3 --version 2>&1 | cut -d' ' -f2), torch $(python3 -c 'import torch; print(torch.__version__)' 2>/dev/null)" diff --git a/CaloClusterGNN/splits/.gitkeep b/CaloClusterGNN/splits/.gitkeep new file mode 100644 index 0000000..e69de29 diff --git a/CaloClusterGNN/splits/test_files.txt b/CaloClusterGNN/splits/test_files.txt new file mode 100644 index 0000000..823a9de --- /dev/null +++ b/CaloClusterGNN/splits/test_files.txt @@ -0,0 +1,8 @@ +/exp/mu2e/data/users/wzhou2/GNN/root_files_v2/nts.wzhou2.description.ancestry_v2.001430_00000003.root +/exp/mu2e/data/users/wzhou2/GNN/root_files_v2/nts.wzhou2.description.ancestry_v2.001430_00000001.root +/exp/mu2e/data/users/wzhou2/GNN/root_files_v2/nts.wzhou2.description.ancestry_v2.001430_00000198.root +/exp/mu2e/data/users/wzhou2/GNN/root_files_v2/nts.wzhou2.description.ancestry_v2.001430_00000121.root +/exp/mu2e/data/users/wzhou2/GNN/root_files_v2/nts.wzhou2.description.ancestry_v2.001430_00000012.root +/exp/mu2e/data/users/wzhou2/GNN/root_files_v2/nts.wzhou2.description.ancestry_v2.001430_00000019.root +/exp/mu2e/data/users/wzhou2/GNN/root_files_v2/nts.wzhou2.description.ancestry_v2.001430_00000027.root 
+/exp/mu2e/data/users/wzhou2/GNN/root_files_v2/nts.wzhou2.description.ancestry_v2.001430_00000037.root diff --git a/CaloClusterGNN/splits/train_files.txt b/CaloClusterGNN/splits/train_files.txt new file mode 100644 index 0000000..96192bb --- /dev/null +++ b/CaloClusterGNN/splits/train_files.txt @@ -0,0 +1,35 @@ +/exp/mu2e/data/users/wzhou2/GNN/root_files_v2/nts.wzhou2.description.ancestry_v2.001430_00000035.root +/exp/mu2e/data/users/wzhou2/GNN/root_files_v2/nts.wzhou2.description.ancestry_v2.001430_00000065.root +/exp/mu2e/data/users/wzhou2/GNN/root_files_v2/nts.wzhou2.description.ancestry_v2.001430_00000062.root +/exp/mu2e/data/users/wzhou2/GNN/root_files_v2/nts.wzhou2.description.ancestry_v2.001430_00000165.root +/exp/mu2e/data/users/wzhou2/GNN/root_files_v2/nts.wzhou2.description.ancestry_v2.001430_00000020.root +/exp/mu2e/data/users/wzhou2/GNN/root_files_v2/nts.wzhou2.description.ancestry_v2.001430_00000171.root +/exp/mu2e/data/users/wzhou2/GNN/root_files_v2/nts.wzhou2.description.ancestry_v2.001430_00000022.root +/exp/mu2e/data/users/wzhou2/GNN/root_files_v2/nts.wzhou2.description.ancestry_v2.001430_00000060.root +/exp/mu2e/data/users/wzhou2/GNN/root_files_v2/nts.wzhou2.description.ancestry_v2.001430_00000046.root +/exp/mu2e/data/users/wzhou2/GNN/root_files_v2/nts.wzhou2.description.ancestry_v2.001430_00000004.root +/exp/mu2e/data/users/wzhou2/GNN/root_files_v2/nts.wzhou2.description.ancestry_v2.001430_00000029.root +/exp/mu2e/data/users/wzhou2/GNN/root_files_v2/nts.wzhou2.description.ancestry_v2.001430_00000201.root +/exp/mu2e/data/users/wzhou2/GNN/root_files_v2/nts.wzhou2.description.ancestry_v2.001430_00000006.root +/exp/mu2e/data/users/wzhou2/GNN/root_files_v2/nts.wzhou2.description.ancestry_v2.001430_00000009.root +/exp/mu2e/data/users/wzhou2/GNN/root_files_v2/nts.wzhou2.description.ancestry_v2.001430_00000013.root +/exp/mu2e/data/users/wzhou2/GNN/root_files_v2/nts.wzhou2.description.ancestry_v2.001430_00000079.root 
+/exp/mu2e/data/users/wzhou2/GNN/root_files_v2/nts.wzhou2.description.ancestry_v2.001430_00000042.root +/exp/mu2e/data/users/wzhou2/GNN/root_files_v2/nts.wzhou2.description.ancestry_v2.001430_00000033.root +/exp/mu2e/data/users/wzhou2/GNN/root_files_v2/nts.wzhou2.description.ancestry_v2.001430_00000184.root +/exp/mu2e/data/users/wzhou2/GNN/root_files_v2/nts.wzhou2.description.ancestry_v2.001430_00000007.root +/exp/mu2e/data/users/wzhou2/GNN/root_files_v2/nts.wzhou2.description.ancestry_v2.001430_00000000.root +/exp/mu2e/data/users/wzhou2/GNN/root_files_v2/nts.wzhou2.description.ancestry_v2.001430_00000038.root +/exp/mu2e/data/users/wzhou2/GNN/root_files_v2/nts.wzhou2.description.ancestry_v2.001430_00000111.root +/exp/mu2e/data/users/wzhou2/GNN/root_files_v2/nts.wzhou2.description.ancestry_v2.001430_00000072.root +/exp/mu2e/data/users/wzhou2/GNN/root_files_v2/nts.wzhou2.description.ancestry_v2.001430_00000066.root +/exp/mu2e/data/users/wzhou2/GNN/root_files_v2/nts.wzhou2.description.ancestry_v2.001430_00000089.root +/exp/mu2e/data/users/wzhou2/GNN/root_files_v2/nts.wzhou2.description.ancestry_v2.001430_00000002.root +/exp/mu2e/data/users/wzhou2/GNN/root_files_v2/nts.wzhou2.description.ancestry_v2.001430_00000181.root +/exp/mu2e/data/users/wzhou2/GNN/root_files_v2/nts.wzhou2.description.ancestry_v2.001430_00000053.root +/exp/mu2e/data/users/wzhou2/GNN/root_files_v2/nts.wzhou2.description.ancestry_v2.001430_00000014.root +/exp/mu2e/data/users/wzhou2/GNN/root_files_v2/nts.wzhou2.description.ancestry_v2.001430_00000028.root +/exp/mu2e/data/users/wzhou2/GNN/root_files_v2/nts.wzhou2.description.ancestry_v2.001430_00000024.root +/exp/mu2e/data/users/wzhou2/GNN/root_files_v2/nts.wzhou2.description.ancestry_v2.001430_00000008.root +/exp/mu2e/data/users/wzhou2/GNN/root_files_v2/nts.wzhou2.description.ancestry_v2.001430_00000015.root +/exp/mu2e/data/users/wzhou2/GNN/root_files_v2/nts.wzhou2.description.ancestry_v2.001430_00000130.root diff --git 
a/CaloClusterGNN/splits/val_files.txt b/CaloClusterGNN/splits/val_files.txt new file mode 100644 index 0000000..5310882 --- /dev/null +++ b/CaloClusterGNN/splits/val_files.txt @@ -0,0 +1,7 @@ +/exp/mu2e/data/users/wzhou2/GNN/root_files_v2/nts.wzhou2.description.ancestry_v2.001430_00000059.root +/exp/mu2e/data/users/wzhou2/GNN/root_files_v2/nts.wzhou2.description.ancestry_v2.001430_00000044.root +/exp/mu2e/data/users/wzhou2/GNN/root_files_v2/nts.wzhou2.description.ancestry_v2.001430_00000040.root +/exp/mu2e/data/users/wzhou2/GNN/root_files_v2/nts.wzhou2.description.ancestry_v2.001430_00000056.root +/exp/mu2e/data/users/wzhou2/GNN/root_files_v2/nts.wzhou2.description.ancestry_v2.001430_00000138.root +/exp/mu2e/data/users/wzhou2/GNN/root_files_v2/nts.wzhou2.description.ancestry_v2.001430_00000032.root +/exp/mu2e/data/users/wzhou2/GNN/root_files_v2/nts.wzhou2.description.ancestry_v2.001430_00000051.root diff --git a/CaloClusterGNN/src/__init__.py b/CaloClusterGNN/src/__init__.py new file mode 100644 index 0000000..693a4f5 --- /dev/null +++ b/CaloClusterGNN/src/__init__.py @@ -0,0 +1,13 @@ +"""GNN calorimeter clustering for Mu2e. + +Edge-classification GNN that predicts which calorimeter hit pairs belong +to the same physics cluster. Built on PyTorch Geometric. + +Subpackages +----------- +data Dataset loading, graph construction, truth labels, normalization. +geometry Crystal geometry lookup (crystalId -> disk, x, y). +models GNN architectures (SimpleEdgeNet, CaloClusterNet). +training Training loop, loss functions, evaluation metrics. +inference Cluster reconstruction from edge predictions, postprocessing. +""" diff --git a/CaloClusterGNN/src/data/__init__.py b/CaloClusterGNN/src/data/__init__.py new file mode 100644 index 0000000..ed33cc4 --- /dev/null +++ b/CaloClusterGNN/src/data/__init__.py @@ -0,0 +1,9 @@ +"""Data pipeline: ROOT files -> PyG graphs with MC truth labels. + +Modules +------- +dataset Extract per-disk graphs from ROOT files; CaloGraphDataset loader. 
"""
PyG Dataset for calorimeter hit graphs.

Reads EventNtuple ROOT files (MDC2025-002 format), extracts per-disk
graphs as PyG Data objects with node/edge features and truth labels.

Two usage modes:
  1. process_and_save(): build graphs from ROOT files, save as .pt files
  2. CaloGraphDataset: PyG Dataset that loads saved .pt files
"""

import sys
from pathlib import Path

import numpy as np
import torch
from torch_geometric.data import Data, Dataset

from src.data.graph_builder import (
    build_graph,
    compute_edge_features,
    compute_node_features,
)
from src.data.truth_labels_primary import assign_mc_truth_primary, build_calo_root_map
from src.geometry.crystal_geometry import load_crystal_map


# MDC2025-002 branches
_BRANCHES = [
    "calohits.crystalId_",
    "calohits.eDep_",
    "calohits.time_",
    "calohits.crystalPos_.fCoordinates.fX",
    "calohits.crystalPos_.fCoordinates.fY",
    "calohitsmc.simParticleIds",
    "calohitsmc.eDeps",
    # NOTE(review): "calohitsmc.nsim" is requested below but never read in
    # this module — confirm a downstream consumer needs it, else drop it.
    "calohitsmc.nsim",
    # v2 ancestry branches (for calo-entrant truth)
    "calomcsim.id",
    "calomcsim.ancestorSimIds",
]


def extract_events_from_file(filepath, crystal_map, graph_cfg, max_events=None):
    """Read one ROOT file and yield PyG Data objects (one per disk per event).

    Parameters
    ----------
    filepath : str or Path
        Path to ROOT file.
    crystal_map : dict
        crystalId -> (diskId, x_mm, y_mm).
    graph_cfg : dict
        Graph construction parameters (r_max_mm, dt_max_ns, k_min, k_max).
    max_events : int or None
        Limit number of events to read.

    Yields
    ------
    data : torch_geometric.data.Data
        Per-disk graph with node/edge features and labels.
    event_idx : int
    disk_id : int
    diagnostics : dict
        Graph-construction statistics from build_graph().
    """
    # Local import: uproot is only needed when actually reading ROOT files.
    import uproot

    tree = uproot.open(str(filepath) + ":EventNtuple/ntuple")
    arrays = tree.arrays(_BRANCHES, entry_stop=max_events)
    n_events = len(arrays)

    # Graph-construction hyperparameters with frozen defaults.
    r_max = graph_cfg.get("r_max_mm", 150.0)
    dt_max = graph_cfg.get("dt_max_ns", 25.0)
    k_min = graph_cfg.get("k_min", 3)
    k_max = graph_cfg.get("k_max", 20)

    for ev in range(n_events):
        nhits = len(arrays["calohits.crystalId_"][ev])
        if nhits == 0:
            continue

        cryids = np.array(arrays["calohits.crystalId_"][ev], dtype=np.int64)
        energies = np.array(arrays["calohits.eDep_"][ev], dtype=np.float64)
        times = np.array(arrays["calohits.time_"][ev], dtype=np.float64)

        # Get positions — prefer in-file crystal positions (MDC2025-002).
        # NOTE(review): this branch is always requested in _BRANCHES, so
        # has_pos is expected to be True; the effective fallback is the
        # all-zeros check further down.
        has_pos = "calohits.crystalPos_.fCoordinates.fX" in arrays.fields
        if has_pos:
            xs = np.array(arrays["calohits.crystalPos_.fCoordinates.fX"][ev], dtype=np.float64)
            ys = np.array(arrays["calohits.crystalPos_.fCoordinates.fY"][ev], dtype=np.float64)
        else:
            xs = np.zeros(nhits, dtype=np.float64)
            ys = np.zeros(nhits, dtype=np.float64)

        # Get disk per hit from crystal geometry; -1 marks crystals missing
        # from the geometry map (those hits match neither disk below).
        disks = np.array([crystal_map[int(c)][0] if int(c) in crystal_map else -1
                          for c in cryids], dtype=np.int64)

        # Fall back to crystal_map positions if in-file positions are all zero
        if not has_pos or (np.all(xs == 0) and np.all(ys == 0)):
            for i, c in enumerate(cryids):
                c = int(c)
                if c in crystal_map:
                    _, xs[i], ys[i] = crystal_map[c]

        # MC truth data
        sim_ids = arrays["calohitsmc.simParticleIds"][ev]
        edeps_mc = arrays["calohitsmc.eDeps"][ev]

        # v2 ancestry data for calo-entrant truth
        calomcsim_ids = arrays["calomcsim.id"][ev]
        calomcsim_ancestors = arrays["calomcsim.ancestorSimIds"][ev]

        # Build crystal->disk map for this event's crystals
        crystal_disk_map = {int(c): crystal_map[int(c)][0]
                            for c in cryids if int(c) in crystal_map}

        # Build calo-entrant root map (per event, used by all disks)
        calo_root_map = build_calo_root_map(
            calomcsim_ids, calomcsim_ancestors,
            sim_ids, cryids, crystal_disk_map,
        )

        # Process each disk separately (the calorimeter has disks 0 and 1;
        # edges never cross disks).
        for disk_id in [0, 1]:
            disk_mask = disks == disk_id
            n_disk = disk_mask.sum()
            # A graph needs at least two hits to have any edges.
            if n_disk < 2:
                continue

            d_energies = energies[disk_mask]
            d_times = times[disk_mask]
            d_xs = xs[disk_mask]
            d_ys = ys[disk_mask]
            d_positions = np.stack([d_xs, d_ys], axis=1)

            # Build graph
            edge_index, diag = build_graph(
                d_positions, d_times,
                r_max=r_max, dt_max=dt_max, k_min=k_min, k_max=k_max,
            )

            if edge_index.shape[1] == 0:
                continue

            # Node features (6-dim)
            node_feat = compute_node_features(d_positions, d_times, d_energies)
            # Edge features (8-dim)
            edge_feat = compute_edge_features(d_positions, d_times, d_energies, edge_index)

            # Truth labels (MC truth from SimParticle IDs), restricted to
            # this disk's hits (per-hit MC lists re-indexed to disk-local).
            disk_indices = np.where(disk_mask)[0]
            d_sim_ids = [list(sim_ids[i]) for i in disk_indices]
            d_edeps_mc = [list(edeps_mc[i]) for i in disk_indices]
            d_disks = np.full(n_disk, disk_id, dtype=np.int64)

            y_edge, edge_mask, hit_truth_cluster, is_ambiguous = assign_mc_truth_primary(
                d_sim_ids, d_edeps_mc, d_disks, edge_index, calo_root_map,
            )
            # Node saliency label: 1 = multi-hit truth cluster, 0 = singleton or ambiguous.
            # Singletons are stray pileup hits (10-30 MeV) that cause bridge merges.
            y_node = np.zeros(n_disk, dtype=np.int64)
            for cid in np.unique(hit_truth_cluster):
                if cid < 0:
                    continue
                mask = hit_truth_cluster == cid
                if mask.sum() >= 2:
                    y_node[mask] = 1

            # Build PyG Data
            data = Data(
                x=torch.from_numpy(node_feat),
                edge_index=torch.from_numpy(edge_index),
                edge_attr=torch.from_numpy(edge_feat),
                y_edge=torch.from_numpy(y_edge),
                edge_mask=torch.from_numpy(edge_mask),
                y_node=torch.from_numpy(y_node),
                hit_truth_cluster=torch.from_numpy(hit_truth_cluster),
                # Metadata
                n_hits=n_disk,
                disk_id=disk_id,
            )

            yield data, ev, disk_id, diag


class CaloGraphDataset(Dataset):
    """PyG Dataset that loads pre-built .pt graph files.

    Parameters
    ----------
    processed_dir : str or Path
        Directory containing {source_stem}_evt{N}_disk{D}.pt graph files
        (the format _source_stem() parses).
    file_list : list[str] or None
        If given, only load graphs from these source ROOT files
        (for split-aware loading). Matches on source filename stem.
    transform : callable or None
        Optional per-graph transform, applied by the PyG Dataset base class.
    preload : bool
        If True, load all graphs into memory on init. Much faster for
        training when the dataset fits in RAM (~7 KB/graph).
    packed_path : str or Path or None
        If given and the file exists, load all graphs from this single
        packed .pt file and ignore processed_dir discovery entirely.
    """

    def __init__(self, processed_dir, file_list=None, transform=None,
                 preload=False, packed_path=None):
        self._processed_dir = Path(processed_dir)

        # Don't call super().__init__() — we manage our own file list.
        # But PyG Dataset needs certain attributes, so set them manually.
        self._indices = None
        self.transform = transform
        self.pre_transform = None
        self.pre_filter = None
        self._cache = None

        # Fast path: load from a single packed file (e.g. train.pt)
        if packed_path is not None and Path(packed_path).exists():
            print(f" Loading packed file {packed_path}...",
                  end=" ", flush=True)
            self._cache = torch.load(packed_path, weights_only=False)
            # dummy index list so len() reports the packed graph count
            self._files = list(range(len(self._cache)))
            print(f"done ({len(self._cache)} graphs).")
            return

        # Standard path: discover individual .pt files
        all_files = sorted(self._processed_dir.glob("*.pt"))
        # Exclude packed split files from individual file listing
        packed_names = {"train.pt", "val.pt", "test.pt"}
        all_files = [f for f in all_files if f.name not in packed_names]

        if file_list is not None:
            allowed_stems = set()
            for f in file_list:
                allowed_stems.add(Path(f).stem)
            self._files = [f for f in all_files
                           if self._source_stem(f) in allowed_stems]
        else:
            self._files = all_files

        if preload:
            print(f" Preloading {len(self._files)} graphs into memory...",
                  end=" ", flush=True)
            self._cache = [torch.load(f, weights_only=False)
                           for f in self._files]
            print("done.")

    @staticmethod
    def _source_stem(pt_path):
        """Extract source ROOT file stem from .pt filename.

        Filename format: {source_stem}_evt{N}_disk{D}.pt — everything
        before the last "_evt" is the source stem.
        """
        name = pt_path.stem
        # Find last _evt and strip from there
        idx = name.rfind("_evt")
        return name[:idx] if idx >= 0 else name

    def len(self):
        """Return the number of graphs in the dataset."""
        return len(self._files)

    def get(self, idx):
        """Load and return the graph at index ``idx``.

        Returns from in-memory cache if preloaded, otherwise reads from disk.
        """
        if self._cache is not None:
            return self._cache[idx]
        return torch.load(self._files[idx], weights_only=False)

    @property
    def file_paths(self):
        """List of .pt file paths backing this dataset.

        NOTE(review): in packed mode this is a list of ints (dummy
        indices), not paths — callers should not rely on it then.
        """
        return self._files
+ """ + n = len(positions) + if n == 0: + return np.empty((2, 0), dtype=np.int64), _empty_diagnostics(0) + if n == 1: + return np.empty((2, 0), dtype=np.int64), _empty_diagnostics(1) + + positions = np.asarray(positions, dtype=np.float64) + times = np.asarray(times, dtype=np.float64) + + tree = cKDTree(positions) + + # Step 1: radius graph + pairs = tree.query_pairs(r=r_max, output_type='ndarray') + if len(pairs) == 0: + src_list, dst_list = [], [] + else: + # Filter by time + dt = np.abs(times[pairs[:, 0]] - times[pairs[:, 1]]) + mask = dt <= dt_max + pairs = pairs[mask] + # Make directed (both directions) + src_list = list(pairs[:, 0]) + list(pairs[:, 1]) + dst_list = list(pairs[:, 1]) + list(pairs[:, 0]) + + # Track degree per node + degree = np.zeros(n, dtype=np.int64) + for s in src_list: + degree[s] += 1 + + # Step 2: kNN fallback for isolated/low-degree nodes + isolated = np.where(degree < k_min)[0] + if len(isolated) > 0: + # Query more neighbors than needed to account for time filtering + k_query = min(k_min * 3, n) + dists, indices = tree.query(positions[isolated], k=k_query) + + for local_i, node_i in enumerate(isolated): + added = 0 + for j_pos in range(k_query): + node_j = indices[local_i, j_pos] + if node_j == node_i: + continue + if np.abs(times[node_i] - times[node_j]) > dt_max: + continue + # Add edge if not already present + src_list.append(node_i) + dst_list.append(node_j) + src_list.append(node_j) + dst_list.append(node_i) + added += 1 + if degree[node_i] + added >= k_min: + break + + # Build edge_index and deduplicate + if not src_list: + return np.empty((2, 0), dtype=np.int64), _empty_diagnostics(n) + + edge_index = np.stack([np.array(src_list, dtype=np.int64), + np.array(dst_list, dtype=np.int64)]) + edge_index = _deduplicate(edge_index) + + # Step 3: degree cap — keep k_max nearest per node + if k_max is not None and k_max > 0: + edge_index = _cap_degree(edge_index, positions, k_max) + + diagnostics = _compute_diagnostics(edge_index, n) + 
return edge_index, diagnostics + + +def _deduplicate(edge_index): + """Remove duplicate directed edges, keeping the first occurrence.""" + combined = edge_index[0] * (edge_index[1].max() + 1) + edge_index[1] + _, unique_idx = np.unique(combined, return_index=True) + return edge_index[:, unique_idx] + + +def _cap_degree(edge_index, positions, k_max): + """Keep at most k_max nearest neighbors per source node.""" + src, dst = edge_index + n = max(src.max(), dst.max()) + 1 + + # Compute distances for all edges + dx = positions[src, 0] - positions[dst, 0] + dy = positions[src, 1] - positions[dst, 1] + dists = np.sqrt(dx**2 + dy**2) + + keep = np.ones(len(src), dtype=bool) + + for node in range(n): + node_mask = src == node + if node_mask.sum() <= k_max: + continue + node_indices = np.where(node_mask)[0] + node_dists = dists[node_indices] + # Keep k_max nearest + sorted_idx = np.argsort(node_dists) + drop = node_indices[sorted_idx[k_max:]] + keep[drop] = False + + return edge_index[:, keep] + + +def _compute_diagnostics(edge_index, n_nodes): + """Compute graph statistics: node/edge counts, degree distribution.""" + if edge_index.shape[1] == 0: + return _empty_diagnostics(n_nodes) + + degree = np.bincount(edge_index[0], minlength=n_nodes) + return { + "n_nodes": n_nodes, + "n_edges": edge_index.shape[1], + "avg_degree": float(degree.mean()), + "min_degree": int(degree.min()), + "max_degree": int(degree.max()), + "n_isolated": int((degree == 0).sum()), + } + + +def _empty_diagnostics(n_nodes): + """Return zero-valued diagnostics dict for a graph with no edges.""" + return { + "n_nodes": n_nodes, + "n_edges": 0, + "avg_degree": 0.0, + "min_degree": 0, + "max_degree": 0, + "n_isolated": n_nodes, + } + + +def compute_edge_features(positions, times, energies, edge_index): + """Compute 8-dim edge features for directed edges. + + Parameters + ---------- + positions : ndarray, shape (n, 2) + Hit (x, y) in mm. + times : ndarray, shape (n,) + Hit times in ns. 
def compute_edge_features(positions, times, energies, edge_index):
    """Compute 8-dim edge features for directed edges.

    Per edge (src -> dst) the features are: dx, dy, distance, dt,
    Δlog(energy), energy asymmetry, log total energy, Δradius.

    Parameters
    ----------
    positions : ndarray, shape (n, 2)
        Hit (x, y) in mm.
    times : ndarray, shape (n,)
        Hit times in ns.
    energies : ndarray, shape (n,)
        Hit energies in MeV.
    edge_index : ndarray, shape (2, n_edges)
        Directed edges.

    Returns
    -------
    edge_attr : ndarray, shape (n_edges, 8), float32
    """
    src, dst = edge_index
    if len(src) == 0:
        return np.empty((0, 8), dtype=np.float32)

    # Geometric terms.
    dx = positions[src, 0] - positions[dst, 0]
    dy = positions[src, 1] - positions[dst, 1]
    dist = np.sqrt(dx**2 + dy**2)
    dt = times[src] - times[dst]

    # Energy terms (log1p keeps zero-energy hits finite).
    e_src, e_dst = energies[src], energies[dst]
    d_log_e = np.log1p(e_src) - np.log1p(e_dst)
    e_sum = e_src + e_dst
    e_asym = np.where(e_sum > 0, (e_src - e_dst) / e_sum, 0.0)
    log_sum_e = np.log1p(e_sum)

    # Radial term: difference of distances from the disk center.
    dr = (np.sqrt(positions[src, 0]**2 + positions[src, 1]**2)
          - np.sqrt(positions[dst, 0]**2 + positions[dst, 1]**2))

    columns = [dx, dy, dist, dt, d_log_e, e_asym, log_sum_e, dr]
    return np.stack(columns, axis=1).astype(np.float32)


def compute_node_features(positions, times, energies):
    """Compute 6-dim node features.

    Per hit: log(1+E), time, x, y, radius from disk center, and energy
    relative to the event's maximum hit energy.

    Parameters
    ----------
    positions : ndarray, shape (n, 2)
        Hit (x, y) in mm.
    times : ndarray, shape (n,)
        Hit times in ns.
    energies : ndarray, shape (n,)
        Hit energies in MeV.

    Returns
    -------
    node_feat : ndarray, shape (n, 6), float32
    """
    n = len(energies)
    if n == 0:
        return np.empty((0, 6), dtype=np.float32)

    x, y = positions[:, 0], positions[:, 1]
    log_e = np.log1p(energies)
    r = np.sqrt(x**2 + y**2)
    e_max = energies.max()
    rel_e = energies / e_max if e_max > 0 else np.zeros(n)

    return np.stack([log_e, times, x, y, r, rel_e], axis=1).astype(np.float32)
def compute_normalization_stats(dataset):
    """Compute per-feature mean and std from a dataset of PyG Data objects.

    Accumulates sums and squared sums in float64 and derives mean/std in
    float64 before casting to float32. (Squaring an already-rounded
    float32 mean loses precision; and Var = E[x²] − E[x]² can come out
    slightly negative numerically, which would turn sqrt into NaN — the
    variance is clamped at zero to prevent that.)

    Parameters
    ----------
    dataset : iterable of Data
        Training split graphs; each element must expose ``.x`` (node
        features) and ``.edge_attr`` (edge features) tensors.

    Returns
    -------
    stats : dict
        Keys: node_mean, node_std, edge_mean, edge_std (float32 tensors),
        plus node_count and edge_count bookkeeping ints.

    Raises
    ------
    ValueError
        If the dataset contains no nodes, or no edges at all.
    """
    node_sum = node_sq_sum = None
    node_count = 0
    edge_sum = edge_sq_sum = None
    edge_count = 0

    for data in dataset:
        x = data.x.double()
        if node_sum is None:
            node_sum = torch.zeros(x.shape[1], dtype=torch.float64)
            node_sq_sum = torch.zeros(x.shape[1], dtype=torch.float64)
        node_sum += x.sum(dim=0)
        node_sq_sum += (x ** 2).sum(dim=0)
        node_count += x.shape[0]

        ea = data.edge_attr.double()
        if edge_sum is None:
            edge_sum = torch.zeros(ea.shape[1], dtype=torch.float64)
            edge_sq_sum = torch.zeros(ea.shape[1], dtype=torch.float64)
        edge_sum += ea.sum(dim=0)
        edge_sq_sum += (ea ** 2).sum(dim=0)
        edge_count += ea.shape[0]

    if node_count == 0:
        raise ValueError("No data to compute normalization stats.")
    if edge_count == 0:
        raise ValueError("Dataset contains no edges; cannot compute edge stats.")

    def _finalize(total, sq_total, count):
        # Mean/variance entirely in float64; clamp variance at 0 to guard
        # sqrt against numerical negatives, and clamp std away from 0 so
        # z-scoring never divides by zero.
        mean64 = total / count
        var64 = sq_total / count - mean64 ** 2
        std = torch.sqrt(torch.clamp(var64, min=0.0)).float()
        return mean64.float(), torch.clamp(std, min=1e-6)

    node_mean, node_std = _finalize(node_sum, node_sq_sum, node_count)
    edge_mean, edge_std = _finalize(edge_sum, edge_sq_sum, edge_count)

    return {
        "node_mean": node_mean,
        "node_std": node_std,
        "edge_mean": edge_mean,
        "edge_std": edge_std,
        "node_count": node_count,
        "edge_count": edge_count,
    }


def save_stats(stats, path="data/normalization_stats.pt"):
    """Save normalization stats to disk, creating parent dirs as needed."""
    path = Path(path)
    path.parent.mkdir(parents=True, exist_ok=True)
    torch.save(stats, path)
    print(f"Saved normalization stats to {path}")
    print(f"  Node features: {stats['node_mean'].shape[0]}-dim, "
          f"from {stats['node_count']} nodes")
    print(f"  Edge features: {stats['edge_mean'].shape[0]}-dim, "
          f"from {stats['edge_count']} edges")


def load_stats(path="data/normalization_stats.pt"):
    """Load normalization stats from disk.

    weights_only=True is safe here: the stats dict holds only tensors
    and ints.
    """
    return torch.load(path, weights_only=True)


def normalize_graph(data, stats):
    """Apply z-score normalization to a PyG Data object (in-place).

    Parameters
    ----------
    data : Data
        Graph with x (node features) and edge_attr (edge features).
    stats : dict
        From compute_normalization_stats / load_stats.

    Returns
    -------
    data : Data
        Same object, modified in-place.
    """
    data.x = (data.x - stats["node_mean"]) / stats["node_std"]
    data.edge_attr = (data.edge_attr - stats["edge_mean"]) / stats["edge_std"]
    return data
def assign_mc_truth(sim_particle_ids, edeps, hit_disks, edge_index,
                    purity_threshold=0.7):
    """Assign edge labels from MC truth SimParticle information.

    Per hit, the dominant SimParticle is the one with the largest energy
    deposit; the hit is *ambiguous* when that particle's share of the
    total deposit falls below ``purity_threshold``. Non-ambiguous hits
    sharing the same (dominant SimParticle, disk) pair form one truth
    cluster.

    Parameters
    ----------
    sim_particle_ids : list of lists
        Per-hit contributing SimParticle IDs.
    edeps : list of lists
        Per-hit energy deposits, aligned with *sim_particle_ids*.
    hit_disks : np.ndarray of shape (n_hits,), int
    edge_index : np.ndarray of shape (2, n_edges), int
    purity_threshold : float, default 0.7

    Returns
    -------
    y_edge : np.ndarray of shape (n_edges,), int (0 or 1)
    edge_mask : np.ndarray of shape (n_edges,), bool (True = valid)
    hit_truth_cluster : np.ndarray of shape (n_hits,), int
        Unique truth-cluster ID per hit; -1 for ambiguous hits.
    is_ambiguous : np.ndarray of shape (n_hits,), bool
    """
    hit_disks = np.asarray(hit_disks)
    edge_index = np.asarray(edge_index)
    n_hits = len(sim_particle_ids)

    dominant_pid = np.full(n_hits, -1, dtype=np.int64)
    is_ambiguous = np.ones(n_hits, dtype=bool)

    for hit, (pids, deps) in enumerate(zip(sim_particle_ids, edeps)):
        if len(pids) == 0 or len(deps) == 0:
            continue  # no SimParticle info -> stays ambiguous
        dep_arr = np.asarray(deps, dtype=np.float64)
        total = dep_arr.sum()
        if total <= 0:
            continue
        winner = int(np.argmax(dep_arr))
        if dep_arr[winner] / total >= purity_threshold:
            is_ambiguous[hit] = False
            dominant_pid[hit] = pids[winner]

    # One truth-cluster ID per unique (dominant particle, disk) pair,
    # assigned in hit order.
    hit_truth_cluster = np.full(n_hits, -1, dtype=np.int64)
    id_of = {}
    for hit in range(n_hits):
        if is_ambiguous[hit]:
            continue
        key = (int(dominant_pid[hit]), int(hit_disks[hit]))
        if key not in id_of:
            id_of[key] = len(id_of)
        hit_truth_cluster[hit] = id_of[key]

    # Vectorized edge labels: positive iff both endpoints land in the
    # same valid truth cluster; edges touching ambiguous hits are masked.
    src, dst = edge_index[0], edge_index[1]
    edge_mask = ~(is_ambiguous[src] | is_ambiguous[dst])
    y_edge = ((hit_truth_cluster[src] == hit_truth_cluster[dst])
              & (hit_truth_cluster[src] != -1)).astype(np.int64)
    y_edge[~edge_mask] = 0

    return y_edge, edge_mask, hit_truth_cluster, is_ambiguous
def build_calo_root_map(sim_particle_ids_evt, ancestor_ids_evt,
                        hit_sim_ids, hit_crystal_ids, crystal_disk_map):
    """Build a mapping from (SimParticle, disk) -> calo-entrant root.

    The calo-entrant root for a SimParticle on a given disk is the
    highest ancestor that also deposited energy in that disk; a particle
    with no such ancestor (including cross-disk secondaries) is its own
    calo-entrant.

    Parameters
    ----------
    sim_particle_ids_evt : array-like
        ``calomcsim.id`` for one event — SimParticle IDs present in calo.
    ancestor_ids_evt : list of array-like
        ``calomcsim.ancestorSimIds`` for one event — per-SimParticle
        ancestor chains (ordered child→root).
    hit_sim_ids : list of array-like
        ``calohitsmc.simParticleIds`` for one event — per-hit contributing
        SimParticle IDs.
    hit_crystal_ids : array-like
        ``calohits.crystalId_`` for one event.
    crystal_disk_map : dict
        crystalId -> diskId.

    Returns
    -------
    calo_root_map : dict
        ``{(simParticle_id, disk_id): calo_entrant_id}``
    """
    known_ids = {int(s) for s in sim_particle_ids_evt}

    # Which disks each SimParticle deposited in, derived from hit data.
    disks_by_particle = {}
    for hit_idx, contributors in enumerate(hit_sim_ids):
        disk = crystal_disk_map.get(int(hit_crystal_ids[hit_idx]), -1)
        if disk < 0:
            continue  # crystal missing from the geometry map
        for contrib in contributors:
            disks_by_particle.setdefault(int(contrib), set()).add(disk)

    # For each (SimParticle, disk), walk up the ancestor chain; the chain
    # is ordered child→root, so the last qualifying ancestor is the
    # highest one that deposited on the same disk.
    calo_root_map = {}
    for idx, raw_sid in enumerate(sim_particle_ids_evt):
        sid = int(raw_sid)
        chain = [int(a) for a in ancestor_ids_evt[idx]]
        for disk in disks_by_particle.get(sid, set()):
            root = sid
            for anc in chain:
                if anc in known_ids and disk in disks_by_particle.get(anc, set()):
                    root = anc
            calo_root_map[(sid, disk)] = root

    return calo_root_map
def assign_mc_truth_primary(sim_particle_ids, edeps, hit_disks,
                            edge_index, calo_root_map,
                            purity_threshold=0.7):
    """Assign edge labels using calo-entrant (primary-level) truth.

    Like :func:`assign_mc_truth`, but per-hit energy deposits are first
    summed by calo-entrant root, so contributions from the same shower
    count together. This lowers ambiguity and merges secondary singletons
    into their parent shower's truth cluster.

    Parameters
    ----------
    sim_particle_ids : list of lists
        Per-hit SimParticle IDs.
    edeps : list of lists
        Per-hit energy deposits (aligned with *sim_particle_ids*).
    hit_disks : np.ndarray of shape (n_hits,), int
        Disk ID per hit.
    edge_index : np.ndarray of shape (2, n_edges), int
        Graph edge index.
    calo_root_map : dict
        ``{(simParticle_id, disk_id): calo_entrant_id}`` from
        :func:`build_calo_root_map`. A particle absent from the map is
        its own root.
    purity_threshold : float
        Minimum dominant calo-entrant purity to consider a hit
        non-ambiguous. Default 0.7.

    Returns
    -------
    y_edge : np.ndarray of shape (n_edges,), int (0 or 1)
    edge_mask : np.ndarray of shape (n_edges,), bool (True = valid)
    hit_truth_cluster : np.ndarray of shape (n_hits,), int
        Truth-cluster ID per hit; -1 for ambiguous hits.
    is_ambiguous : np.ndarray of shape (n_hits,), bool
    """
    hit_disks = np.asarray(hit_disks)
    edge_index = np.asarray(edge_index)
    n_hits = len(sim_particle_ids)

    dominant_root = np.full(n_hits, -1, dtype=np.int64)
    is_ambiguous = np.ones(n_hits, dtype=bool)

    for hit in range(n_hits):
        pids, deps = sim_particle_ids[hit], edeps[hit]
        if len(pids) == 0 or len(deps) == 0:
            continue  # no MC info -> stays ambiguous
        dep_arr = np.asarray(deps, dtype=np.float64)
        total = dep_arr.sum()
        if total <= 0:
            continue

        disk = int(hit_disks[hit])

        # Sum deposits per calo-entrant root (shower-level grouping).
        per_root = {}
        for pid, dep in zip(pids, deps):
            root = calo_root_map.get((int(pid), disk), int(pid))
            per_root[root] = per_root.get(root, 0.0) + float(dep)

        winner = max(per_root, key=per_root.get)
        if per_root[winner] / total >= purity_threshold:
            is_ambiguous[hit] = False
            dominant_root[hit] = winner

    # One truth-cluster ID per unique (calo root, disk) pair.
    hit_truth_cluster = np.full(n_hits, -1, dtype=np.int64)
    id_of = {}
    for hit in range(n_hits):
        if is_ambiguous[hit]:
            continue
        key = (int(dominant_root[hit]), int(hit_disks[hit]))
        if key not in id_of:
            id_of[key] = len(id_of)
        hit_truth_cluster[hit] = id_of[key]

    # Edge labels: positive iff both ends share a valid truth cluster.
    src, dst = edge_index[0], edge_index[1]
    edge_mask = ~(is_ambiguous[src] | is_ambiguous[dst])
    y_edge = ((hit_truth_cluster[src] == hit_truth_cluster[dst])
              & (hit_truth_cluster[src] != -1)).astype(np.int64)
    y_edge[~edge_mask] = 0

    return y_edge, edge_mask, hit_truth_cluster, is_ambiguous
calorimeter. + +## Data files + +- `data/crystal_map_raw.csv` — Full crystal/SiPM channel map (2740 rows). Source of truth. +- `data/crystal_geometry.csv` — Derived: `crystalId,diskId,x_mm,y_mm` for 1348 crystals (1344 CAL + 4 CAPHRI). +- `data/crystal_neighbors.csv` — Derived: `crystalId,neighbors` (immediate geometric neighbors within 1.5× crystal pitch ≈ 51.5 mm). + +All three files are static for the MDC2020 geometry and committed to the repo. + +## CAPHRI crystals + +4 crystal bars (IDs 582, 609, 610, 637) are **not CsI** — they are CAPHRI type, all on disk 0. + +## Python API + +```python +from src.geometry.crystal_geometry import load_crystal_map, load_neighbor_map + +crystal_map = load_crystal_map() # {crystalId: (diskId, x_mm, y_mm)} +neighbor_map = load_neighbor_map() # {crystalId: [neighbor_ids]} +``` diff --git a/CaloClusterGNN/src/geometry/__init__.py b/CaloClusterGNN/src/geometry/__init__.py new file mode 100644 index 0000000..73ffb2d --- /dev/null +++ b/CaloClusterGNN/src/geometry/__init__.py @@ -0,0 +1,6 @@ +"""Crystal geometry for the Mu2e electromagnetic calorimeter. + +Provides fast lookup from crystalId to (diskId, x_mm, y_mm) and +crystal neighbor adjacency, loaded from static CSV files derived +from the Offline geometry service. +""" diff --git a/CaloClusterGNN/src/geometry/crystal_geometry.py b/CaloClusterGNN/src/geometry/crystal_geometry.py new file mode 100644 index 0000000..b4a3495 --- /dev/null +++ b/CaloClusterGNN/src/geometry/crystal_geometry.py @@ -0,0 +1,64 @@ +""" +Crystal geometry loader. + +Reads data/crystal_geometry.csv (derived from data/crystal_map_raw.csv) +and provides fast lookup from crystalId to (disk, x, y) in the disk-local frame. 
+""" + +import csv +from pathlib import Path + +# Default path relative to project root +_DEFAULT_CSV = Path(__file__).parents[2] / "data" / "crystal_geometry.csv" +_DEFAULT_NEIGHBORS_CSV = Path(__file__).parents[2] / "data" / "crystal_neighbors.csv" + + +def load_crystal_map(csv_path=None): + """ + Load crystalId -> (diskId, x_mm, y_mm) from the geometry CSV. + + Returns + ------- + dict[int, tuple[int, float, float]] + Mapping from global crystalId to (diskId, x_mm, y_mm). + """ + path = Path(csv_path or _DEFAULT_CSV) + if not path.exists(): + raise FileNotFoundError( + f"Crystal geometry file not found: {path}\n" + "Run the geometry dump first (see src/geometry/README.md)." + ) + crystal_map = {} + with open(path) as f: + reader = csv.DictReader(f) + for row in reader: + cid = int(row["crystalId"]) + crystal_map[cid] = (int(row["diskId"]), float(row["x_mm"]), float(row["y_mm"])) + return crystal_map + + +def load_neighbor_map(csv_path=None): + """ + Load crystalId -> list[neighbor_crystalId] from the neighbors CSV. + + Includes both immediate neighbors and next-ring neighbors + (neighbors() + nextNeighbors() from the Offline geometry service). + + Returns + ------- + dict[int, list[int]] + """ + path = Path(csv_path or _DEFAULT_NEIGHBORS_CSV) + if not path.exists(): + raise FileNotFoundError( + f"Crystal neighbors file not found: {path}\n" + "Run the geometry dump first (see src/geometry/README.md)." + ) + neighbor_map = {} + with open(path) as f: + reader = csv.DictReader(f) + for row in reader: + cid = int(row["crystalId"]) + neighbors = [int(n) for n in row["neighbors"].split(";") if n] + neighbor_map[cid] = neighbors + return neighbor_map diff --git a/CaloClusterGNN/src/inference/__init__.py b/CaloClusterGNN/src/inference/__init__.py new file mode 100644 index 0000000..9cc0eaa --- /dev/null +++ b/CaloClusterGNN/src/inference/__init__.py @@ -0,0 +1,7 @@ +"""Inference pipeline: edge predictions -> reconstructed clusters. 
+ +Modules +------- +cluster_reco Symmetrize edge scores, threshold, connected components, cleanup. +postprocess Per-cluster physics features (energy, centroid, time, RMS width). +""" diff --git a/CaloClusterGNN/src/inference/cluster_reco.py b/CaloClusterGNN/src/inference/cluster_reco.py new file mode 100644 index 0000000..785586a --- /dev/null +++ b/CaloClusterGNN/src/inference/cluster_reco.py @@ -0,0 +1,319 @@ +""" +Cluster reconstruction from GNN edge predictions. + +Given edge logits from SimpleEdgeNet: + 1. Sigmoid → probabilities + 2. Symmetrize directed edge scores (mean of p_ij and p_ji) + 3. Threshold: keep edges with p > tau_edge + 4. Connected components (or BFS with expand_cut) → cluster labels + 5. Cleanup: remove clusters below min_hits or min_energy +""" + +from collections import deque + +import numpy as np +import torch +from scipy.sparse import coo_matrix +from scipy.sparse.csgraph import connected_components + + +def _bfs_expand_cut(adj_list, energies, expand_cut): + """BFS traversal with expand_cut — mirrors Offline's ClusterFinder. + + Seeds from highest-energy hits. Each hit is added to the cluster of + its seed, but only hits with energy >= expand_cut continue the BFS + expansion. Low-energy hits join but don't recruit neighbors. + + This matches BFS's ExpandCut behavior: a low-energy hit is a leaf in + the traversal, not a bridge. + + Parameters + ---------- + adj_list : list of lists + Adjacency list from thresholded GNN edges. + energies : ndarray (N,) + Hit energies in MeV. + expand_cut : float + Minimum energy (MeV) to expand from a hit. + + Returns + ------- + labels : ndarray (N,) int64 + Cluster ID per node. Every node gets assigned (no -1). 
+ """ + n = len(energies) + labels = np.full(n, -1, dtype=np.int64) + cluster_id = 0 + + for seed in np.argsort(-energies): + if labels[seed] >= 0: + continue + queue = deque([seed]) + labels[seed] = cluster_id + while queue: + node = queue.popleft() + if energies[node] < expand_cut: + continue # joins cluster but doesn't expand + for neighbor in adj_list[node]: + if labels[neighbor] < 0: + labels[neighbor] = cluster_id + queue.append(neighbor) + cluster_id += 1 + + return labels + + +def symmetrize_edge_scores(edge_index, edge_probs): + """Average directed edge scores to get undirected scores. + + For each pair (i, j) that appears in both directions, the undirected + score is mean(p_ij, p_ji). Edges that appear in only one direction + keep their original score. + + Parameters + ---------- + edge_index : ndarray (2, E) + edge_probs : ndarray (E,) + + Returns + ------- + edge_index_undir : ndarray (2, E_undir) + Undirected edges (i < j). + edge_probs_undir : ndarray (E_undir,) + """ + src, dst = edge_index[0], edge_index[1] + + # Build dict: (min, max) → list of probs + pair_scores = {} + for k in range(len(src)): + key = (min(src[k], dst[k]), max(src[k], dst[k])) + if key not in pair_scores: + pair_scores[key] = [] + pair_scores[key].append(edge_probs[k]) + + n_undir = len(pair_scores) + ei_undir = np.empty((2, n_undir), dtype=np.int64) + ep_undir = np.empty(n_undir, dtype=np.float64) + + for idx, (key, scores) in enumerate(pair_scores.items()): + ei_undir[0, idx] = key[0] + ei_undir[1, idx] = key[1] + ep_undir[idx] = np.mean(scores) + + return ei_undir, ep_undir + + +def reconstruct_clusters(edge_index, edge_logits, n_nodes, energies=None, + tau_edge=0.5, min_hits=2, min_energy_mev=10.0, + symmetrize=True, node_logits=None, tau_node=None, + expand_cut=None, saliency_prune=False, + bfs_expand_cut=None): + """Reconstruct clusters from edge predictions. + + Parameters + ---------- + edge_index : Tensor or ndarray (2, E) + Directed edge list. 
+ edge_logits : Tensor or ndarray (E,) + Raw logits (pre-sigmoid) from the model. + n_nodes : int + Number of nodes in the graph. + energies : Tensor or ndarray (N,), optional + Hit energies in MeV (raw, not log-transformed). Used for min_energy + cleanup and expand_cut. If None, energy cleanup is skipped. + tau_edge : float + Edge classification threshold on probabilities. + min_hits : int + Minimum hits per cluster. Smaller clusters get label -1. + min_energy_mev : float + Minimum total energy per cluster (MeV). Below-threshold clusters + get label -1. + symmetrize : bool + If True, average directed scores before thresholding. + node_logits : Tensor or ndarray (N,), optional + Raw node saliency logits from CaloClusterNet. If provided with + tau_node, used for bridge suppression and/or post-clustering pruning. + tau_node : float or None + Node saliency threshold. When saliency_prune is False, suppresses + edges where BOTH endpoints are non-salient (bridge suppression). + When saliency_prune is True, removes non-salient hits from clusters + after connected components (post-clustering pruning). + expand_cut : float or None + Analogous to BFS ExpandCut. Edges where BOTH endpoints have + energy below this threshold are suppressed — prevents low-energy + hits from bridging between clusters. Hits below expand_cut can + still join a cluster through an edge to a high-energy hit, but + cannot act as bridges. If None, no expand cut is applied. + saliency_prune : bool + If True, after connected components, remove hits with saliency below + tau_node from their clusters. This trims stray pileup hits that were + absorbed via edges to salient cluster members. + bfs_expand_cut : float or None + If provided, replaces connected_components with BFS traversal that + mirrors Offline's ClusterFinder. Hits with energy >= bfs_expand_cut + expand to neighbors; lower-energy hits join but don't expand (leaves + in the traversal). 
This preserves completeness (all hits join clusters) + while preventing low-energy strays from bridging between clusters. + Mutually exclusive with expand_cut. + + Returns + ------- + cluster_labels : ndarray (N,) + Integer cluster ID per node. -1 = unclustered. + edge_probs : ndarray (E,) + Sigmoid probabilities (useful for downstream analysis). + """ + # Convert to numpy + if isinstance(edge_index, torch.Tensor): + edge_index = edge_index.cpu().numpy() + if isinstance(edge_logits, torch.Tensor): + edge_logits = edge_logits.detach().cpu().numpy() + if energies is not None and isinstance(energies, torch.Tensor): + energies = energies.cpu().numpy() + if node_logits is not None and isinstance(node_logits, torch.Tensor): + node_logits = node_logits.detach().cpu().numpy() + + edge_probs_raw = 1.0 / (1.0 + np.exp(-edge_logits.astype(np.float64))) + + # Compute node saliency probabilities (used for bridge suppression or pruning) + node_probs = None + if node_logits is not None and tau_node is not None: + node_probs = 1.0 / (1.0 + np.exp(-node_logits.astype(np.float64))) + + # Bridge suppression (pre-clustering): zero out edges where BOTH endpoints + # are non-salient. Only used when saliency_prune is False. + if node_probs is not None and not saliency_prune: + src, dst = edge_index[0], edge_index[1] + both_non_salient = (node_probs[src] < tau_node) & (node_probs[dst] < tau_node) + edge_probs_raw[both_non_salient] = 0.0 + + if symmetrize: + ei_sym, ep_sym = symmetrize_edge_scores(edge_index, edge_probs_raw) + else: + ei_sym, ep_sym = edge_index, edge_probs_raw + + # Threshold + keep = ep_sym >= tau_edge + + cluster_labels = np.full(n_nodes, -1, dtype=np.int64) + + if keep.sum() == 0: + return cluster_labels, edge_probs_raw + + src = ei_sym[0, keep] + dst = ei_sym[1, keep] + + # Expand cut: suppress edges where both endpoints are below threshold. 
+ # Low-energy hits can still join a cluster via an edge to a high-energy + # hit, but two low-energy hits cannot bridge between clusters. + if expand_cut is not None and energies is not None: + both_low = (energies[src] < expand_cut) & (energies[dst] < expand_cut) + src = src[~both_low] + dst = dst[~both_low] + if len(src) == 0: + return cluster_labels, edge_probs_raw + + if bfs_expand_cut is not None and energies is not None: + # BFS traversal with expand_cut (mirrors Offline's ClusterFinder) + adj_list = [[] for _ in range(n_nodes)] + for i in range(len(src)): + adj_list[src[i]].append(dst[i]) + adj_list[dst[i]].append(src[i]) + cluster_labels[:] = _bfs_expand_cut(adj_list, energies, bfs_expand_cut) + else: + # Standard connected components + src_both = np.concatenate([src, dst]) + dst_both = np.concatenate([dst, src]) + vals = np.ones(len(src_both), dtype=np.float32) + adj = coo_matrix((vals, (src_both, dst_both)), shape=(n_nodes, n_nodes)) + n_components, labels = connected_components(adj, directed=False) + cluster_labels[:] = labels + + # Cleanup: min_hits + for cid in np.unique(cluster_labels): + if cid < 0: + continue + mask = cluster_labels == cid + if mask.sum() < min_hits: + cluster_labels[mask] = -1 + + # Cleanup: min_energy + if energies is not None: + for cid in np.unique(cluster_labels): + if cid == -1: + continue + mask = cluster_labels == cid + if energies[mask].sum() < min_energy_mev: + cluster_labels[mask] = -1 + + # Saliency pruning (post-clustering): remove non-salient hits from clusters. + # This trims stray pileup hits that were absorbed into clusters via edges + # to salient cluster members — the failure mode that bridge suppression misses. + if saliency_prune and node_probs is not None: + for i in range(n_nodes): + if cluster_labels[i] >= 0 and node_probs[i] < tau_node: + cluster_labels[i] = -1 + + # Relabel to contiguous IDs (0, 1, 2, ...) 
+ valid_ids = np.unique(cluster_labels[cluster_labels >= 0]) + remap = {old: new for new, old in enumerate(valid_ids)} + result = np.full_like(cluster_labels, -1) + for old, new in remap.items(): + result[cluster_labels == old] = new + cluster_labels = result + + return cluster_labels, edge_probs_raw + + +def predict_clusters(model, data, device="cpu", tau_edge=0.5, + min_hits=2, min_energy_mev=10.0, tau_node=None): + """Run model inference and return cluster labels. + + Convenience wrapper: forward pass → reconstruct_clusters. + + Parameters + ---------- + model : nn.Module + Trained SimpleEdgeNet or CaloClusterNet. + data : torch_geometric.data.Data + Single graph with x, edge_index, edge_attr. + device : str or torch.device + tau_edge, min_hits, min_energy_mev : see reconstruct_clusters. + tau_node : float or None + Node saliency threshold. Only used with models that return + node_logits (CaloClusterNet). + + Returns + ------- + cluster_labels : ndarray (N,) + edge_probs : ndarray (E,) + """ + model.eval() + data_dev = data.clone().to(device) + with torch.no_grad(): + output = model(data_dev) + + if isinstance(output, dict): + logits = output["edge_logits"] + node_logits = output.get("node_logits") + else: + logits = output + node_logits = None + + # Use raw energies if available (node feature 0 is log(1+E)) + energies = None + if data.x is not None: + log_e = data.x[:, 0].cpu().numpy() + energies = np.exp(log_e) - 1.0 + + return reconstruct_clusters( + edge_index=data.edge_index, + edge_logits=logits.cpu(), + n_nodes=data.x.shape[0], + energies=energies, + tau_edge=tau_edge, + min_hits=min_hits, + min_energy_mev=min_energy_mev, + node_logits=node_logits.cpu() if node_logits is not None else None, + tau_node=tau_node, + ) diff --git a/CaloClusterGNN/src/inference/postprocess.py b/CaloClusterGNN/src/inference/postprocess.py new file mode 100644 index 0000000..4dcb5f5 --- /dev/null +++ b/CaloClusterGNN/src/inference/postprocess.py @@ -0,0 +1,109 @@ +""" +Cluster 
postprocessing: compute per-cluster physics features. + +Given cluster labels and hit-level data, compute per-cluster: + - Hit list, n_hits, total energy + - Energy-weighted centroid (x, y), energy-weighted time + - RMS spatial width, max-hit energy fraction +""" + +import numpy as np + + +def compute_cluster_features(cluster_labels, positions, energies, times): + """Compute physics features for each reconstructed cluster. + + Parameters + ---------- + cluster_labels : ndarray (N,) + Integer cluster ID per hit. -1 = unclustered. + positions : ndarray (N, 2) + Hit positions (x, y) in mm. + energies : ndarray (N,) + Hit energies in MeV. + times : ndarray (N,) + Hit times in ns. + + Returns + ------- + clusters : list of dict + One dict per cluster with keys: + - cluster_id : int + - hit_indices : list of int + - n_hits : int + - total_energy : float (MeV) + - centroid_x, centroid_y : float (energy-weighted, mm) + - time : float (energy-weighted, ns) + - rms_width : float (energy-weighted RMS distance from centroid, mm) + - max_hit_fraction : float (energy of most energetic hit / total) + """ + valid_ids = np.unique(cluster_labels[cluster_labels >= 0]) + clusters = [] + + for cid in valid_ids: + mask = cluster_labels == cid + idx = np.where(mask)[0] + + e = energies[idx] + pos = positions[idx] + t = times[idx] + + total_e = e.sum() + if total_e <= 0: + continue + + w = e / total_e + cx = np.dot(w, pos[:, 0]) + cy = np.dot(w, pos[:, 1]) + ct = np.dot(w, t) + + dx = pos[:, 0] - cx + dy = pos[:, 1] - cy + rms = np.sqrt(np.dot(w, dx**2 + dy**2)) + + clusters.append({ + "cluster_id": int(cid), + "hit_indices": idx.tolist(), + "n_hits": int(len(idx)), + "total_energy": float(total_e), + "centroid_x": float(cx), + "centroid_y": float(cy), + "time": float(ct), + "rms_width": float(rms), + "max_hit_fraction": float(e.max() / total_e), + }) + + return clusters + + +def compute_summary_statistics(clusters): + """Compute aggregate statistics over all clusters. 
+ + Parameters + ---------- + clusters : list of dict + Output from compute_cluster_features. + + Returns + ------- + dict with n_clusters, mean/median n_hits, mean/median energy. + """ + if not clusters: + return { + "n_clusters": 0, + "mean_n_hits": 0.0, + "median_n_hits": 0.0, + "mean_energy": 0.0, + "median_energy": 0.0, + } + + sizes = np.array([c["n_hits"] for c in clusters]) + energies = np.array([c["total_energy"] for c in clusters]) + + return { + "n_clusters": len(clusters), + "mean_n_hits": float(sizes.mean()), + "median_n_hits": float(np.median(sizes)), + "mean_energy": float(energies.mean()), + "median_energy": float(np.median(energies)), + } diff --git a/CaloClusterGNN/src/models/__init__.py b/CaloClusterGNN/src/models/__init__.py new file mode 100644 index 0000000..d6ad72a --- /dev/null +++ b/CaloClusterGNN/src/models/__init__.py @@ -0,0 +1,43 @@ +"""GNN model architectures for calorimeter edge classification. + +Models +------ +SimpleEdgeNet Lightweight baseline (215K params): MLP encoders, sum MP, edge MLP head. +CaloClusterNet Multi-task model (676K params): residual MP, gated aggregation, + node saliency + edge clustering heads. + +The ``build_model(cfg)`` factory instantiates the model specified in a YAML config. +""" + +from src.models.simple_edge_net import SimpleEdgeNet +from src.models.calo_cluster_net import CaloClusterNet + + +def build_model(cfg): + """Instantiate model from config dict. + + Parameters + ---------- + cfg : dict + Full config with 'model' section containing 'name', 'hidden_dim', + 'n_mp_layers', 'dropout'. 
+ """ + model_cfg = cfg["model"] + name = model_cfg.get("name", "SimpleEdgeNet") + + if name == "SimpleEdgeNet": + return SimpleEdgeNet( + node_dim=6, edge_dim=8, + hidden_dim=model_cfg.get("hidden_dim", 64), + n_mp_layers=model_cfg.get("n_mp_layers", 3), + dropout=model_cfg.get("dropout", 0.1), + ) + elif name == "CaloClusterNet": + return CaloClusterNet( + node_dim=6, edge_dim=8, + hidden_dim=model_cfg.get("hidden_dim", 96), + n_mp_layers=model_cfg.get("n_mp_layers", 4), + dropout=model_cfg.get("dropout", 0.1), + ) + else: + raise ValueError(f"Unknown model: {name}") diff --git a/CaloClusterGNN/src/models/calo_cluster_net.py b/CaloClusterGNN/src/models/calo_cluster_net.py new file mode 100644 index 0000000..a6bf732 --- /dev/null +++ b/CaloClusterGNN/src/models/calo_cluster_net.py @@ -0,0 +1,101 @@ +""" +CaloClusterNet — multi-task edge-centric GNN for calorimeter clustering. + +Architecture: + - Node encoder: MLP(6 → hidden → hidden) + - Edge encoder: MLP(8 → hidden → hidden) + - N × EdgeAwareResBlock (residual MP with gated aggregation + global context) + - Node saliency head → q_i logit + - Edge clustering head → s_ij logit ([h_i, h_j, e_ij, |h_i-h_j|]) + +Outputs raw logits — apply sigmoid in loss/inference. + +Forward returns a dict: + {"edge_logits": Tensor(E,), "node_logits": Tensor(N,)} +so that the trainer can compute multi-task loss. +""" + +import torch +import torch.nn as nn + +from src.models.layers import EdgeAwareResBlock +from src.models.heads import NodeSaliencyHead, EdgeClusteringHead + + +class CaloClusterNet(nn.Module): + """Multi-task edge-centric GNN for calorimeter clustering. + + Parameters + ---------- + node_dim : int + Input node feature dimension (default 6). + edge_dim : int + Input edge feature dimension (default 8). + hidden_dim : int + Hidden dimension throughout (default 96). + n_mp_layers : int + Number of EdgeAwareResBlock rounds (default 4). + dropout : float + Dropout rate in all MLPs (default 0.1). 
+ """ + + def __init__(self, node_dim=6, edge_dim=8, hidden_dim=96, n_mp_layers=4, + dropout=0.1): + super().__init__() + self.n_mp_layers = n_mp_layers + + # Encoders + self.node_encoder = nn.Sequential( + nn.Linear(node_dim, hidden_dim), + nn.GELU(), + nn.Dropout(dropout), + nn.Linear(hidden_dim, hidden_dim), + ) + self.edge_encoder = nn.Sequential( + nn.Linear(edge_dim, hidden_dim), + nn.GELU(), + nn.Dropout(dropout), + nn.Linear(hidden_dim, hidden_dim), + ) + + # Message-passing blocks + self.mp_blocks = nn.ModuleList([ + EdgeAwareResBlock(hidden_dim=hidden_dim, dropout=dropout) + for _ in range(n_mp_layers) + ]) + + # Output heads + self.node_head = NodeSaliencyHead(hidden_dim=hidden_dim, dropout=dropout) + self.edge_head = EdgeClusteringHead(hidden_dim=hidden_dim, dropout=dropout) + + def forward(self, data): + """Forward pass. + + Parameters + ---------- + data : torch_geometric.data.Data + Must have x, edge_index, edge_attr. + + Returns + ------- + dict with: + edge_logits : Tensor (E,) + node_logits : Tensor (N,) + """ + x = data.x + edge_index = data.edge_index + edge_attr = data.edge_attr + + # Encode + h = self.node_encoder(x) + e = self.edge_encoder(edge_attr) + + # Message passing + for block in self.mp_blocks: + h, e = block(h, e, edge_index) + + # Heads + edge_logits = self.edge_head(h, e, edge_index) + node_logits = self.node_head(h) + + return {"edge_logits": edge_logits, "node_logits": node_logits} diff --git a/CaloClusterGNN/src/models/heads.py b/CaloClusterGNN/src/models/heads.py new file mode 100644 index 0000000..4056a71 --- /dev/null +++ b/CaloClusterGNN/src/models/heads.py @@ -0,0 +1,79 @@ +""" +Output heads for CaloClusterNet. + +NodeSaliencyHead — per-node binary classification (signal vs noise). +EdgeClusteringHead — per-edge binary classification (same cluster). +""" + +import torch +import torch.nn as nn + + +class NodeSaliencyHead(nn.Module): + """Predict per-node saliency (signal probability). 
+ + Architecture: Linear(hidden, 64) → GELU → Dropout → Linear(64, 1) + Returns raw logits (apply sigmoid externally). + """ + + def __init__(self, hidden_dim=96, dropout=0.1): + super().__init__() + self.net = nn.Sequential( + nn.Linear(hidden_dim, 64), + nn.GELU(), + nn.Dropout(dropout), + nn.Linear(64, 1), + ) + + def forward(self, h): + """ + Parameters + ---------- + h : Tensor (N, hidden_dim) + + Returns + ------- + logits : Tensor (N,) + """ + return self.net(h).squeeze(-1) + + +class EdgeClusteringHead(nn.Module): + """Predict per-edge same-cluster probability. + + Input: [h_i, h_j, e_ij, |h_i - h_j|] → 4*hidden_dim + Architecture: MLP → 1 logit + Returns raw logits (apply sigmoid externally). + """ + + def __init__(self, hidden_dim=96, dropout=0.1): + super().__init__() + input_dim = 4 * hidden_dim # h_i, h_j, e_ij, |h_i - h_j| + self.net = nn.Sequential( + nn.Linear(input_dim, 2 * hidden_dim), + nn.GELU(), + nn.Dropout(dropout), + nn.Linear(2 * hidden_dim, hidden_dim), + nn.GELU(), + nn.Dropout(dropout), + nn.Linear(hidden_dim, 1), + ) + + def forward(self, h, e, edge_index): + """ + Parameters + ---------- + h : Tensor (N, hidden_dim) + e : Tensor (E, hidden_dim) + edge_index : Tensor (2, E) + + Returns + ------- + logits : Tensor (E,) + """ + src, dst = edge_index + h_src = h[src] + h_dst = h[dst] + diff = torch.abs(h_src - h_dst) + edge_repr = torch.cat([h_src, h_dst, e, diff], dim=1) + return self.net(edge_repr).squeeze(-1) diff --git a/CaloClusterGNN/src/models/layers.py b/CaloClusterGNN/src/models/layers.py new file mode 100644 index 0000000..4ceb3bb --- /dev/null +++ b/CaloClusterGNN/src/models/layers.py @@ -0,0 +1,93 @@ +""" +EdgeAwareResBlock — residual message-passing block for CaloClusterNet. + +Each block performs four steps: + A. Edge update (residual): m_ij = MLP([h_i, h_j, e_ij]); e_ij ← LN(e_ij + m_ij) + B. Gated aggregation: g_ij = σ(Linear(e_ij)); a_i = Σ_j g_ji · e_ji + C. 
Node update (residual): u_i = MLP([h_i, a_i]); h_i ← LN(h_i + u_i) + D. Global context: c = mean(h_i); h_i ← h_i + Linear(c) +""" + +import torch +import torch.nn as nn +from torch_geometric.utils import scatter + + +class EdgeAwareResBlock(nn.Module): + """One round of edge-aware message passing with residual connections. + + Parameters + ---------- + hidden_dim : int + Dimension of node and edge embeddings (default 96). + dropout : float + Dropout rate in MLPs (default 0.1). + """ + + def __init__(self, hidden_dim=96, dropout=0.1): + super().__init__() + self.hidden_dim = hidden_dim + + # A: Edge update — [h_i, h_j, e_ij] (3*hidden) → hidden + self.edge_mlp = nn.Sequential( + nn.Linear(3 * hidden_dim, 2 * hidden_dim), + nn.GELU(), + nn.Dropout(dropout), + nn.Linear(2 * hidden_dim, hidden_dim), + ) + self.edge_norm = nn.LayerNorm(hidden_dim) + + # B: Gated aggregation — gate per edge + self.gate_linear = nn.Linear(hidden_dim, 1) + + # C: Node update — [h_i, a_i] (2*hidden) → hidden + self.node_mlp = nn.Sequential( + nn.Linear(2 * hidden_dim, 2 * hidden_dim), + nn.GELU(), + nn.Dropout(dropout), + nn.Linear(2 * hidden_dim, hidden_dim), + ) + self.node_norm = nn.LayerNorm(hidden_dim) + + # D: Global context — project mean node embedding back + self.global_linear = nn.Linear(hidden_dim, hidden_dim) + + def forward(self, h, e, edge_index): + """Forward pass. + + Parameters + ---------- + h : Tensor (N, hidden_dim) + Node embeddings. + e : Tensor (E, hidden_dim) + Edge embeddings. + edge_index : Tensor (2, E) + src → dst edge list. + + Returns + ------- + h : Tensor (N, hidden_dim) + Updated node embeddings. + e : Tensor (E, hidden_dim) + Updated edge embeddings. 
+ """ + src, dst = edge_index + + # A: Edge update with residual + e_in = torch.cat([h[src], h[dst], e], dim=1) + e = self.edge_norm(e + self.edge_mlp(e_in)) + + # B: Gated aggregation (messages flow dst ← src, aggregate over src) + gate = torch.sigmoid(self.gate_linear(e)) # (E, 1) + msg = gate * e # (E, hidden) + agg = scatter(msg, dst, dim=0, dim_size=h.size(0), reduce="sum") + + # C: Node update with residual + h_in = torch.cat([h, agg], dim=1) + h = self.node_norm(h + self.node_mlp(h_in)) + + # D: Global context injection + ctx = h.mean(dim=0, keepdim=True) # (1, hidden) + h = h + self.global_linear(ctx) + + return h, e diff --git a/CaloClusterGNN/src/models/simple_edge_net.py b/CaloClusterGNN/src/models/simple_edge_net.py new file mode 100644 index 0000000..1a886ea --- /dev/null +++ b/CaloClusterGNN/src/models/simple_edge_net.py @@ -0,0 +1,124 @@ +""" +SimpleEdgeNet — lightweight edge-classification GNN baseline. + +Architecture: + - Node encoder: MLP(6 → hidden → hidden) + - Edge encoder: MLP(8 → hidden → hidden) + - N message-passing rounds (sum aggregation, no gating, no residual) + - Edge head: MLP([h_i || h_j || e_ij] → 2*hidden → hidden → 1) + +Outputs raw logits (no sigmoid) — apply sigmoid in loss/inference. +""" + +import torch +import torch.nn as nn +from torch_geometric.utils import scatter + + +class MLP(nn.Module): + """Simple multi-layer perceptron with ReLU.""" + + def __init__(self, dims, dropout=0.0): + super().__init__() + layers = [] + for i in range(len(dims) - 1): + layers.append(nn.Linear(dims[i], dims[i + 1])) + if i < len(dims) - 2: + layers.append(nn.ReLU()) + if dropout > 0: + layers.append(nn.Dropout(dropout)) + self.net = nn.Sequential(*layers) + + def forward(self, x): + """Apply the MLP layers sequentially to input ``x``.""" + return self.net(x) + + +class SimpleEdgeNet(nn.Module): + """Edge-classification GNN with simple sum message passing. + + Parameters + ---------- + node_dim : int + Input node feature dimension (default 6). 
+ edge_dim : int + Input edge feature dimension (default 8). + hidden_dim : int + Hidden dimension throughout (default 64). + n_mp_layers : int + Number of message-passing rounds (default 3). + dropout : float + Dropout rate in MLPs (default 0.1). + """ + + def __init__(self, node_dim=6, edge_dim=8, hidden_dim=64, n_mp_layers=3, + dropout=0.1): + super().__init__() + self.n_mp_layers = n_mp_layers + + # Encoders + self.node_encoder = MLP([node_dim, hidden_dim, hidden_dim], dropout=dropout) + self.edge_encoder = MLP([edge_dim, hidden_dim, hidden_dim], dropout=dropout) + + # Message-passing layers + # Each round: edge update MLP + node update MLP + self.edge_updates = nn.ModuleList() + self.node_updates = nn.ModuleList() + for _ in range(n_mp_layers): + # Edge update: [h_i, h_j, e_ij] -> hidden + self.edge_updates.append( + MLP([3 * hidden_dim, 2 * hidden_dim, hidden_dim], dropout=dropout) + ) + # Node update: [h_i, aggregated_messages] -> hidden + self.node_updates.append( + MLP([2 * hidden_dim, 2 * hidden_dim, hidden_dim], dropout=dropout) + ) + + # Edge classification head: [h_i, h_j, e_ij] -> 1 + self.edge_head = MLP( + [3 * hidden_dim, 2 * hidden_dim, hidden_dim, 1], + dropout=dropout, + ) + + def forward(self, data): + """Forward pass. + + Parameters + ---------- + data : torch_geometric.data.Data + Must have x, edge_index, edge_attr. + + Returns + ------- + edge_logits : Tensor, shape (E,) + Raw logits for edge classification (same-cluster probability + after sigmoid). 
+ """ + x = data.x + edge_index = data.edge_index + edge_attr = data.edge_attr + + # Encode + h = self.node_encoder(x) # (N, hidden) + e = self.edge_encoder(edge_attr) # (E, hidden) + + src, dst = edge_index # src -> dst + + # Message passing + for k in range(self.n_mp_layers): + # Edge update + e_in = torch.cat([h[src], h[dst], e], dim=1) # (E, 3*hidden) + e = self.edge_updates[k](e_in) # (E, hidden) + + # Aggregate messages to destination nodes (sum) + agg = scatter(e, dst, dim=0, dim_size=h.size(0), reduce="sum") + + # Node update + h_in = torch.cat([h, agg], dim=1) # (N, 2*hidden) + h = self.node_updates[k](h_in) # (N, hidden) + + # Edge classification + edge_repr = torch.cat([h[src], h[dst], e], dim=1) # (E, 3*hidden) + edge_logits = self.edge_head(edge_repr).squeeze(-1) # (E,) + + return edge_logits diff --git a/CaloClusterGNN/src/training/__init__.py b/CaloClusterGNN/src/training/__init__.py new file mode 100644 index 0000000..40d6bd4 --- /dev/null +++ b/CaloClusterGNN/src/training/__init__.py @@ -0,0 +1,8 @@ +"""Training infrastructure for edge-classification GNN. + +Modules +------- +trainer Train/val loop with early stopping, LR scheduling, checkpointing. +losses Masked BCE, node saliency loss, consistency regularizer, multi-task loss. +metrics Edge-level (precision, recall, F1, AUC) and cluster-level (purity, completeness). +""" diff --git a/CaloClusterGNN/src/training/losses.py b/CaloClusterGNN/src/training/losses.py new file mode 100644 index 0000000..1fd36f5 --- /dev/null +++ b/CaloClusterGNN/src/training/losses.py @@ -0,0 +1,177 @@ +""" +Loss functions for edge classification training. + +Supports: + - Class-balanced BCE (per-edge weights inversely proportional to class frequency) + - Optional minority subsampling during training + - Edge mask to exclude ambiguous/unassigned edges from loss +""" + +import torch +import torch.nn.functional as F + + +def compute_class_weights(dataset): + """Compute class balance info from training dataset. 
+
+    Returns
+    -------
+    dict with n_pos, n_neg, pos_weight (neg/pos ratio for BCE).
+    """
+    n_pos = 0
+    n_neg = 0
+    for data in dataset:
+        mask = data.edge_mask.bool()
+        labels = data.y_edge[mask]
+        n_pos += (labels == 1).sum().item()
+        n_neg += (labels == 0).sum().item()
+
+    total = n_pos + n_neg  # informational only; not used below
+    # pos_weight for BCE: ratio neg/pos — upweights positives when they are rare
+    # When positives dominate (pos_weight < 1), this downweights them
+    pos_weight = torch.tensor(n_neg / n_pos) if n_pos > 0 else torch.tensor(1.0)
+
+    return {
+        "n_pos": n_pos,
+        "n_neg": n_neg,
+        "pos_weight": pos_weight,
+    }
+
+
+def masked_bce_loss(logits, targets, mask, pos_weight=None):
+    """Compute BCE loss on masked edges with optional class reweighting.
+
+    Parameters
+    ----------
+    logits : Tensor (E,)
+        Raw edge logits (pre-sigmoid).
+    targets : Tensor (E,)
+        Binary edge labels.
+    mask : Tensor (E,)
+        Boolean mask — loss computed only where True.
+    pos_weight : Tensor or None
+        Weight for positive class in BCE. Values < 1 downweight positives,
+        > 1 upweight positives.
+
+    Returns
+    -------
+    loss : scalar Tensor
+    """
+    m = mask.bool()
+    logits_m = logits[m]
+    targets_m = targets[m].float()
+
+    # All edges masked out: return a zero that still participates in autograd
+    # so the caller's backward pass doesn't fail on an empty graph.
+    if logits_m.numel() == 0:
+        return torch.tensor(0.0, device=logits.device, requires_grad=True)
+
+    loss = F.binary_cross_entropy_with_logits(
+        logits_m, targets_m,
+        pos_weight=pos_weight,
+    )
+    return loss
+
+
+def node_saliency_loss(node_logits, y_node, pos_weight=None):
+    """BCE loss for node saliency prediction.
+
+    Parameters
+    ----------
+    node_logits : Tensor (N,)
+        Raw logits for node saliency.
+    y_node : Tensor (N,)
+        Binary node labels (1 = multi-hit cluster member, 0 = singleton/ambiguous).
+    pos_weight : Tensor or None
+        Weight for positive class. Use to handle class imbalance.
+
+    Returns
+    -------
+    loss : scalar Tensor
+    """
+    # NOTE(review): y_node is used directly as BCE targets, i.e. assumed to be
+    # strictly binary {0, 1}. metrics.node_saliency_metrics instead treats
+    # y_node as a truth-cluster ID where ">= 0" means signal and -1 means
+    # noise. The two conventions are incompatible: if the dataset stores IDs,
+    # values of -1 or > 1 here are invalid BCE targets. Confirm the dataset's
+    # y_node convention and align one of the two call sites.
+    targets = y_node.float()
+
+    if node_logits.numel() == 0:
+        return torch.tensor(0.0, device=node_logits.device, requires_grad=True)
+
+    return F.binary_cross_entropy_with_logits(
+        node_logits, targets, pos_weight=pos_weight)
+
+
+def consistency_loss(edge_logits, node_logits, edge_index):
+    """Consistency regularizer between node saliency and edge predictions.
+
+    If both endpoints of an edge are predicted as non-salient (low q_i, q_j),
+    the edge should also be predicted as negative. Penalizes high edge
+    probability when both node saliencies are low.
+
+    L_cons = mean( σ(s_ij) · (1 - σ(q_i)) · (1 - σ(q_j)) )
+
+    Parameters
+    ----------
+    edge_logits : Tensor (E,)
+    node_logits : Tensor (N,)
+    edge_index : Tensor (2, E)
+
+    Returns
+    -------
+    loss : scalar Tensor
+    """
+    if edge_logits.numel() == 0:
+        return torch.tensor(0.0, device=edge_logits.device, requires_grad=True)
+
+    src, dst = edge_index
+    p_edge = torch.sigmoid(edge_logits)
+    q_src = torch.sigmoid(node_logits[src])
+    q_dst = torch.sigmoid(node_logits[dst])
+
+    # Penalize: edge says "same cluster" but both nodes say "noise"
+    penalty = p_edge * (1.0 - q_src) * (1.0 - q_dst)
+    return penalty.mean()
+
+
+def multitask_loss(model_output, batch, pos_weight=None,
+                   lambda_edge=1.0, lambda_node=0.0, lambda_cons=0.0):
+    """Compute multi-task loss for CaloClusterNet.
+
+    Parameters
+    ----------
+    model_output : dict or Tensor
+        If dict: {"edge_logits": ..., "node_logits": ...}
+        If Tensor: edge logits only (SimpleEdgeNet compatibility).
+    batch : PyG Batch
+    pos_weight : Tensor or None
+        Positive-class weight. Applied to the EDGE term only; the node
+        saliency term below is computed with unweighted BCE.
+    lambda_edge, lambda_node, lambda_cons : float
+        Loss weights for each term.
+
+    Returns
+    -------
+    total_loss : scalar Tensor
+    loss_dict : dict with individual loss values (for logging)
+    """
+    if isinstance(model_output, torch.Tensor):
+        # SimpleEdgeNet compatibility — edge-only loss
+        edge_logits = model_output
+        l_edge = masked_bce_loss(edge_logits, batch.y_edge, batch.edge_mask,
+                                 pos_weight=pos_weight)
+        return l_edge, {"edge_loss": l_edge.item(), "total_loss": l_edge.item()}
+
+    edge_logits = model_output["edge_logits"]
+    node_logits = model_output["node_logits"]
+
+    l_edge = masked_bce_loss(edge_logits, batch.y_edge, batch.edge_mask,
+                             pos_weight=pos_weight)
+
+    loss_dict = {"edge_loss": l_edge.item()}
+    total = lambda_edge * l_edge
+
+    if lambda_node > 0 and node_logits is not None:
+        # No pos_weight here — node-class imbalance is not reweighted.
+        l_node = node_saliency_loss(node_logits, batch.y_node)
+        loss_dict["node_loss"] = l_node.item()
+        total = total + lambda_node * l_node
+
+    if lambda_cons > 0 and node_logits is not None:
+        l_cons = consistency_loss(edge_logits, node_logits, batch.edge_index)
+        loss_dict["cons_loss"] = l_cons.item()
+        total = total + lambda_cons * l_cons
+
+    loss_dict["total_loss"] = total.item()
+    return total, loss_dict
diff --git a/CaloClusterGNN/src/training/metrics.py b/CaloClusterGNN/src/training/metrics.py
new file mode 100644
index 0000000..22a94cf
--- /dev/null
+++ b/CaloClusterGNN/src/training/metrics.py
@@ -0,0 +1,196 @@
+"""
+Evaluation metrics for edge classification and cluster quality.
+
+Edge metrics: precision, recall, F1, ROC AUC, PR AUC.
+Cluster metrics: purity, completeness.
+"""
+
+import numpy as np
+import torch
+from scipy.sparse import coo_matrix
+from scipy.sparse.csgraph import connected_components
+
+
+def node_saliency_metrics(node_logits, y_node, threshold=0.5):
+    """Compute node saliency classification metrics.
+
+    Parameters
+    ----------
+    node_logits : Tensor (N,)
+        Raw logits for node saliency.
+    y_node : Tensor (N,)
+        Node labels. >= 0 means signal (assigned to truth cluster), -1 means noise.
+    threshold : float
+        Classification threshold on sigmoid(logits).
+
+    Returns
+    -------
+    dict with precision, recall, f1, accuracy, n_signal, n_noise.
+    """
+    probs = torch.sigmoid(node_logits).cpu().numpy()
+    # NOTE(review): signal/noise is derived as (y_node >= 0), i.e. y_node is
+    # treated as a truth-cluster ID with -1 = noise. losses.node_saliency_loss
+    # instead uses y_node directly as binary {0, 1} BCE targets — the two
+    # conventions are incompatible; confirm which one the dataset provides.
+    targets = (y_node >= 0).cpu().numpy().astype(int)
+
+    preds = (probs >= threshold).astype(int)
+
+    tp = ((preds == 1) & (targets == 1)).sum()
+    fp = ((preds == 1) & (targets == 0)).sum()
+    fn = ((preds == 0) & (targets == 1)).sum()
+    tn = ((preds == 0) & (targets == 0)).sum()
+
+    # Guard every ratio against a zero denominator (empty class / empty graph).
+    precision = tp / (tp + fp) if (tp + fp) > 0 else 0.0
+    recall = tp / (tp + fn) if (tp + fn) > 0 else 0.0
+    f1 = 2 * precision * recall / (precision + recall) if (precision + recall) > 0 else 0.0
+    accuracy = (tp + tn) / len(targets) if len(targets) > 0 else 0.0
+
+    return {
+        "node_precision": float(precision),
+        "node_recall": float(recall),
+        "node_f1": float(f1),
+        "node_accuracy": float(accuracy),
+        "n_signal": int(targets.sum()),
+        "n_noise": int((1 - targets).sum()),
+    }
+
+
+def edge_metrics(logits, targets, mask, threshold=0.5):
+    """Compute edge classification metrics on masked edges.
+
+    Parameters
+    ----------
+    logits : Tensor (E,)
+        Raw logits (pre-sigmoid).
+    targets : Tensor (E,)
+        Binary labels.
+    mask : Tensor (E,)
+        Boolean mask.
+    threshold : float
+        Classification threshold on sigmoid(logits).
+
+    Returns
+    -------
+    dict with precision, recall, f1, accuracy, n_pos, n_neg.
+    """
+    m = mask.bool()
+    probs = torch.sigmoid(logits[m]).cpu().numpy()
+    y = targets[m].cpu().numpy().astype(int)
+
+    preds = (probs >= threshold).astype(int)
+
+    tp = ((preds == 1) & (y == 1)).sum()
+    fp = ((preds == 1) & (y == 0)).sum()
+    fn = ((preds == 0) & (y == 1)).sum()
+    tn = ((preds == 0) & (y == 0)).sum()
+
+    # Same zero-denominator guards as node_saliency_metrics.
+    precision = tp / (tp + fp) if (tp + fp) > 0 else 0.0
+    recall = tp / (tp + fn) if (tp + fn) > 0 else 0.0
+    f1 = 2 * precision * recall / (precision + recall) if (precision + recall) > 0 else 0.0
+    accuracy = (tp + tn) / len(y) if len(y) > 0 else 0.0
+
+    return {
+        "precision": float(precision),
+        "recall": float(recall),
+        "f1": float(f1),
+        "accuracy": float(accuracy),
+        "n_pos": int(y.sum()),
+        "n_neg": int((1 - y).sum()),
+    }
+
+
+def edge_auc(logits, targets, mask):
+    """Compute ROC AUC and PR AUC on masked edges.
+
+    Returns dict with roc_auc, pr_auc. Returns 0.0 if single-class.
+    """
+    # Imported lazily so importing this module does not require sklearn.
+    from sklearn.metrics import roc_auc_score, average_precision_score
+
+    m = mask.bool()
+    probs = torch.sigmoid(logits[m]).detach().cpu().numpy()
+    y = targets[m].cpu().numpy().astype(int)
+
+    # AUC is undefined when only one class is present — return sentinel 0.0.
+    if len(np.unique(y)) < 2:
+        return {"roc_auc": 0.0, "pr_auc": 0.0}
+
+    return {
+        "roc_auc": float(roc_auc_score(y, probs)),
+        "pr_auc": float(average_precision_score(y, probs)),
+    }
+
+
+def cluster_metrics_from_edges(edge_index, edge_probs, edge_mask, hit_truth_cluster,
+                               n_nodes, threshold=0.5):
+    """Reconstruct clusters from predicted edges and compare to truth.
+
+    Parameters
+    ----------
+    edge_index : Tensor (2, E)
+    edge_probs : Tensor (E,)
+        Sigmoid probabilities.
+    edge_mask : Tensor (E,)
+    hit_truth_cluster : Tensor (N,)
+        Per-node truth cluster ID (-1 = unassigned).
+    n_nodes : int
+    threshold : float
+
+    Returns
+    -------
+    dict with purity, completeness, n_pred_clusters, n_truth_clusters.
+    """
+    ei = edge_index.cpu().numpy()
+    probs = edge_probs.cpu().numpy()
+    mask = edge_mask.bool().cpu().numpy()
+    truth = hit_truth_cluster.cpu().numpy()
+
+    # Build predicted adjacency from positive edges
+    pred_pos = (probs >= threshold) & mask
+    if pred_pos.sum() == 0:
+        return {"purity": 0.0, "completeness": 0.0,
+                "n_pred_clusters": 0, "n_truth_clusters": 0}
+
+    src = ei[0, pred_pos]
+    dst = ei[1, pred_pos]
+    # Symmetrize
+    src_sym = np.concatenate([src, dst])
+    dst_sym = np.concatenate([dst, src])
+    vals = np.ones(len(src_sym), dtype=np.float32)
+
+    adj = coo_matrix((vals, (src_sym, dst_sym)), shape=(n_nodes, n_nodes))
+    n_components, labels = connected_components(adj, directed=False)
+
+    # Compute purity and completeness for assigned hits
+    assigned = truth >= 0
+    if assigned.sum() == 0:
+        return {"purity": 0.0, "completeness": 0.0,
+                "n_pred_clusters": n_components, "n_truth_clusters": 0}
+
+    truth_ids = np.unique(truth[assigned])
+    # Note: pred_ids (and hence n_pred_clusters) only count components that
+    # contain at least one truth-assigned hit — pure-pileup components are
+    # excluded from both the count and the purity average.
+    pred_ids = np.unique(labels[assigned])
+
+    # Purity: for each predicted cluster, fraction of hits from dominant truth cluster
+    purities = []
+    for pid in pred_ids:
+        pmask = (labels == pid) & assigned
+        if pmask.sum() == 0:
+            continue
+        tc = truth[pmask]
+        dominant_count = np.bincount(tc).max()
+        purities.append(dominant_count / pmask.sum())
+
+    # Completeness: for each truth cluster, fraction of hits in dominant predicted cluster
+    completenesses = []
+    for tid in truth_ids:
+        tmask = (truth == tid) & assigned
+        if tmask.sum() == 0:
+            continue
+        pc = labels[tmask]
+        dominant_count = np.bincount(pc).max()
+        completenesses.append(dominant_count / tmask.sum())
+
+    # Unweighted means over clusters (small clusters count as much as large ones).
+    purity = float(np.mean(purities)) if purities else 0.0
+    completeness = float(np.mean(completenesses)) if completenesses else 0.0
+
+    return {
+        "purity": purity,
+        "completeness": completeness,
+        "n_pred_clusters": int(len(pred_ids)),
+        "n_truth_clusters": int(len(truth_ids)),
+    }
diff --git a/CaloClusterGNN/src/training/trainer.py
"""
Training loop for edge classification GNN.

Handles:
  - Train/val epoch loops with DataLoader
  - Weighted BCE with negative subsampling (train only)
  - ReduceLROnPlateau scheduling on val F1
  - Early stopping
  - Checkpointing best model
  - Per-epoch metric logging
"""

import json
import math
import time
from pathlib import Path

import torch
from torch_geometric.loader import DataLoader

from src.training.losses import masked_bce_loss, multitask_loss, compute_class_weights
from src.training.metrics import (
    edge_metrics, edge_auc, cluster_metrics_from_edges, node_saliency_metrics,
)


class Trainer:
    """Edge classification trainer.

    Parameters
    ----------
    model : nn.Module
    train_dataset : Dataset
    val_dataset : Dataset
    cfg : dict
        Training config (from default.yaml 'train' section).
    pos_weight : Tensor or None
        Positive-class weight forwarded to the BCE loss.
    device : torch.device
        Defaults to CPU when None.
    run_dir : Path
        Directory for saving checkpoints and logs.
    """

    def __init__(self, model, train_dataset, val_dataset, cfg, pos_weight=None,
                 device=None, run_dir=None):
        self.model = model
        self.cfg = cfg
        self.device = device or torch.device("cpu")
        self.model.to(self.device)

        self.train_loader = DataLoader(
            train_dataset, batch_size=cfg.get("batch_size", 32), shuffle=True,
        )
        self.val_loader = DataLoader(
            val_dataset, batch_size=cfg.get("batch_size", 32), shuffle=False,
        )

        self.pos_weight = pos_weight.to(self.device) if pos_weight is not None else None

        # Multi-task loss weights
        self.lambda_edge = cfg.get("lambda_edge", 1.0)
        self.lambda_node = cfg.get("lambda_node", 0.0)
        self.lambda_cons = cfg.get("lambda_cons", 0.0)

        # Optimizer
        self.optimizer = torch.optim.AdamW(
            model.parameters(),
            lr=cfg.get("lr", 1e-3),
            weight_decay=cfg.get("weight_decay", 1e-4),
        )

        # Scheduler: halve LR on a val-F1 plateau. Its patience is a third
        # of the early-stop patience so the LR drops before training stops.
        self.scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
            self.optimizer, mode="max", factor=0.5,
            patience=cfg.get("early_stop_patience", 15) // 3,
            min_lr=1e-6,
        )

        self.epochs = cfg.get("epochs", 100)
        self.patience = cfg.get("early_stop_patience", 15)

        # Logging
        self.run_dir = Path(run_dir) if run_dir else Path("outputs/runs/default")
        self.run_dir.mkdir(parents=True, exist_ok=True)
        self.checkpoint_dir = self.run_dir / "checkpoints"
        self.checkpoint_dir.mkdir(exist_ok=True)

        self.history = []
        # Fix: start below any achievable F1 so the first completed epoch
        # always writes a checkpoint. With the previous 0.0 baseline and
        # the strict '>' comparison in fit(), a run whose val F1 stayed at
        # 0.0 (e.g. a degenerate validation split) finished without ever
        # saving best_model.pt.
        self.best_val_f1 = -1.0
        self.epochs_without_improvement = 0

    def _extract_edge_logits(self, output):
        """Extract edge logits from model output (Tensor for SimpleEdgeNet, dict for CaloClusterNet)."""
        if isinstance(output, dict):
            return output["edge_logits"]
        return output

    def train_epoch(self):
        """Run one training epoch.

        Returns
        -------
        dict of edge metrics plus 'loss' and averaged sub-losses.
        """
        self.model.train()
        total_loss = 0.0
        total_sub_losses = {}
        n_batches = 0
        all_logits, all_targets, all_masks = [], [], []

        for batch in self.train_loader:
            batch = batch.to(self.device)
            self.optimizer.zero_grad()

            output = self.model(batch)
            loss, loss_dict = multitask_loss(
                output, batch, pos_weight=self.pos_weight,
                lambda_edge=self.lambda_edge,
                lambda_node=self.lambda_node,
                lambda_cons=self.lambda_cons,
            )

            loss.backward()
            torch.nn.utils.clip_grad_norm_(self.model.parameters(), 1.0)
            self.optimizer.step()

            total_loss += loss.item()
            for k, v in loss_dict.items():
                total_sub_losses[k] = total_sub_losses.get(k, 0.0) + v
            n_batches += 1

            all_logits.append(self._extract_edge_logits(output).detach())
            all_targets.append(batch.y_edge)
            all_masks.append(batch.edge_mask)

        # Aggregate metrics over the whole epoch
        logits_cat = torch.cat(all_logits)
        targets_cat = torch.cat(all_targets)
        masks_cat = torch.cat(all_masks)

        em = edge_metrics(logits_cat, targets_cat, masks_cat)
        em["loss"] = total_loss / max(n_batches, 1)
        for k, v in total_sub_losses.items():
            em[k] = v / max(n_batches, 1)
        return em

    @torch.no_grad()
    def val_epoch(self):
        """Run one validation epoch.

        Returns
        -------
        dict of edge metrics, AUCs, 'loss', averaged sub-losses, and node
        saliency metrics when the model exposes node logits.
        """
        self.model.eval()

        # Empty val set: return a fixed all-zero record so fit() still runs.
        if len(self.val_loader.dataset) == 0:
            return {"loss": 0.0, "precision": 0.0, "recall": 0.0, "f1": 0.0,
                    "accuracy": 0.0, "n_pos": 0, "n_neg": 0,
                    "roc_auc": 0.0, "pr_auc": 0.0}

        total_loss = 0.0
        total_sub_losses = {}
        n_batches = 0
        all_logits, all_targets, all_masks = [], [], []

        all_node_logits, all_y_node = [], []

        for batch in self.val_loader:
            batch = batch.to(self.device)

            output = self.model(batch)
            loss, loss_dict = multitask_loss(
                output, batch, pos_weight=self.pos_weight,
                lambda_edge=self.lambda_edge,
                lambda_node=self.lambda_node,
                lambda_cons=self.lambda_cons,
            )

            total_loss += loss.item()
            for k, v in loss_dict.items():
                total_sub_losses[k] = total_sub_losses.get(k, 0.0) + v
            n_batches += 1

            all_logits.append(self._extract_edge_logits(output))
            all_targets.append(batch.y_edge)
            all_masks.append(batch.edge_mask)

            if isinstance(output, dict) and "node_logits" in output:
                all_node_logits.append(output["node_logits"])
                all_y_node.append(batch.y_node)

        # Edge metrics (full val set, no subsampling)
        logits_cat = torch.cat(all_logits)
        targets_cat = torch.cat(all_targets)
        masks_cat = torch.cat(all_masks)

        em = edge_metrics(logits_cat, targets_cat, masks_cat)
        auc = edge_auc(logits_cat, targets_cat, masks_cat)
        em.update(auc)
        em["loss"] = total_loss / max(n_batches, 1)
        for k, v in total_sub_losses.items():
            em[k] = v / max(n_batches, 1)

        # Node saliency metrics (if available)
        if all_node_logits:
            nm = node_saliency_metrics(
                torch.cat(all_node_logits), torch.cat(all_y_node),
            )
            em.update(nm)

        return em

    def fit(self):
        """Run the full training loop.

        Returns
        -------
        list of per-epoch records (also written to run_dir/history.json).
        """
        print(f"Training on {self.device} for up to {self.epochs} epochs")
        print(f"  Train graphs: {len(self.train_loader.dataset)}")
        print(f"  Val graphs:   {len(self.val_loader.dataset)}")
        if self.pos_weight is not None:
            print(f"  pos_weight: {self.pos_weight.item():.2f}")
        print(f"  Run dir: {self.run_dir}")
        print()

        for epoch in range(1, self.epochs + 1):
            t0 = time.time()

            train_m = self.train_epoch()
            val_m = self.val_epoch()

            # Step scheduler on val F1
            self.scheduler.step(val_m["f1"])
            lr = self.optimizer.param_groups[0]["lr"]

            elapsed = time.time() - t0

            record = {
                "epoch": epoch,
                "lr": lr,
                "elapsed_s": round(elapsed, 1),
                "train": train_m,
                "val": val_m,
            }
            self.history.append(record)

            # Print progress
            print(
                f"Epoch {epoch:3d} | "
                f"train loss {train_m['loss']:.4f} F1 {train_m['f1']:.3f} | "
                f"val loss {val_m['loss']:.4f} F1 {val_m['f1']:.3f} "
                f"P {val_m['precision']:.3f} R {val_m['recall']:.3f} | "
                f"lr {lr:.1e} | {elapsed:.1f}s"
            )
            if "roc_auc" in val_m:
                print(
                    f"          val ROC-AUC {val_m['roc_auc']:.3f} "
                    f"PR-AUC {val_m['pr_auc']:.3f}"
                )
            if "node_f1" in val_m:
                print(
                    f"          val node F1 {val_m['node_f1']:.3f} "
                    f"P {val_m['node_precision']:.3f} "
                    f"R {val_m['node_recall']:.3f}"
                )

            # Check for NaN (loss is a plain float — no tensor round-trip
            # needed; math.isnan replaces torch.isnan(torch.tensor(...))).
            if math.isnan(train_m["loss"]):
                print("NaN loss detected — stopping training.")
                break

            # Best model checkpoint (best_val_f1 starts at -1.0, so the
            # first completed epoch always saves one)
            if val_m["f1"] > self.best_val_f1:
                self.best_val_f1 = val_m["f1"]
                self.epochs_without_improvement = 0
                ckpt_path = self.checkpoint_dir / "best_model.pt"
                torch.save({
                    "epoch": epoch,
                    "model_state_dict": self.model.state_dict(),
                    "optimizer_state_dict": self.optimizer.state_dict(),
                    "val_f1": val_m["f1"],
                    "val_metrics": val_m,
                }, ckpt_path)
                print(f"  -> New best val F1 = {val_m['f1']:.4f}, saved to {ckpt_path}")
            else:
                self.epochs_without_improvement += 1

            # Early stopping
            if self.epochs_without_improvement >= self.patience:
                print(f"Early stopping at epoch {epoch} "
                      f"(no improvement for {self.patience} epochs)")
                break

        # Save history
        history_path = self.run_dir / "history.json"
        with open(history_path, "w") as f:
            json.dump(self.history, f, indent=2)
        print(f"\nTraining complete. Best val F1 = {self.best_val_f1:.4f}")
        print(f"History saved to {history_path}")

        return self.history
outputs should differ from inputs (residual adds, doesn't replace).""" + block = EdgeAwareResBlock(hidden_dim=16, dropout=0.0) + h = torch.randn(5, 16) + e = torch.randn(8, 16) + edge_index = torch.randint(0, 5, (2, 8)) + + h_out, e_out = block(h, e, edge_index) + # Outputs shouldn't be identical to inputs (MLP + residual changes them) + self.assertFalse(torch.allclose(h, h_out, atol=1e-6)) + self.assertFalse(torch.allclose(e, e_out, atol=1e-6)) + + def test_gradients_flow(self): + block = EdgeAwareResBlock(hidden_dim=16, dropout=0.0) + h = torch.randn(5, 16, requires_grad=True) + e = torch.randn(8, 16, requires_grad=True) + edge_index = torch.randint(0, 5, (2, 8)) + + h_out, e_out = block(h, e, edge_index) + loss = h_out.sum() + e_out.sum() + loss.backward() + self.assertIsNotNone(h.grad) + self.assertIsNotNone(e.grad) + + +class TestNodeSaliencyHead(unittest.TestCase): + + def test_output_shape(self): + head = NodeSaliencyHead(hidden_dim=32, dropout=0.0) + h = torch.randn(10, 32) + out = head(h) + self.assertEqual(out.shape, (10,)) + + def test_output_unbounded(self): + """Raw logits should be unbounded (no sigmoid applied).""" + head = NodeSaliencyHead(hidden_dim=32, dropout=0.0) + h = torch.randn(100, 32) * 10 + out = head(h) + # At least some outputs should be outside [0, 1] + self.assertTrue((out < 0).any() or (out > 1).any()) + + +class TestEdgeClusteringHead(unittest.TestCase): + + def test_output_shape(self): + head = EdgeClusteringHead(hidden_dim=32, dropout=0.0) + h = torch.randn(10, 32) + e = torch.randn(20, 32) + edge_index = torch.randint(0, 10, (2, 20)) + out = head(h, e, edge_index) + self.assertEqual(out.shape, (20,)) + + +class TestCaloClusterNet(unittest.TestCase): + + def test_forward_returns_dict(self): + model = CaloClusterNet(hidden_dim=32, n_mp_layers=2, dropout=0.0) + data = _make_graph() + out = model(data) + self.assertIsInstance(out, dict) + self.assertIn("edge_logits", out) + self.assertIn("node_logits", out) + + def 
test_output_shapes(self): + model = CaloClusterNet(hidden_dim=32, n_mp_layers=2, dropout=0.0) + data = _make_graph(n_nodes=15, n_edges=30) + out = model(data) + self.assertEqual(out["edge_logits"].shape, (30,)) + self.assertEqual(out["node_logits"].shape, (15,)) + + def test_default_config(self): + """Default config: hidden=96, 4 MP layers.""" + model = CaloClusterNet() + data = _make_graph() + out = model(data) + self.assertEqual(out["edge_logits"].shape, (20,)) + self.assertEqual(out["node_logits"].shape, (10,)) + + def test_param_count_larger_than_simple(self): + from src.models.simple_edge_net import SimpleEdgeNet + simple = SimpleEdgeNet(hidden_dim=64, n_mp_layers=3) + v1 = CaloClusterNet(hidden_dim=96, n_mp_layers=4) + n_simple = sum(p.numel() for p in simple.parameters()) + n_v1 = sum(p.numel() for p in v1.parameters()) + self.assertGreater(n_v1, n_simple) + + def test_gradients_flow_through_all_heads(self): + model = CaloClusterNet(hidden_dim=16, n_mp_layers=2, dropout=0.0) + data = _make_graph() + out = model(data) + loss = out["edge_logits"].sum() + out["node_logits"].sum() + loss.backward() + # All parameters should have gradients + for name, p in model.named_parameters(): + self.assertIsNotNone(p.grad, f"No gradient for {name}") + + def test_single_node_graph(self): + """Model should handle a graph with 1 node and 0 edges.""" + model = CaloClusterNet(hidden_dim=16, n_mp_layers=2, dropout=0.0) + data = Data( + x=torch.randn(1, 6), + edge_index=torch.zeros(2, 0, dtype=torch.long), + edge_attr=torch.zeros(0, 8), + ) + out = model(data) + self.assertEqual(out["edge_logits"].shape, (0,)) + self.assertEqual(out["node_logits"].shape, (1,)) + + def test_eval_mode_deterministic(self): + model = CaloClusterNet(hidden_dim=16, n_mp_layers=2, dropout=0.1) + model.eval() + data = _make_graph() + out1 = model(data) + out2 = model(data) + self.assertTrue(torch.allclose(out1["edge_logits"], out2["edge_logits"])) + self.assertTrue(torch.allclose(out1["node_logits"], 
out2["node_logits"])) + + def test_compatible_with_reconstruct_clusters(self): + """Edge logits can be fed directly to reconstruct_clusters.""" + from src.inference.cluster_reco import reconstruct_clusters + model = CaloClusterNet(hidden_dim=16, n_mp_layers=2, dropout=0.0) + model.eval() + data = _make_graph(n_nodes=10, n_edges=20) + with torch.no_grad(): + out = model(data) + labels, probs = reconstruct_clusters( + data.edge_index, out["edge_logits"], n_nodes=10, + tau_edge=0.5, min_hits=1, min_energy_mev=0.0, + ) + self.assertEqual(labels.shape, (10,)) + + def test_compatible_with_predict_clusters(self): + """predict_clusters works with CaloClusterNet (dict output).""" + from src.inference.cluster_reco import predict_clusters + model = CaloClusterNet(hidden_dim=16, n_mp_layers=2, dropout=0.0) + data = _make_graph(n_nodes=10, n_edges=20) + labels, probs = predict_clusters( + model, data, device="cpu", tau_edge=0.5, + min_hits=1, min_energy_mev=0.0, + ) + self.assertEqual(labels.shape, (10,)) + + +class TestMultitaskLoss(unittest.TestCase): + + def test_edge_only_stage(self): + """lambda_node=0, lambda_cons=0 should give edge-only loss.""" + from src.training.losses import multitask_loss + data = _make_graph() + output = {"edge_logits": torch.randn(20), "node_logits": torch.randn(10)} + loss, ld = multitask_loss(output, data, lambda_edge=1.0, + lambda_node=0.0, lambda_cons=0.0) + self.assertIn("edge_loss", ld) + self.assertNotIn("node_loss", ld) + self.assertNotIn("cons_loss", ld) + + def test_multitask_stage(self): + """All three loss terms present when all lambdas > 0.""" + from src.training.losses import multitask_loss + data = _make_graph() + output = {"edge_logits": torch.randn(20), "node_logits": torch.randn(10)} + loss, ld = multitask_loss(output, data, lambda_edge=1.0, + lambda_node=0.3, lambda_cons=0.05) + self.assertIn("edge_loss", ld) + self.assertIn("node_loss", ld) + self.assertIn("cons_loss", ld) + self.assertGreater(loss.item(), 0) + + def 
test_backward_compatible_with_tensor(self): + """Tensor input (SimpleEdgeNet) should still work.""" + from src.training.losses import multitask_loss + data = _make_graph() + logits = torch.randn(20) + loss, ld = multitask_loss(logits, data) + self.assertIn("edge_loss", ld) + + +if __name__ == "__main__": + unittest.main() diff --git a/CaloClusterGNN/tests/test_graph_builder.py b/CaloClusterGNN/tests/test_graph_builder.py new file mode 100644 index 0000000..92ce22a --- /dev/null +++ b/CaloClusterGNN/tests/test_graph_builder.py @@ -0,0 +1,279 @@ +"""Unit tests for src/data/graph_builder.py.""" + +import numpy as np +import sys +import unittest +from pathlib import Path + +sys.path.insert(0, str(Path(__file__).resolve().parents[1])) + +from src.data.graph_builder import ( + build_graph, + compute_edge_features, + compute_node_features, +) + + +# ─────────────────────────────────────────────────────────────────── +# build_graph tests +# ─────────────────────────────────────────────────────────────────── + +class TestBuildGraph(unittest.TestCase): + """Tests for build_graph().""" + + def test_empty_input(self): + """Zero hits → empty edge_index.""" + positions = np.empty((0, 2)) + times = np.empty(0) + ei, diag = build_graph(positions, times) + self.assertEqual(ei.shape, (2, 0)) + self.assertEqual(diag["n_nodes"], 0) + self.assertEqual(diag["n_edges"], 0) + + def test_single_hit(self): + """One hit → no edges possible.""" + positions = np.array([[0.0, 0.0]]) + times = np.array([500.0]) + ei, diag = build_graph(positions, times) + self.assertEqual(ei.shape, (2, 0)) + self.assertEqual(diag["n_nodes"], 1) + self.assertEqual(diag["n_isolated"], 1) + + def test_two_close_hits_connected(self): + """Two hits within r_max and dt_max → bidirectional edge.""" + positions = np.array([[0.0, 0.0], [10.0, 0.0]]) + times = np.array([500.0, 502.0]) + ei, diag = build_graph(positions, times, r_max=50.0, dt_max=25.0) + self.assertEqual(diag["n_edges"], 2) # bidirectional + # Both 
directions present + edges = set(zip(ei[0].tolist(), ei[1].tolist())) + self.assertIn((0, 1), edges) + self.assertIn((1, 0), edges) + + def test_two_distant_hits_not_connected(self): + """Two hits beyond r_max → no radius edge (may get kNN fallback).""" + positions = np.array([[0.0, 0.0], [200.0, 0.0]]) + times = np.array([500.0, 502.0]) + # With kNN fallback, they still connect (only 2 nodes, k_min=3) + ei, diag = build_graph(positions, times, r_max=50.0, dt_max=25.0, k_min=1) + # With k_min=1, the kNN fallback will connect them since both are isolated + # But if we set k_min=0, no fallback + ei2, diag2 = build_graph(positions, times, r_max=50.0, dt_max=25.0, + k_min=0) + self.assertEqual(diag2["n_edges"], 0) + + def test_time_filter(self): + """Hits within r_max but beyond dt_max → not connected.""" + positions = np.array([[0.0, 0.0], [10.0, 0.0]]) + times = np.array([500.0, 600.0]) # Δt=100 >> dt_max=25 + ei, diag = build_graph(positions, times, r_max=150.0, dt_max=25.0, + k_min=0) + self.assertEqual(diag["n_edges"], 0) + + def test_knn_fallback_connects_isolated(self): + """Isolated node beyond r_max gets kNN fallback edges.""" + # Three close hits + one far hit + positions = np.array([ + [0.0, 0.0], [10.0, 0.0], [0.0, 10.0], # cluster + [300.0, 0.0], # isolated + ]) + times = np.array([500.0, 501.0, 502.0, 503.0]) + ei, diag = build_graph(positions, times, r_max=50.0, dt_max=25.0, + k_min=2) + # Node 3 should have at least some edges from kNN fallback + node3_edges = (ei[0] == 3).sum() + self.assertGreaterEqual(node3_edges, 1) + self.assertEqual(diag["n_isolated"], 0) + + def test_knn_fallback_respects_time(self): + """kNN fallback still filters by dt_max.""" + positions = np.array([[0.0, 0.0], [300.0, 0.0]]) + times = np.array([500.0, 600.0]) # Δt=100 >> dt_max=25 + ei, diag = build_graph(positions, times, r_max=50.0, dt_max=25.0, + k_min=3) + # Even with kNN fallback, time filter should block the edge + self.assertEqual(diag["n_edges"], 0) + + def 
test_degree_cap(self): + """Degree cap limits max neighbors per node.""" + # Create a star: one central node connected to many peripheral nodes + n = 30 + angles = np.linspace(0, 2 * np.pi, n, endpoint=False) + positions = np.zeros((n + 1, 2)) + positions[0] = [0.0, 0.0] # center + positions[1:, 0] = 40.0 * np.cos(angles) + positions[1:, 1] = 40.0 * np.sin(angles) + times = np.full(n + 1, 500.0) + + ei, diag = build_graph(positions, times, r_max=150.0, dt_max=25.0, + k_min=3, k_max=10) + # Center node should have at most k_max=10 outgoing edges + center_out = (ei[0] == 0).sum() + self.assertLessEqual(center_out, 10) + self.assertLessEqual(diag["max_degree"], 10) + + def test_directed_edges(self): + """Edge index contains directed edges (both directions).""" + positions = np.array([[0.0, 0.0], [10.0, 0.0], [5.0, 8.0]]) + times = np.array([500.0, 501.0, 502.0]) + ei, _ = build_graph(positions, times, r_max=150.0, dt_max=25.0) + edges = set(zip(ei[0].tolist(), ei[1].tolist())) + # If (i,j) is present, (j,i) should also be present + for s, d in list(edges): + self.assertIn((d, s), edges, f"Missing reverse edge ({d},{s})") + + def test_no_self_loops(self): + """Edge index should not contain self-loops.""" + positions = np.array([[0.0, 0.0], [10.0, 0.0], [5.0, 8.0]]) + times = np.array([500.0, 501.0, 502.0]) + ei, _ = build_graph(positions, times, r_max=150.0, dt_max=25.0) + self.assertTrue(np.all(ei[0] != ei[1])) + + def test_diagnostics_correct(self): + """Diagnostics match actual edge_index statistics.""" + positions = np.array([ + [0.0, 0.0], [10.0, 0.0], [0.0, 10.0], + [10.0, 10.0], [50.0, 50.0], + ]) + times = np.full(5, 500.0) + ei, diag = build_graph(positions, times, r_max=150.0, dt_max=25.0) + self.assertEqual(diag["n_nodes"], 5) + self.assertEqual(diag["n_edges"], ei.shape[1]) + degree = np.bincount(ei[0], minlength=5) + self.assertEqual(diag["min_degree"], int(degree.min())) + self.assertEqual(diag["max_degree"], int(degree.max())) + 
self.assertAlmostEqual(diag["avg_degree"], float(degree.mean()), places=5) + + def test_edge_index_dtype(self): + """Edge index has int64 dtype.""" + positions = np.array([[0.0, 0.0], [10.0, 0.0]]) + times = np.array([500.0, 501.0]) + ei, _ = build_graph(positions, times, r_max=150.0, dt_max=25.0) + self.assertEqual(ei.dtype, np.int64) + + +# ─────────────────────────────────────────────────────────────────── +# compute_node_features tests +# ─────────────────────────────────────────────────────────────────── + +class TestComputeNodeFeatures(unittest.TestCase): + """Tests for compute_node_features().""" + + def test_empty(self): + """Zero hits → shape (0, 6).""" + nf = compute_node_features(np.empty((0, 2)), np.empty(0), np.empty(0)) + self.assertEqual(nf.shape, (0, 6)) + + def test_shape(self): + """Output shape is (n, 6).""" + positions = np.array([[0.0, 0.0], [10.0, 20.0], [30.0, 40.0]]) + times = np.array([500.0, 510.0, 520.0]) + energies = np.array([1.0, 2.0, 3.0]) + nf = compute_node_features(positions, times, energies) + self.assertEqual(nf.shape, (3, 6)) + self.assertEqual(nf.dtype, np.float32) + + def test_feature_values(self): + """Spot-check individual feature columns.""" + positions = np.array([[3.0, 4.0]]) + times = np.array([500.0]) + energies = np.array([2.0]) + nf = compute_node_features(positions, times, energies) + self.assertAlmostEqual(nf[0, 0], np.log1p(2.0), places=5) # log energy + self.assertAlmostEqual(nf[0, 1], 500.0, places=5) # time + self.assertAlmostEqual(nf[0, 2], 3.0, places=5) # x + self.assertAlmostEqual(nf[0, 3], 4.0, places=5) # y + self.assertAlmostEqual(nf[0, 4], 5.0, places=5) # radial distance + self.assertAlmostEqual(nf[0, 5], 1.0, places=5) # relative energy (max) + + def test_relative_energy(self): + """Relative energy is E/E_max for each hit.""" + positions = np.array([[0.0, 0.0], [0.0, 0.0]]) + times = np.array([500.0, 500.0]) + energies = np.array([1.0, 4.0]) + nf = compute_node_features(positions, times, energies) + 
self.assertAlmostEqual(nf[0, 5], 0.25, places=5) + self.assertAlmostEqual(nf[1, 5], 1.0, places=5) + + def test_zero_energy(self): + """All-zero energies → relative energy all zero, no NaN.""" + positions = np.array([[0.0, 0.0]]) + times = np.array([500.0]) + energies = np.array([0.0]) + nf = compute_node_features(positions, times, energies) + self.assertFalse(np.any(np.isnan(nf))) + self.assertAlmostEqual(nf[0, 5], 0.0, places=5) + + +# ─────────────────────────────────────────────────────────────────── +# compute_edge_features tests +# ─────────────────────────────────────────────────────────────────── + +class TestComputeEdgeFeatures(unittest.TestCase): + """Tests for compute_edge_features().""" + + def test_empty(self): + """Zero edges → shape (0, 8).""" + ef = compute_edge_features( + np.empty((0, 2)), np.empty(0), np.empty(0), + np.empty((2, 0), dtype=np.int64), + ) + self.assertEqual(ef.shape, (0, 8)) + + def test_shape(self): + """Output shape is (n_edges, 8).""" + positions = np.array([[0.0, 0.0], [3.0, 4.0]]) + times = np.array([500.0, 505.0]) + energies = np.array([1.0, 2.0]) + ei = np.array([[0, 1], [1, 0]]) + ef = compute_edge_features(positions, times, energies, ei) + self.assertEqual(ef.shape, (2, 8)) + self.assertEqual(ef.dtype, np.float32) + + def test_feature_values(self): + """Spot-check edge feature columns for a single edge.""" + positions = np.array([[0.0, 0.0], [3.0, 4.0]]) + times = np.array([500.0, 505.0]) + energies = np.array([1.0, 3.0]) + ei = np.array([[0], [1]]) # single edge 0→1 + ef = compute_edge_features(positions, times, energies, ei) + self.assertAlmostEqual(ef[0, 0], -3.0, places=5) # Δx + self.assertAlmostEqual(ef[0, 1], -4.0, places=5) # Δy + self.assertAlmostEqual(ef[0, 2], 5.0, places=5) # distance + self.assertAlmostEqual(ef[0, 3], -5.0, places=5) # Δt + # Δ log energy: log(1+1) - log(1+3) + expected_dle = np.log1p(1.0) - np.log1p(3.0) + self.assertAlmostEqual(ef[0, 4], expected_dle, places=5) + # energy asymmetry: 
(1-3)/(1+3) = -0.5 + self.assertAlmostEqual(ef[0, 5], -0.5, places=5) + # log summed energy: log(1 + 1 + 3) = log(5) + self.assertAlmostEqual(ef[0, 6], np.log1p(4.0), places=5) + + def test_antisymmetric_features(self): + """Δx, Δy, Δt, Δ_log_e, energy_asym flip sign for reversed edge.""" + positions = np.array([[0.0, 0.0], [3.0, 4.0]]) + times = np.array([500.0, 505.0]) + energies = np.array([1.0, 3.0]) + ei = np.array([[0, 1], [1, 0]]) + ef = compute_edge_features(positions, times, energies, ei) + # Antisymmetric: cols 0,1,3,4,5,7 + for col in [0, 1, 3, 4, 5, 7]: + self.assertAlmostEqual(ef[0, col], -ef[1, col], places=5, + msg=f"Column {col} should be antisymmetric") + # Symmetric: cols 2,6 (distance, log summed energy) + for col in [2, 6]: + self.assertAlmostEqual(ef[0, col], ef[1, col], places=5, + msg=f"Column {col} should be symmetric") + + def test_no_nan(self): + """No NaN even with zero energies.""" + positions = np.array([[0.0, 0.0], [10.0, 0.0]]) + times = np.array([500.0, 500.0]) + energies = np.array([0.0, 0.0]) + ei = np.array([[0, 1], [1, 0]]) + ef = compute_edge_features(positions, times, energies, ei) + self.assertFalse(np.any(np.isnan(ef))) + + +if __name__ == "__main__": + unittest.main(verbosity=2) diff --git a/CaloClusterGNN/tests/test_inference.py b/CaloClusterGNN/tests/test_inference.py new file mode 100644 index 0000000..e1f152e --- /dev/null +++ b/CaloClusterGNN/tests/test_inference.py @@ -0,0 +1,179 @@ +"""Unit tests for src/inference/cluster_reco.py.""" + +import unittest +import numpy as np +import torch + +from src.inference.cluster_reco import ( + symmetrize_edge_scores, + reconstruct_clusters, +) + + +class TestSymmetrizeEdgeScores(unittest.TestCase): + """Tests for directed → undirected edge score averaging.""" + + def test_bidirectional_pair(self): + """Two directed edges (0→1, 1→0) average to one undirected edge.""" + edge_index = np.array([[0, 1], [1, 0]]) + probs = np.array([0.8, 0.6]) + ei, ep = 
symmetrize_edge_scores(edge_index, probs) + self.assertEqual(ei.shape[1], 1) + self.assertAlmostEqual(ep[0], 0.7, places=5) + + def test_single_direction(self): + """Edge in one direction only keeps its original score.""" + edge_index = np.array([[0], [1]]) + probs = np.array([0.9]) + ei, ep = symmetrize_edge_scores(edge_index, probs) + self.assertEqual(ei.shape[1], 1) + self.assertAlmostEqual(ep[0], 0.9, places=5) + + def test_canonical_ordering(self): + """Undirected edges always have i < j.""" + edge_index = np.array([[3, 1], [1, 3]]) + probs = np.array([0.4, 0.6]) + ei, ep = symmetrize_edge_scores(edge_index, probs) + self.assertTrue(ei[0, 0] < ei[1, 0]) + + def test_multiple_pairs(self): + """Multiple pairs symmetrized independently.""" + edge_index = np.array([[0, 1, 2, 3], [1, 0, 3, 2]]) + probs = np.array([0.8, 0.6, 0.9, 0.7]) + ei, ep = symmetrize_edge_scores(edge_index, probs) + self.assertEqual(ei.shape[1], 2) + # Sort by first node for consistent comparison + order = np.argsort(ei[0]) + self.assertAlmostEqual(ep[order[0]], 0.7, places=5) # (0,1) + self.assertAlmostEqual(ep[order[1]], 0.8, places=5) # (2,3) + + +class TestReconstructClusters(unittest.TestCase): + """Tests for full cluster reconstruction pipeline.""" + + def test_two_clusters(self): + """4 nodes, 2 pairs connected → 2 clusters.""" + # 0-1 connected, 2-3 connected + edge_index = np.array([[0, 1, 2, 3], [1, 0, 3, 2]]) + logits = np.array([5.0, 5.0, 5.0, 5.0]) # high sigmoid → ~1.0 + energies = np.array([100.0, 100.0, 100.0, 100.0]) + + labels, probs = reconstruct_clusters( + edge_index, logits, n_nodes=4, energies=energies, + tau_edge=0.5, min_hits=1, min_energy_mev=0.0, + ) + # Should have 2 clusters + self.assertEqual(len(np.unique(labels[labels >= 0])), 2) + # Nodes 0 and 1 in same cluster + self.assertEqual(labels[0], labels[1]) + # Nodes 2 and 3 in same cluster + self.assertEqual(labels[2], labels[3]) + # But different from 0-1 + self.assertNotEqual(labels[0], labels[2]) + + def 
test_threshold_separates(self): + """Low-confidence edges are filtered out.""" + edge_index = np.array([[0, 1, 1, 2], [1, 0, 2, 1]]) + # 0↔1 high confidence, 1↔2 low confidence + logits = np.array([5.0, 5.0, -5.0, -5.0]) + energies = np.array([100.0, 100.0, 100.0]) + + labels, _ = reconstruct_clusters( + edge_index, logits, n_nodes=3, energies=energies, + tau_edge=0.5, min_hits=1, min_energy_mev=0.0, + ) + self.assertEqual(labels[0], labels[1]) + self.assertNotEqual(labels[0], labels[2]) + + def test_min_hits_cleanup(self): + """Clusters smaller than min_hits get label -1.""" + # Single edge connecting 0-1, node 2 isolated + edge_index = np.array([[0, 1], [1, 0]]) + logits = np.array([5.0, 5.0]) + energies = np.array([100.0, 100.0, 100.0]) + + labels, _ = reconstruct_clusters( + edge_index, logits, n_nodes=3, energies=energies, + tau_edge=0.5, min_hits=2, min_energy_mev=0.0, + ) + # 0-1 cluster kept (2 hits) + self.assertEqual(labels[0], labels[1]) + self.assertTrue(labels[0] >= 0) + # Node 2 is isolated → -1 + self.assertEqual(labels[2], -1) + + def test_min_energy_cleanup(self): + """Clusters below min_energy get label -1.""" + edge_index = np.array([[0, 1, 2, 3], [1, 0, 3, 2]]) + logits = np.array([5.0, 5.0, 5.0, 5.0]) + # Cluster 0-1 has 200 MeV, cluster 2-3 has 5 MeV + energies = np.array([100.0, 100.0, 2.5, 2.5]) + + labels, _ = reconstruct_clusters( + edge_index, logits, n_nodes=4, energies=energies, + tau_edge=0.5, min_hits=1, min_energy_mev=10.0, + ) + # 0-1 kept + self.assertTrue(labels[0] >= 0) + self.assertEqual(labels[0], labels[1]) + # 2-3 removed (5 MeV < 10 MeV threshold) + self.assertEqual(labels[2], -1) + self.assertEqual(labels[3], -1) + + def test_no_edges_above_threshold(self): + """All edges below threshold → all nodes unclustered.""" + edge_index = np.array([[0, 1], [1, 0]]) + logits = np.array([-5.0, -5.0]) # sigmoid ~ 0.007 + + labels, _ = reconstruct_clusters( + edge_index, logits, n_nodes=2, tau_edge=0.5, + ) + 
self.assertTrue(np.all(labels == -1)) + + def test_contiguous_relabeling(self): + """Cluster IDs are relabeled to 0, 1, 2, ... after cleanup.""" + # 3 disconnected pairs + edge_index = np.array([[0, 1, 2, 3, 4, 5], [1, 0, 3, 2, 5, 4]]) + logits = np.array([5.0, 5.0, 5.0, 5.0, 5.0, 5.0]) + energies = np.array([50.0, 50.0, 50.0, 50.0, 50.0, 50.0]) + + labels, _ = reconstruct_clusters( + edge_index, logits, n_nodes=6, energies=energies, + tau_edge=0.5, min_hits=1, min_energy_mev=0.0, + ) + valid = labels[labels >= 0] + unique = np.unique(valid) + # Should be 0, 1, 2 + np.testing.assert_array_equal(unique, np.arange(len(unique))) + + def test_torch_tensors_accepted(self): + """Function accepts torch Tensors (auto-converts).""" + edge_index = torch.tensor([[0, 1], [1, 0]]) + logits = torch.tensor([5.0, 5.0]) + energies = torch.tensor([100.0, 100.0]) + + labels, probs = reconstruct_clusters( + edge_index, logits, n_nodes=2, energies=energies, + tau_edge=0.5, min_hits=1, min_energy_mev=0.0, + ) + self.assertEqual(labels[0], labels[1]) + self.assertTrue(labels[0] >= 0) + + def test_no_symmetrize(self): + """symmetrize=False uses directed scores directly.""" + # Only one direction: 0→1 high, but no 1→0 + edge_index = np.array([[0], [1]]) + logits = np.array([5.0]) + energies = np.array([100.0, 100.0]) + + labels, _ = reconstruct_clusters( + edge_index, logits, n_nodes=2, energies=energies, + tau_edge=0.5, min_hits=1, min_energy_mev=0.0, + symmetrize=False, + ) + # Still connected (directed edge above threshold) + self.assertEqual(labels[0], labels[1]) + + +if __name__ == "__main__": + unittest.main() diff --git a/CaloClusterGNN/tests/test_postprocess.py b/CaloClusterGNN/tests/test_postprocess.py new file mode 100644 index 0000000..39e9681 --- /dev/null +++ b/CaloClusterGNN/tests/test_postprocess.py @@ -0,0 +1,144 @@ +"""Unit tests for src/inference/postprocess.py.""" + +import unittest +import numpy as np + +from src.inference.postprocess import ( + 
compute_cluster_features, + compute_summary_statistics, +) + + +class TestComputeClusterFeatures(unittest.TestCase): + """Tests for per-cluster feature computation.""" + + def test_single_cluster(self): + """Two hits in one cluster → correct centroid and energy.""" + labels = np.array([0, 0]) + positions = np.array([[0.0, 0.0], [10.0, 0.0]]) + energies = np.array([100.0, 100.0]) + times = np.array([500.0, 500.0]) + + clusters = compute_cluster_features(labels, positions, energies, times) + self.assertEqual(len(clusters), 1) + + c = clusters[0] + self.assertEqual(c["n_hits"], 2) + self.assertAlmostEqual(c["total_energy"], 200.0) + self.assertAlmostEqual(c["centroid_x"], 5.0) # midpoint + self.assertAlmostEqual(c["centroid_y"], 0.0) + self.assertAlmostEqual(c["time"], 500.0) + self.assertAlmostEqual(c["max_hit_fraction"], 0.5) + + def test_energy_weighted_centroid(self): + """Centroid pulled toward higher-energy hit.""" + labels = np.array([0, 0]) + positions = np.array([[0.0, 0.0], [10.0, 0.0]]) + energies = np.array([300.0, 100.0]) # 3:1 ratio + times = np.array([500.0, 600.0]) + + clusters = compute_cluster_features(labels, positions, energies, times) + c = clusters[0] + # Centroid: (300*0 + 100*10) / 400 = 2.5 + self.assertAlmostEqual(c["centroid_x"], 2.5) + # Time: (300*500 + 100*600) / 400 = 525 + self.assertAlmostEqual(c["time"], 525.0) + self.assertAlmostEqual(c["max_hit_fraction"], 0.75) + + def test_rms_width(self): + """RMS width for symmetric 2-hit cluster.""" + labels = np.array([0, 0]) + positions = np.array([[-5.0, 0.0], [5.0, 0.0]]) + energies = np.array([100.0, 100.0]) + times = np.array([0.0, 0.0]) + + clusters = compute_cluster_features(labels, positions, energies, times) + c = clusters[0] + # Centroid at (0, 0). Each hit 5mm away. 
RMS = sqrt(25) = 5.0 + self.assertAlmostEqual(c["rms_width"], 5.0, places=3) + + def test_unclustered_hits_excluded(self): + """Hits with label -1 are excluded from all clusters.""" + labels = np.array([0, -1, 0]) + positions = np.array([[0.0, 0.0], [100.0, 100.0], [10.0, 0.0]]) + energies = np.array([100.0, 999.0, 100.0]) + times = np.array([0.0, 0.0, 0.0]) + + clusters = compute_cluster_features(labels, positions, energies, times) + self.assertEqual(len(clusters), 1) + self.assertEqual(clusters[0]["n_hits"], 2) + self.assertAlmostEqual(clusters[0]["total_energy"], 200.0) + + def test_multiple_clusters(self): + """Two separate clusters computed independently.""" + labels = np.array([0, 0, 1, 1]) + positions = np.array([[0.0, 0.0], [1.0, 0.0], [100.0, 0.0], [101.0, 0.0]]) + energies = np.array([50.0, 50.0, 200.0, 200.0]) + times = np.array([10.0, 10.0, 20.0, 20.0]) + + clusters = compute_cluster_features(labels, positions, energies, times) + self.assertEqual(len(clusters), 2) + + c0 = [c for c in clusters if c["cluster_id"] == 0][0] + c1 = [c for c in clusters if c["cluster_id"] == 1][0] + self.assertAlmostEqual(c0["total_energy"], 100.0) + self.assertAlmostEqual(c1["total_energy"], 400.0) + + def test_empty_labels(self): + """All unclustered → empty list.""" + labels = np.array([-1, -1, -1]) + positions = np.array([[0.0, 0.0], [1.0, 0.0], [2.0, 0.0]]) + energies = np.array([100.0, 100.0, 100.0]) + times = np.array([0.0, 0.0, 0.0]) + + clusters = compute_cluster_features(labels, positions, energies, times) + self.assertEqual(len(clusters), 0) + + def test_single_hit_cluster(self): + """Single-hit cluster has RMS width 0.""" + labels = np.array([0]) + positions = np.array([[5.0, 10.0]]) + energies = np.array([50.0]) + times = np.array([100.0]) + + clusters = compute_cluster_features(labels, positions, energies, times) + self.assertEqual(len(clusters), 1) + self.assertAlmostEqual(clusters[0]["rms_width"], 0.0) + self.assertAlmostEqual(clusters[0]["max_hit_fraction"], 
1.0) + + def test_hit_indices_correct(self): + """hit_indices maps back to original array positions.""" + labels = np.array([-1, 0, -1, 0, -1]) + positions = np.zeros((5, 2)) + energies = np.array([0.0, 50.0, 0.0, 50.0, 0.0]) + times = np.zeros(5) + + clusters = compute_cluster_features(labels, positions, energies, times) + self.assertEqual(clusters[0]["hit_indices"], [1, 3]) + + +class TestComputeSummaryStatistics(unittest.TestCase): + """Tests for aggregate summary statistics.""" + + def test_basic_summary(self): + """Summary over two clusters.""" + clusters = [ + {"n_hits": 3, "total_energy": 100.0}, + {"n_hits": 5, "total_energy": 300.0}, + ] + s = compute_summary_statistics(clusters) + self.assertEqual(s["n_clusters"], 2) + self.assertAlmostEqual(s["mean_n_hits"], 4.0) + self.assertAlmostEqual(s["median_n_hits"], 4.0) + self.assertAlmostEqual(s["mean_energy"], 200.0) + self.assertAlmostEqual(s["median_energy"], 200.0) + + def test_empty_list(self): + """Empty cluster list returns zeros.""" + s = compute_summary_statistics([]) + self.assertEqual(s["n_clusters"], 0) + self.assertAlmostEqual(s["mean_n_hits"], 0.0) + + +if __name__ == "__main__": + unittest.main() diff --git a/CaloClusterGNN/tests/test_truth_labels.py b/CaloClusterGNN/tests/test_truth_labels.py new file mode 100644 index 0000000..fb1780f --- /dev/null +++ b/CaloClusterGNN/tests/test_truth_labels.py @@ -0,0 +1,173 @@ +"""Unit tests for src/data/truth_labels.py.""" + +import numpy as np +import sys +import unittest +from pathlib import Path + +# Ensure project root is on the path +sys.path.insert(0, str(Path(__file__).resolve().parents[1])) + +from src.data.truth_labels import assign_mc_truth + + +class TestAssignMcTruth(unittest.TestCase): + """Tests for assign_mc_truth().""" + + def test_same_particle_positive(self): + """Two hits from the same SimParticle → edge label 1.""" + sim_ids = [[100], [100]] + edeps = [[5.0], [3.0]] + disks = np.array([0, 0]) + edge_index = np.array([[0, 1], [1, 0]]) + + 
y, mask, tc, amb = assign_mc_truth(sim_ids, edeps, disks, edge_index) + assert mask.all() + assert (y == 1).all() + assert not amb.any() + assert tc[0] == tc[1] # same truth cluster + + def test_different_particle_negative(self): + """Two hits from different SimParticles → edge label 0.""" + sim_ids = [[100], [200]] + edeps = [[5.0], [3.0]] + disks = np.array([0, 0]) + edge_index = np.array([[0, 1], [1, 0]]) + + y, mask, tc, amb = assign_mc_truth(sim_ids, edeps, disks, edge_index) + assert mask.all() + assert (y == 0).all() + assert tc[0] != tc[1] + + def test_ambiguous_hit_masked(self): + """Hit with purity < threshold → ambiguous, edge masked.""" + # Hit 0: 60% from particle 100, 40% from 200 → ambiguous (< 0.7) + # Hit 1: 100% from particle 100 → not ambiguous + sim_ids = [[100, 200], [100]] + edeps = [[0.6, 0.4], [1.0]] + disks = np.array([0, 0]) + edge_index = np.array([[0, 1], [1, 0]]) + + y, mask, tc, amb = assign_mc_truth(sim_ids, edeps, disks, edge_index) + assert amb[0] == True, "Hit 0 should be ambiguous" + assert amb[1] == False, "Hit 1 should not be ambiguous" + assert not mask.any(), "All edges involve ambiguous hit 0 → masked" + + def test_purity_threshold_boundary(self): + """Hit with purity == threshold → not ambiguous.""" + sim_ids = [[100, 200]] + edeps = [[0.7, 0.3]] + disks = np.array([0]) + edge_index = np.empty((2, 0), dtype=np.int64) + + _, _, tc, amb = assign_mc_truth(sim_ids, edeps, disks, edge_index, + purity_threshold=0.7) + assert not amb[0], "Purity == threshold should not be ambiguous" + assert tc[0] >= 0 + + def test_purity_just_below_threshold(self): + """Hit with purity just below threshold → ambiguous.""" + sim_ids = [[100, 200]] + edeps = [[0.69, 0.31]] + disks = np.array([0]) + edge_index = np.empty((2, 0), dtype=np.int64) + + _, _, tc, amb = assign_mc_truth(sim_ids, edeps, disks, edge_index, + purity_threshold=0.7) + assert amb[0], "Purity below threshold should be ambiguous" + assert tc[0] == -1 + + def 
test_different_disks_different_clusters(self): + """Same SimParticle on different disks → different truth clusters.""" + sim_ids = [[100], [100]] + edeps = [[5.0], [3.0]] + disks = np.array([0, 1]) + edge_index = np.array([[0, 1], [1, 0]]) + + y, mask, tc, amb = assign_mc_truth(sim_ids, edeps, disks, edge_index) + assert mask.all() + assert (y == 0).all(), "Different disks → different clusters → label 0" + assert tc[0] != tc[1] + + def test_empty_simparticle_info(self): + """Hit with no SimParticle info → ambiguous.""" + sim_ids = [[], [100]] + edeps = [[], [1.0]] + disks = np.array([0, 0]) + edge_index = np.array([[0, 1], [1, 0]]) + + y, mask, tc, amb = assign_mc_truth(sim_ids, edeps, disks, edge_index) + assert amb[0] == True + assert tc[0] == -1 + assert not mask.any() + + def test_zero_energy_ambiguous(self): + """Hit with zero total energy → ambiguous.""" + sim_ids = [[100]] + edeps = [[0.0]] + disks = np.array([0]) + edge_index = np.empty((2, 0), dtype=np.int64) + + _, _, tc, amb = assign_mc_truth(sim_ids, edeps, disks, edge_index) + assert amb[0] == True + assert tc[0] == -1 + + def test_multi_hit_cluster(self): + """Multiple hits from the same particle → all in same truth cluster.""" + sim_ids = [[10], [10], [10], [20]] + edeps = [[1.0], [2.0], [3.0], [1.0]] + disks = np.array([0, 0, 0, 0]) + # Fully connected edges among 0,1,2 + edges to 3 + edge_index = np.array([ + [0, 0, 1, 1, 2, 2, 0, 3], + [1, 2, 0, 2, 0, 1, 3, 0], + ]) + + y, mask, tc, amb = assign_mc_truth(sim_ids, edeps, disks, edge_index) + assert mask.all() + assert not amb.any() + # Hits 0,1,2 share cluster; hit 3 is different + assert tc[0] == tc[1] == tc[2] + assert tc[3] != tc[0] + # Edges within {0,1,2} → label 1; edges to 3 → label 0 + for idx in range(6): + assert y[idx] == 1, f"Edge {idx} within same particle should be 1" + assert y[6] == 0 and y[7] == 0 + + def test_output_shapes_and_dtypes(self): + """Check shapes and dtypes of all outputs.""" + sim_ids = [[100], [200], [100]] + edeps = 
[[1.0], [1.0], [1.0]] + disks = np.array([0, 0, 0]) + edge_index = np.array([[0, 1, 2], [1, 2, 0]]) + + y, mask, tc, amb = assign_mc_truth(sim_ids, edeps, disks, edge_index) + assert y.shape == (3,) + assert mask.shape == (3,) + assert tc.shape == (3,) + assert amb.shape == (3,) + assert y.dtype == np.int64 + assert mask.dtype == bool + assert tc.dtype == np.int64 + assert amb.dtype == bool + + def test_custom_purity_threshold(self): + """Custom purity threshold changes ambiguity classification.""" + sim_ids = [[100, 200]] + edeps = [[0.55, 0.45]] + disks = np.array([0]) + edge_index = np.empty((2, 0), dtype=np.int64) + + # With default 0.7 → ambiguous + _, _, _, amb_strict = assign_mc_truth(sim_ids, edeps, disks, edge_index, + purity_threshold=0.7) + assert amb_strict[0] == True + + # With threshold 0.5 → not ambiguous + _, _, _, amb_loose = assign_mc_truth(sim_ids, edeps, disks, edge_index, + purity_threshold=0.5) + assert amb_loose[0] == False + + +if __name__ == "__main__": + unittest.main(verbosity=2) diff --git a/CaloClusterGNN/tests/test_truth_labels_primary.py b/CaloClusterGNN/tests/test_truth_labels_primary.py new file mode 100644 index 0000000..a929daa --- /dev/null +++ b/CaloClusterGNN/tests/test_truth_labels_primary.py @@ -0,0 +1,255 @@ +"""Unit tests for src/data/truth_labels_primary.py.""" + +import numpy as np +import sys +import unittest +from pathlib import Path + +sys.path.insert(0, str(Path(__file__).resolve().parents[1])) + +from src.data.truth_labels_primary import ( + assign_mc_truth_primary, + build_calo_root_map, +) + + +class TestBuildCaloRootMap(unittest.TestCase): + """Tests for build_calo_root_map().""" + + def test_no_ancestors_self_is_root(self): + """SimParticle with no ancestors in calo → itself is calo-entrant.""" + sim_ids = [10] + ancestors = [[]] # no ancestors in calo + hit_sim_ids = [[10]] + hit_cry_ids = [100] + crystal_disk = {100: 0} + + m = build_calo_root_map(sim_ids, ancestors, hit_sim_ids, + hit_cry_ids, crystal_disk) + 
self.assertEqual(m[(10, 0)], 10) + + def test_ancestor_in_same_disk(self): + """SimParticle whose ancestor also deposited in same disk → ancestor is root.""" + sim_ids = [4, 5] + ancestors = [[], [4]] # SimP 5's parent is 4 + hit_sim_ids = [[4], [5]] + hit_cry_ids = [100, 101] + crystal_disk = {100: 0, 101: 0} + + m = build_calo_root_map(sim_ids, ancestors, hit_sim_ids, + hit_cry_ids, crystal_disk) + self.assertEqual(m[(5, 0)], 4, "SimP 5 should root to 4 on disk 0") + self.assertEqual(m[(4, 0)], 4, "SimP 4 is its own root") + + def test_cross_disk_secondary_is_own_root(self): + """Secondary that crosses from disk 0 to disk 1 → own root on disk 1.""" + # SimP 4 deposits in disk 0, SimP 5 (child of 4) deposits in disk 1 + sim_ids = [4, 5] + ancestors = [[], [4]] + hit_sim_ids = [[4], [5]] + hit_cry_ids = [100, 700] + crystal_disk = {100: 0, 700: 1} + + m = build_calo_root_map(sim_ids, ancestors, hit_sim_ids, + hit_cry_ids, crystal_disk) + self.assertEqual(m[(4, 0)], 4) + self.assertEqual(m[(5, 1)], 5, + "SimP 5 on disk 1 should be its own root (parent 4 not on disk 1)") + + def test_deep_chain(self): + """Multi-level ancestry: grandchild → child → parent, all on same disk.""" + sim_ids = [1, 2, 3] + ancestors = [[], [1], [2, 1]] # 3→2→1 + hit_sim_ids = [[1], [2], [3]] + hit_cry_ids = [100, 101, 102] + crystal_disk = {100: 0, 101: 0, 102: 0} + + m = build_calo_root_map(sim_ids, ancestors, hit_sim_ids, + hit_cry_ids, crystal_disk) + self.assertEqual(m[(3, 0)], 1, "SimP 3 should root to 1 (highest in chain on disk 0)") + self.assertEqual(m[(2, 0)], 1, "SimP 2 should root to 1") + self.assertEqual(m[(1, 0)], 1) + + def test_gap_in_chain(self): + """Ancestor chain has gaps (intermediate not in calomcsim).""" + # SimP 10 → SimP 6 (not in calo) → SimP 4 (in calo) + sim_ids = [4, 10] + ancestors = [[], [6, 4]] # 6 not in sim_ids + hit_sim_ids = [[4], [10]] + hit_cry_ids = [100, 101] + crystal_disk = {100: 0, 101: 0} + + m = build_calo_root_map(sim_ids, ancestors, 
hit_sim_ids, + hit_cry_ids, crystal_disk) + self.assertEqual(m[(10, 0)], 4, + "SimP 10 should root to 4 (skip gap at 6)") + + def test_both_disks(self): + """SimParticle depositing in both disks → separate entries per disk.""" + sim_ids = [10] + ancestors = [[]] + hit_sim_ids = [[10], [10]] + hit_cry_ids = [100, 700] + crystal_disk = {100: 0, 700: 1} + + m = build_calo_root_map(sim_ids, ancestors, hit_sim_ids, + hit_cry_ids, crystal_disk) + self.assertEqual(m[(10, 0)], 10) + self.assertEqual(m[(10, 1)], 10) + + +class TestAssignMcTruthPrimary(unittest.TestCase): + """Tests for assign_mc_truth_primary().""" + + def _simple_root_map(self, mapping): + """Helper: build calo_root_map from {(pid, disk): root} dict.""" + return mapping + + def test_same_shower_positive(self): + """Two hits from different SimPs but same calo-root → edge label 1.""" + sim_ids = [[4], [5]] + edeps = [[5.0], [3.0]] + disks = np.array([0, 0]) + edge_index = np.array([[0, 1], [1, 0]]) + root_map = {(4, 0): 4, (5, 0): 4} + + y, mask, tc, amb = assign_mc_truth_primary( + sim_ids, edeps, disks, edge_index, root_map) + self.assertTrue(mask.all()) + self.assertTrue((y == 1).all(), + "Same calo-root → same cluster → label 1") + self.assertEqual(tc[0], tc[1]) + + def test_different_showers_negative(self): + """Two hits from different calo-roots → edge label 0.""" + sim_ids = [[4], [200]] + edeps = [[5.0], [3.0]] + disks = np.array([0, 0]) + edge_index = np.array([[0, 1], [1, 0]]) + root_map = {(4, 0): 4, (200, 0): 200} + + y, mask, tc, amb = assign_mc_truth_primary( + sim_ids, edeps, disks, edge_index, root_map) + self.assertTrue(mask.all()) + self.assertTrue((y == 0).all()) + self.assertNotEqual(tc[0], tc[1]) + + def test_ambiguity_resolved_by_grouping(self): + """Hit with two SimPs from same root → NOT ambiguous (deposits sum).""" + # Hit 0: 40% from SimP 5, 60% from SimP 11 — both root to 4 + sim_ids = [[5, 11]] + edeps = [[0.4, 0.6]] + disks = np.array([0]) + edge_index = np.empty((2, 0), 
dtype=np.int64) + root_map = {(5, 0): 4, (11, 0): 4} + + _, _, tc, amb = assign_mc_truth_primary( + sim_ids, edeps, disks, edge_index, root_map) + self.assertFalse(amb[0], + "Both SimPs root to 4 → purity 100% → not ambiguous") + self.assertGreaterEqual(tc[0], 0) + + def test_still_ambiguous_different_roots(self): + """Hit with two SimPs from different roots below threshold → ambiguous.""" + sim_ids = [[100, 200]] + edeps = [[0.6, 0.4]] + disks = np.array([0]) + edge_index = np.empty((2, 0), dtype=np.int64) + root_map = {(100, 0): 100, (200, 0): 200} + + _, _, tc, amb = assign_mc_truth_primary( + sim_ids, edeps, disks, edge_index, root_map) + self.assertTrue(amb[0], "Different roots, purity 60% < 70% → ambiguous") + self.assertEqual(tc[0], -1) + + def test_cross_disk_separate_clusters(self): + """Same calo-root on different disks → different truth clusters.""" + sim_ids = [[10], [10]] + edeps = [[5.0], [3.0]] + disks = np.array([0, 1]) + edge_index = np.array([[0, 1], [1, 0]]) + root_map = {(10, 0): 10, (10, 1): 10} + + y, mask, tc, amb = assign_mc_truth_primary( + sim_ids, edeps, disks, edge_index, root_map) + self.assertTrue(mask.all()) + self.assertTrue((y == 0).all(), + "Different disks → different clusters") + self.assertNotEqual(tc[0], tc[1]) + + def test_fallback_when_pid_not_in_root_map(self): + """SimParticle not in calo_root_map → falls back to pid itself.""" + sim_ids = [[999]] + edeps = [[1.0]] + disks = np.array([0]) + edge_index = np.empty((2, 0), dtype=np.int64) + root_map = {} # empty + + _, _, tc, amb = assign_mc_truth_primary( + sim_ids, edeps, disks, edge_index, root_map) + self.assertFalse(amb[0]) + self.assertGreaterEqual(tc[0], 0) + + def test_empty_hit(self): + """Hit with no SimParticle info → ambiguous.""" + sim_ids = [[], [100]] + edeps = [[], [1.0]] + disks = np.array([0, 0]) + edge_index = np.array([[0, 1], [1, 0]]) + root_map = {(100, 0): 100} + + y, mask, tc, amb = assign_mc_truth_primary( + sim_ids, edeps, disks, edge_index, root_map) 
+ self.assertTrue(amb[0]) + self.assertEqual(tc[0], -1) + self.assertFalse(mask.any()) + + def test_output_shapes_and_dtypes(self): + """Check shapes and dtypes of all outputs.""" + sim_ids = [[10], [20], [10]] + edeps = [[1.0], [1.0], [1.0]] + disks = np.array([0, 0, 0]) + edge_index = np.array([[0, 1, 2], [1, 2, 0]]) + root_map = {(10, 0): 10, (20, 0): 20} + + y, mask, tc, amb = assign_mc_truth_primary( + sim_ids, edeps, disks, edge_index, root_map) + self.assertEqual(y.shape, (3,)) + self.assertEqual(mask.shape, (3,)) + self.assertEqual(tc.shape, (3,)) + self.assertEqual(amb.shape, (3,)) + self.assertEqual(y.dtype, np.int64) + self.assertEqual(mask.dtype, bool) + self.assertEqual(tc.dtype, np.int64) + self.assertEqual(amb.dtype, bool) + + def test_multi_hit_shower(self): + """Multiple hits from same shower (different SimPs) → same cluster.""" + # SimP 4 (signal), SimP 5 (brem from 4), SimP 11 (brem from shower) + sim_ids = [[4], [5], [11], [200]] + edeps = [[10.0], [2.0], [1.5], [5.0]] + disks = np.array([0, 0, 0, 0]) + edge_index = np.array([ + [0, 0, 1, 1, 2, 2, 0, 3], + [1, 2, 0, 2, 0, 1, 3, 0], + ]) + root_map = {(4, 0): 4, (5, 0): 4, (11, 0): 4, (200, 0): 200} + + y, mask, tc, amb = assign_mc_truth_primary( + sim_ids, edeps, disks, edge_index, root_map) + self.assertTrue(mask.all()) + self.assertFalse(amb.any()) + # Hits 0,1,2 share calo-root 4 → same cluster + self.assertEqual(tc[0], tc[1]) + self.assertEqual(tc[1], tc[2]) + # Hit 3 (root 200) → different cluster + self.assertNotEqual(tc[0], tc[3]) + # Edges within shower → 1, edges to 200 → 0 + for idx in range(6): + self.assertEqual(y[idx], 1) + self.assertEqual(y[6], 0) + self.assertEqual(y[7], 0) + + +if __name__ == "__main__": + unittest.main(verbosity=2) From 8b9ae23e143efadbfb082fc05058273a830d9d9a Mon Sep 17 00:00:00 2001 From: Sam Zhou Date: Mon, 4 May 2026 15:17:27 -0500 Subject: [PATCH 2/3] Add SimpleEdgeNet deploy wrapper + ONNX export pipeline Both trained models in CaloClusterGNN/ now have a 
complete training-to-ONNX path inside MLTrain (consistent with the TrkQual pattern of shipping conversion scripts alongside training). New / restored: * src/models/calo_cluster_net_deploy.py tensor-API wrapper around CaloClusterNet (no PyG Data, no node-saliency head); used by ONNX export so torch.onnx.export can trace it. * src/models/simple_edge_net_deploy.py same shape for SimpleEdgeNet. No node head to bypass, so it's a thin pass-through. * scripts/export_onnx.py --model {ccn,sen} flag with per-model presets (checkpoint, output path, model_version). Stamps metadata_props {model_version, node_features, edge_features} into the .onnx after export. * scripts/export_norm_stats.py writes the train-split z-score stats next to the .onnx as a flat JSON sidecar so the C++ side doesn't need a LibTorch dep to read 28 floats. * scripts/validate_onnx.py --model flag with per-model preset for tau_edge and tolerance. Asserts: - max abs-diff edge_logits within tol on the full val split - zero per-edge threshold flips at tau_edge (proxy for cluster-reco byte-equivalence with the deployed C++ pipeline) * tests/test_calo_cluster_net_deploy.py (9 tests) * tests/test_export_onnx.py (5 tests) * tests/test_export_norm_stats.py (8 tests) README extended with an "Exporting a Trained Model to ONNX" section that documents the full chain for both models, the metadata_props deployment contract, and the per-model frozen tau_edge/tol values used by validate_onnx.py. Test count goes from 88 to 110 (4 conditionally skipped on a fresh checkout when no trained checkpoint is present locally; this is by design and the skip messages name the missing file). Also acknowledges Claude assistance in README.
--- CaloClusterGNN/README.md | 39 ++++ CaloClusterGNN/scripts/export_norm_stats.py | 97 ++++++++ CaloClusterGNN/scripts/export_onnx.py | 190 +++++++++++++++ CaloClusterGNN/scripts/validate_onnx.py | 217 ++++++++++++++++++ .../src/models/calo_cluster_net_deploy.py | 108 +++++++++ .../src/models/simple_edge_net_deploy.py | 96 ++++++++ .../tests/test_calo_cluster_net_deploy.py | 179 +++++++++++++++ .../tests/test_export_norm_stats.py | 130 +++++++++++ CaloClusterGNN/tests/test_export_onnx.py | 97 ++++++++ 9 files changed, 1153 insertions(+) create mode 100644 CaloClusterGNN/scripts/export_norm_stats.py create mode 100644 CaloClusterGNN/scripts/export_onnx.py create mode 100644 CaloClusterGNN/scripts/validate_onnx.py create mode 100644 CaloClusterGNN/src/models/calo_cluster_net_deploy.py create mode 100644 CaloClusterGNN/src/models/simple_edge_net_deploy.py create mode 100644 CaloClusterGNN/tests/test_calo_cluster_net_deploy.py create mode 100644 CaloClusterGNN/tests/test_export_norm_stats.py create mode 100644 CaloClusterGNN/tests/test_export_onnx.py diff --git a/CaloClusterGNN/README.md b/CaloClusterGNN/README.md index cb51022..d3f08fe 100644 --- a/CaloClusterGNN/README.md +++ b/CaloClusterGNN/README.md @@ -149,6 +149,36 @@ Five steps, end to end. All paths are relative to `CaloClusterGNN/`. python3 scripts/failure_audit.py ``` +### Exporting a Trained Model to ONNX + +Two artifacts ship to deployment per model: the `.onnx` itself and a +small JSON sidecar containing the train-split z-score normalisation +stats (so the C++ side doesn't need a LibTorch dependency to read 28 +floats). Both are stamped with `metadata_props` carrying +`model_version`, `node_features`, `edge_features`. The C++ session +loader asserts these against FHiCL expectations at job start, so any +silent layout drift after a retraining is caught loudly. 
+ +```bash +# After training a CCN run (the production model) +python3 scripts/export_onnx.py --model ccn # -> outputs/onnx/calo_cluster_net_v2_stage1.onnx +python3 scripts/export_norm_stats.py # -> outputs/onnx/...norm.json + +# Or for SimpleEdgeNet +python3 scripts/export_onnx.py --model sen # -> outputs/onnx/simple_edge_net_v2.onnx + +# Validate PyTorch <-> ONNX Runtime parity on the full val set +python3 scripts/validate_onnx.py --model ccn # max abs diff <= 1e-5, 0 threshold flips at tau=0.20 +python3 scripts/validate_onnx.py --model sen # max abs diff <= 5e-3, 0 threshold flips at tau=0.26 +``` + +`scripts/export_onnx.py` knows the per-model preset (checkpoint +path, output path, `model_version` string) and ships the +`metadata_props` keys in one call. To ship a new model release, +update Mu2e/Offline `Offline/CaloCluster/data/` (or the configured +Mu2e data area) with the new artifact, bump the FHiCL +`expectedModelVersion`, and re-run the C++ parity test. + ### Frozen Recipe Values These match the deployment defaults (see Deployment below). If you @@ -193,6 +223,15 @@ up by `art::ConfigFileLookupPolicy` at job start. The deployment-side parity gate (Python pipeline vs C++ Offline pipeline, byte-exact on cluster labels) lives in the Mu2e/Offline PR for this work. +## Acknowledgements + +Development of this training pipeline was assisted by Anthropic's +Claude (Claude Code). All scientific decisions, hyperparameter +choices, validation results, and the v1->v2 truth-definition +campaign are the author's own work; Claude was used as a coding +assistant for implementation, refactoring, code review, and +documentation drafting. + ## License This subdirectory inherits the MLTrain repository LICENSE. 
diff --git a/CaloClusterGNN/scripts/export_norm_stats.py b/CaloClusterGNN/scripts/export_norm_stats.py new file mode 100644 index 0000000..3c33dba --- /dev/null +++ b/CaloClusterGNN/scripts/export_norm_stats.py @@ -0,0 +1,97 @@ +"""Export train-split z-score statistics to a JSON sidecar for the C++ deployment. + +The training pipeline writes `data/normalization_stats.pt` (a torch blob). +The C++ `art::EDProducer` consuming the `.onnx` should not need a LibTorch +dependency to read 28 floats — this script writes the same stats out as a +plain JSON file alongside the `.onnx`. + +Default input/output match the v2_stage1 deployment artifact: + + python3 scripts/export_norm_stats.py + python3 scripts/export_norm_stats.py --stats --output + +This is step 16a in docs/plan.md. +""" + +from __future__ import annotations + +import argparse +import json +import sys +from pathlib import Path + +sys.path.insert(0, str(Path(__file__).resolve().parents[1])) + +import torch + +# Names match the tables in docs/onnx_deployment.md §3. Order is the +# canonical feature index — the C++ side should assert against this. +NODE_FEATURE_NAMES = ["log_e", "t", "x", "y", "r", "e_rel"] +EDGE_FEATURE_NAMES = ["dx", "dy", "d", "dt", "dlog_e", "asym_e", "logsum_e", "dr"] + + +def stats_to_dict(stats: dict) -> dict: + """Convert a torch-blob stats dict to a JSON-ready dict. + + Layout is deliberately flat and self-describing so a C++ JSON parser + can pick out exactly the fields it needs. 
+ """ + node_mean = stats["node_mean"].tolist() + node_std = stats["node_std"].tolist() + edge_mean = stats["edge_mean"].tolist() + edge_std = stats["edge_std"].tolist() + + if len(node_mean) != len(NODE_FEATURE_NAMES): + raise ValueError( + f"Expected {len(NODE_FEATURE_NAMES)} node features, got {len(node_mean)}" + ) + if len(edge_mean) != len(EDGE_FEATURE_NAMES): + raise ValueError( + f"Expected {len(EDGE_FEATURE_NAMES)} edge features, got {len(edge_mean)}" + ) + + return { + "schema_version": 1, + "node_features": NODE_FEATURE_NAMES, + "edge_features": EDGE_FEATURE_NAMES, + "node_mean": node_mean, + "node_std": node_std, + "edge_mean": edge_mean, + "edge_std": edge_std, + "node_count": int(stats["node_count"]), + "edge_count": int(stats["edge_count"]), + } + + +def main(): + parser = argparse.ArgumentParser(description=__doc__.split("\n\n")[0]) + parser.add_argument( + "--stats", type=Path, + default=Path("data/normalization_stats.pt"), + help="Input torch blob with node/edge mean/std tensors.", + ) + parser.add_argument( + "--output", type=Path, + default=Path("outputs/onnx/calo_cluster_net_v2_stage1.norm.json"), + help="JSON sidecar destination (next to the .onnx).", + ) + args = parser.parse_args() + + stats = torch.load(args.stats, weights_only=True) + payload = stats_to_dict(stats) + + args.output.parent.mkdir(parents=True, exist_ok=True) + with args.output.open("w") as f: + json.dump(payload, f, indent=2) + f.write("\n") + + size = args.output.stat().st_size + print(f"Read {args.stats}") + print(f"Wrote {args.output} ({size} bytes)") + print(f" node features: {payload['node_features']}") + print(f" edge features: {payload['edge_features']}") + print(f" node_count={payload['node_count']:,} edge_count={payload['edge_count']:,}") + + +if __name__ == "__main__": + main() diff --git a/CaloClusterGNN/scripts/export_onnx.py b/CaloClusterGNN/scripts/export_onnx.py new file mode 100644 index 0000000..727c8b9 --- /dev/null +++ b/CaloClusterGNN/scripts/export_onnx.py 
@@ -0,0 +1,190 @@
+"""Export a trained CaloClusterNet to ONNX for C++ deployment.
+
+Wraps the checkpoint in CaloClusterNetDeploy (edge head only, tensor I/O)
+and traces it with torch.onnx.export using a real normalised val graph
+as dummy input. Hit count N and edge count E are marked dynamic so the
+exported graph handles any disk-graph size.
+
+Defaults target the winning CCN+BFS10 checkpoint (v2_stage1).
+
+    python3 scripts/export_onnx.py
+    python3 scripts/export_onnx.py --checkpoint PATH --output PATH
+
+This is step 15b in docs/plan.md. Parity validation against PyTorch
+(step 15c) lives in a separate script.
+"""
+
+from __future__ import annotations
+
+import argparse
+import sys
+from pathlib import Path
+
+sys.path.insert(0, str(Path(__file__).resolve().parents[1]))
+
+import onnx
+import torch
+
+from src.models.calo_cluster_net_deploy import CaloClusterNetDeploy
+from src.models.simple_edge_net_deploy import SimpleEdgeNetDeploy
+
+# Per-model presets — used when --model is one of the recognised values.
+# Each entry pins the deploy wrapper, the run-dir checkpoint, the output
+# filename, and the version string stamped into metadata_props.
+MODEL_PRESETS = {
+    "ccn": {
+        "wrapper": CaloClusterNetDeploy,
+        "checkpoint": Path("outputs/runs/calo_cluster_net_v2_stage1/checkpoints/best_model.pt"),
+        "output": Path("outputs/onnx/calo_cluster_net_v2_stage1.onnx"),
+        "model_version": "calo-cluster-net-v2-stage1",
+    },
+    "sen": {
+        "wrapper": SimpleEdgeNetDeploy,
+        "checkpoint": Path("outputs/runs/simple_edge_net_v2/checkpoints/best_model.pt"),
+        "output": Path("outputs/onnx/simple_edge_net_v2.onnx"),
+        "model_version": "simple-edge-net-v2",
+    },
+}
+
+# Defaults stamped into the ONNX `metadata_props` map after export. The
+# C++ deployment asserts these at session load (FHiCL passes the
+# expected values); a mismatch aborts the job. Bump `model_version`
+# on any layout-breaking change — new feature set, retrained weights,
+# opset bump. Bump `node_features` / `edge_features` whenever the
+# normalised feature columns change order or meaning, so the
+# `CaloHitGraphMaker` can fail loudly instead of feeding scrambled
+# tensors into the model.
+DEFAULT_MODEL_VERSION = "calo-cluster-net-v2-stage1"
+NODE_FEATURE_NAMES = ["log_e", "t", "x", "y", "r", "e_rel"]
+EDGE_FEATURE_NAMES = ["dx", "dy", "d", "dt", "dlog_e", "asym_e", "logsum_e", "dr"]
+
+
+def stamp_metadata_props(onnx_path: Path, version: str,
+                         node_features: list[str], edge_features: list[str]) -> None:
+    """Set the deployment-contract entries on an ONNX file.
+
+    Idempotent for the keys we own: removes any existing entries with
+    the same keys before appending. Other entries (e.g. PyTorch's own
+    producer info) are preserved.
+    """
+    keys_we_own = {"model_version", "node_features", "edge_features"}
+    m = onnx.load(str(onnx_path))
+    keep = [p for p in m.metadata_props if p.key not in keys_we_own]
+    del m.metadata_props[:]
+    m.metadata_props.extend(keep)
+    for key, value in (
+        ("model_version", version),
+        ("node_features", ",".join(node_features)),
+        ("edge_features", ",".join(edge_features)),
+    ):
+        e = m.metadata_props.add()
+        e.key = key
+        e.value = value
+    onnx.save(m, str(onnx_path))
+
+
+def pick_example_graph(val_pt_path: Path, min_edges: int = 20):
+    """Return (x, edge_index, edge_attr) from the first non-trivial val graph.
+
+    The tracer bakes in the example's shapes as the defaults for any
+    static dimensions. We pass a graph with reasonable N and E so the
+    trace doesn't happen to pick up degenerate shortcuts.
+    """
+    graphs = torch.load(val_pt_path, weights_only=False, map_location="cpu")
+    for g in graphs:
+        if g.edge_index.size(1) >= min_edges and g.x.size(0) >= 5:
+            return g.x.contiguous(), g.edge_index.contiguous(), g.edge_attr.contiguous()
+    raise RuntimeError(
+        f"No graph with >= {min_edges} edges found in {val_pt_path}"
+    )
+
+
+def main():
+    parser = argparse.ArgumentParser(description=__doc__.split("\n\n")[0])
+    parser.add_argument(
+        "--model", choices=sorted(MODEL_PRESETS), default="ccn",
+        help="Which model to export. 'ccn' = CaloClusterNet (default); "
+             "'sen' = SimpleEdgeNet.",
+    )
+    parser.add_argument(
+        "--checkpoint", type=Path, default=None,
+        help="Checkpoint path; defaults to the preset for --model.",
+    )
+    parser.add_argument(
+        "--val-pt", type=Path, default=Path("data/processed/val.pt"),
+        help="Packed val graphs for dummy input.",
+    )
+    parser.add_argument(
+        "--output", type=Path, default=None,
+        help="Output .onnx path; defaults to the preset for --model.",
+    )
+    parser.add_argument(
+        "--opset", type=int, default=17,
+        help="ONNX opset version. 17+ is supported by ONNX Runtime 1.17+.",
+    )
+    parser.add_argument(
+        "--model-version", type=str, default=None,
+        help="Stamped into ONNX metadata_props['model_version']. "
+             "Defaults to the preset for --model. C++ asserts this at "
+             "session load via FHiCL.",
+    )
+    args = parser.parse_args()
+
+    preset = MODEL_PRESETS[args.model]
+    if args.checkpoint is None: args.checkpoint = preset["checkpoint"]
+    if args.output is None: args.output = preset["output"]
+    if args.model_version is None: args.model_version = preset["model_version"]
+    Wrapper = preset["wrapper"]
+
+    args.output.parent.mkdir(parents=True, exist_ok=True)
+
+    print(f"Loading checkpoint: {args.checkpoint} (model={args.model})")
+    model = Wrapper.from_checkpoint(args.checkpoint)
+    n_params = sum(p.numel() for p in model.parameters())
+    print(f"Model: {model.__class__.__name__} ({n_params:,} params, eval={not model.training})")
+
+    print(f"Example input from: {args.val_pt}")
+    x, ei, ea = pick_example_graph(args.val_pt)
+    N, E = x.size(0), ei.size(1)
+    print(f" x : {tuple(x.shape)} dtype={x.dtype}")
+    print(f" edge_index : {tuple(ei.shape)} dtype={ei.dtype}")
+    print(f" edge_attr : {tuple(ea.shape)} dtype={ea.dtype}")
+    print(f" (N={N}, E={E})")
+
+    with torch.no_grad():
+        out = model(x, ei, ea)
+    print(
+        f" PyTorch edge_logits: shape={tuple(out.shape)} "
+        f"mean={out.mean().item():.4f} std={out.std().item():.4f} "
+        f"min={out.min().item():.3f} max={out.max().item():.3f}"
+    )
+
+    print(f"Exporting to: {args.output} (opset {args.opset})")
+    torch.onnx.export(
+        model,
+        (x, ei, ea),
+        str(args.output),
+        input_names=["x", "edge_index", "edge_attr"],
+        output_names=["edge_logits"],
+        dynamic_axes={
+            "x": {0: "N"},
+            "edge_index": {1: "E"},
+            "edge_attr": {0: "E"},
+            "edge_logits": {0: "E"},
+        },
+        opset_version=args.opset,
+        do_constant_folding=True,
+    )
+    stamp_metadata_props(
+        args.output, args.model_version,
+        NODE_FEATURE_NAMES, EDGE_FEATURE_NAMES,
+    )
+    size_mb = args.output.stat().st_size / (1024 * 1024)
+    print(f"Wrote {args.output} ({size_mb:.2f} MB)")
+    print(f" metadata_props.model_version = {args.model_version!r}")
+    print(f" metadata_props.node_features = {','.join(NODE_FEATURE_NAMES)!r}")
+    print(f" metadata_props.edge_features = {','.join(EDGE_FEATURE_NAMES)!r}")
+
+
+if __name__ == "__main__":
+    main()
diff --git a/CaloClusterGNN/scripts/validate_onnx.py b/CaloClusterGNN/scripts/validate_onnx.py
new file mode 100644
index 0000000..4a1d974
--- /dev/null
+++ b/CaloClusterGNN/scripts/validate_onnx.py
@@ -0,0 +1,217 @@
+"""Validate parity: PyTorch CaloClusterNetDeploy vs exported ONNX model.
+
+Runs both runtimes over every non-trivial graph in data/processed/val.pt
+and reports:
+
+  - max and percentile edge-logit abs diffs across the whole val set
+  - dynamic-axes coverage (smallest and largest (N, E) actually exercised)
+  - per-edge threshold-decision flips at tau_edge (proxy for cluster
+    assembly parity — if logits match and thresholds don't flip, the
+    downstream deterministic post-processing agrees by construction)
+  - per-graph timing for both runtimes on CPU
+
+Exits 0 if max abs diff <= --tol (default 1e-5) AND no threshold flips.
+
+This is step 15c in docs/plan.md. Prerequisites: 15a (deploy wrapper),
+15b (export script already run so that the .onnx file exists).
+"""
+
+from __future__ import annotations
+
+import argparse
+import sys
+import time
+from pathlib import Path
+
+sys.path.insert(0, str(Path(__file__).resolve().parents[1]))
+
+import numpy as np
+import onnxruntime as ort
+import torch
+
+from src.models.calo_cluster_net_deploy import CaloClusterNetDeploy
+from src.models.simple_edge_net_deploy import SimpleEdgeNetDeploy
+
+
+# Per-model presets matching scripts/export_onnx.py.
+# `tol` is the max-abs-diff bar on raw logits. CCN's logits fall in
+# [-5, +0] so 1e-5 is tight; SEN's logits are large-negative (~-1k to
+# -3k) so the same arithmetic precision shows up as ~1e-3 abs error.
+# What's load-bearing for cluster-reco correctness is the
+# "threshold-flips at tau_edge" line, which must be zero for both.
+_PRESETS = {
+    "ccn": {
+        "wrapper": CaloClusterNetDeploy,
+        "checkpoint": Path("outputs/runs/calo_cluster_net_v2_stage1/checkpoints/best_model.pt"),
+        "onnx": Path("outputs/onnx/calo_cluster_net_v2_stage1.onnx"),
+        "tau_edge": 0.20,
+        "tol": 1e-5,
+    },
+    "sen": {
+        "wrapper": SimpleEdgeNetDeploy,
+        "checkpoint": Path("outputs/runs/simple_edge_net_v2/checkpoints/best_model.pt"),
+        "onnx": Path("outputs/onnx/simple_edge_net_v2.onnx"),
+        "tau_edge": 0.26,
+        "tol": 5e-3,
+    },
+}
+
+
+def _sigmoid(x: np.ndarray) -> np.ndarray:
+    return 1.0 / (1.0 + np.exp(-x.astype(np.float64)))
+
+
+def main() -> int:
+    parser = argparse.ArgumentParser(description=__doc__.split("\n\n")[0])
+    parser.add_argument(
+        "--model", choices=sorted(_PRESETS), default="ccn",
+        help="Which model to validate. 'ccn' = CaloClusterNet (default); "
+             "'sen' = SimpleEdgeNet.",
+    )
+    parser.add_argument(
+        "--checkpoint", type=Path, default=None,
+        help="Checkpoint path; defaults to the preset for --model.",
+    )
+    parser.add_argument(
+        "--onnx", type=Path, default=None,
+        help=".onnx path; defaults to the preset for --model.",
+    )
+    parser.add_argument(
+        "--val-pt", type=Path, default=Path("data/processed/val.pt"),
+    )
+    parser.add_argument(
+        "--tol", type=float, default=None,
+        help="Max allowable abs diff in edge logits. "
+             "Defaults to the preset for --model "
+             "(CCN: 1e-5, SEN: 5e-3 -- SEN's raw logits are ~1000x larger).",
+    )
+    parser.add_argument(
+        "--tau-edge", type=float, default=None,
+        help="Edge threshold for the decision-flip proxy. "
+             "Defaults to the preset for --model "
+             "(CCN: 0.20, SEN: 0.26).",
+    )
+    parser.add_argument(
+        "--n-graphs", type=int, default=None,
+        help="Cap number of graphs to check. Default: all.",
+    )
+    args = parser.parse_args()
+
+    preset = _PRESETS[args.model]
+    if args.checkpoint is None: args.checkpoint = preset["checkpoint"]
+    if args.onnx is None: args.onnx = preset["onnx"]
+    if args.tau_edge is None: args.tau_edge = preset["tau_edge"]
+    if args.tol is None: args.tol = preset["tol"]
+    Wrapper = preset["wrapper"]
+
+    if not args.onnx.exists():
+        print(f"ONNX model not found: {args.onnx}", file=sys.stderr)
+        print("Run scripts/export_onnx.py first (15b).", file=sys.stderr)
+        return 2
+
+    print(f"PyTorch checkpoint: {args.checkpoint} (model={args.model})")
+    model = Wrapper.from_checkpoint(args.checkpoint)
+
+    print(f"ONNX model: {args.onnx}")
+    sess = ort.InferenceSession(str(args.onnx), providers=["CPUExecutionProvider"])
+
+    print(f"Val graphs: {args.val_pt}")
+    val = torch.load(args.val_pt, weights_only=False, map_location="cpu")
+    total = len(val)
+    if args.n_graphs:
+        val = val[: args.n_graphs]
+    print(f"Checking {len(val)} of {total} graphs (tau_edge={args.tau_edge})")
+
+    all_diffs: list[np.ndarray] = []
+    per_graph_max: list[float] = []
+    pytorch_ms: list[float] = []
+    onnx_ms: list[float] = []
+    ns: list[int] = []
+    es: list[int] = []
+    flips = 0
+    n_edges_total = 0
+    n_skipped = 0
+    max_diff = 0.0
+
+    for i, g in enumerate(val):
+        E = int(g.edge_index.size(1))
+        if E == 0:
+            n_skipped += 1
+            continue
+
+        x_np = g.x.numpy()
+        ei_np = g.edge_index.numpy()
+        ea_np = g.edge_attr.numpy()
+
+        t0 = time.perf_counter()
+        with torch.no_grad():
+            ref = model(g.x, g.edge_index, g.edge_attr).numpy()
+        pytorch_ms.append((time.perf_counter() - t0) * 1e3)
+
+        t0 = time.perf_counter()
+        test = sess.run(
+            ["edge_logits"],
+            {"x": x_np, "edge_index": ei_np, "edge_attr": ea_np},
+        )[0]
+        onnx_ms.append((time.perf_counter() - t0) * 1e3)
+
+        diff = np.abs(ref - test)
+        all_diffs.append(diff)
+        per_graph_max.append(float(diff.max()))
+        max_diff = max(max_diff, float(diff.max()))
+
+        # Threshold-decision agreement at tau_edge (proxy for cluster
+        # parity).
+        ref_above = _sigmoid(ref) >= args.tau_edge
+        test_above = _sigmoid(test) >= args.tau_edge
+        flips += int((ref_above != test_above).sum())
+        n_edges_total += E
+
+        ns.append(int(g.x.size(0)))
+        es.append(E)
+
+        if (i + 1) % 500 == 0:
+            print(f" [{i + 1}/{len(val)}] running max diff = {max_diff:.2e} flips = {flips}")
+
+    flat = np.concatenate(all_diffs) if all_diffs else np.array([0.0])
+    n_checked = len(per_graph_max)
+
+    print("\n=== Parity (edge logits) ===")
+    print(f"Graphs checked: {n_checked} (skipped {n_skipped} with E=0)")
+    print(f"Total edges: {n_edges_total:,}")
+    print(f"Max abs diff: {max_diff:.3e}")
+    print(f"Mean abs diff: {flat.mean():.3e}")
+    for p in (50, 90, 99, 99.9):
+        print(f"p{p:<5g}abs diff: {np.percentile(flat, p):.3e}")
+
+    print("\n=== Dynamic-axes coverage ===")
+    print(f"N (hits): min={min(ns)} max={max(ns)} median={int(np.median(ns))}")
+    print(f"E (edges): min={min(es)} max={max(es)} median={int(np.median(es))}")
+
+    print(f"\n=== Threshold decisions at tau={args.tau_edge} ===")
+    flip_pct = 100.0 * flips / n_edges_total if n_edges_total else 0.0
+    print(f"Flipped decisions: {flips} / {n_edges_total:,} ({flip_pct:.4f}%)")
+
+    print("\n=== CPU timing ===")
+    py = np.array(pytorch_ms)
+    on = np.array(onnx_ms)
+    print(f"PyTorch mean={py.mean():.2f} ms median={np.median(py):.2f} ms total={py.sum() / 1e3:.1f} s")
+    print(f"ONNX RT mean={on.mean():.2f} ms median={np.median(on):.2f} ms total={on.sum() / 1e3:.1f} s")
+    if np.median(on) > 0:
+        print(f"Speedup (PyTorch / ONNX median): {np.median(py) / np.median(on):.2f}x")
+
+    ok_logits = max_diff <= args.tol
+    ok_flips = flips == 0
+    print()
+    if ok_logits and ok_flips:
+        print(f"PASS: max diff {max_diff:.3e} <= {args.tol:.0e}, no threshold flips.")
+        return 0
+    if not ok_logits:
+        print(f"FAIL: max abs diff {max_diff:.3e} > tol {args.tol:.0e}")
+    if not ok_flips:
+        print(f"FAIL: {flips} threshold flips at tau={args.tau_edge} "
+              f"(expected 0 given matched logits)")
+    return 1
+
+if __name__ == "__main__": + sys.exit(main()) diff --git a/CaloClusterGNN/src/models/calo_cluster_net_deploy.py b/CaloClusterGNN/src/models/calo_cluster_net_deploy.py new file mode 100644 index 0000000..5be561d --- /dev/null +++ b/CaloClusterGNN/src/models/calo_cluster_net_deploy.py @@ -0,0 +1,108 @@ +""" +CaloClusterNetDeploy — inference-only wrapper for ONNX export. + +CaloClusterNet was designed for multi-task training: it takes a PyG +``Data`` object and returns a dict ``{edge_logits, node_logits}``. Neither +is friendly for ``torch.onnx.export``: + + - ``Data`` objects can't be traced; exporters need plain tensors in. + - The node-saliency head is unused in the deployed recipe (CCN+BFS10 + uses only edge logits), and in the ``v2_stage1`` checkpoint the + node head was trained with ``lambda_node=0`` — its weights are + never-supervised noise that would only confuse a C++ caller. + +This wrapper composes a trained ``CaloClusterNet`` but exposes only the +edge path with a tensor-in / tensor-out forward signature. +""" + +from pathlib import Path + +import torch +import torch.nn as nn +import yaml + +from src.models import build_model +from src.models.calo_cluster_net import CaloClusterNet + + +class CaloClusterNetDeploy(nn.Module): + """Edge-only inference wrapper around a trained ``CaloClusterNet``. + + Reuses the submodules of ``full_model`` by reference (no weight copy), + so the wrapper stays in sync if ``full_model`` is moved to another + device. The node-saliency head is omitted entirely. + """ + + def __init__(self, full_model: CaloClusterNet): + super().__init__() + self.node_encoder = full_model.node_encoder + self.edge_encoder = full_model.edge_encoder + self.mp_blocks = full_model.mp_blocks + self.edge_head = full_model.edge_head + + def forward(self, x: torch.Tensor, edge_index: torch.Tensor, + edge_attr: torch.Tensor) -> torch.Tensor: + """Edge-logit inference. 
+ + Parameters + ---------- + x : Tensor (N, 6) + Per-hit node features (already normalised). + edge_index : Tensor (2, E) + Directed edge list. + edge_attr : Tensor (E, 8) + Per-edge features (already normalised). + + Returns + ------- + edge_logits : Tensor (E,) + Raw logits. Apply sigmoid and threshold externally. + """ + h = self.node_encoder(x) + e = self.edge_encoder(edge_attr) + for block in self.mp_blocks: + h, e = block(h, e, edge_index) + return self.edge_head(h, e, edge_index) + + @classmethod + def from_checkpoint(cls, checkpoint_path, map_location="cpu"): + """Load a deploy wrapper from a trained CaloClusterNet checkpoint. + + Reads ``config.yaml`` from the run directory alongside the + checkpoint to rebuild the full model with the original + hyperparameters, then strips the node head. + + Parameters + ---------- + checkpoint_path : str or Path + Path to a ``best_model.pt`` produced by ``train_gnn.py``. + Expected layout: ``/checkpoints/best_model.pt`` with + the config at ``/config.yaml``. + map_location : str or torch.device + Passed through to ``torch.load``. Defaults to CPU. + + Returns + ------- + CaloClusterNetDeploy + In eval mode, ready for inference or ONNX export. 
+ """ + checkpoint_path = Path(checkpoint_path) + run_dir = checkpoint_path.parent.parent + with open(run_dir / "config.yaml") as f: + cfg = yaml.safe_load(f) + + if cfg["model"].get("name") != "CaloClusterNet": + raise ValueError( + f"Deploy wrapper only supports CaloClusterNet, got " + f"{cfg['model'].get('name')} in {run_dir / 'config.yaml'}" + ) + + full = build_model(cfg) + ckpt = torch.load(checkpoint_path, weights_only=False, + map_location=map_location) + full.load_state_dict(ckpt["model_state_dict"]) + full.eval() + + wrapper = cls(full) + wrapper.eval() + return wrapper diff --git a/CaloClusterGNN/src/models/simple_edge_net_deploy.py b/CaloClusterGNN/src/models/simple_edge_net_deploy.py new file mode 100644 index 0000000..b30e3fb --- /dev/null +++ b/CaloClusterGNN/src/models/simple_edge_net_deploy.py @@ -0,0 +1,96 @@ +""" +SimpleEdgeNetDeploy -- inference-only wrapper for ONNX export. + +SimpleEdgeNet's forward takes a PyG ``Data`` object, which +``torch.onnx.export`` can't trace. This wrapper composes a trained +``SimpleEdgeNet`` and exposes a tensor-in / tensor-out forward signature +matching the ONNX deployment contract used by both this model and +CaloClusterNet (see calorimeter/GNN/docs/onnx_deployment.md). + +The model has no node head and no multi-task output, so the wrapper +is mostly a thin pass-through that re-implements the message-passing +loop with explicit tensor arguments. +""" + +from pathlib import Path + +import torch +import torch.nn as nn +from torch_geometric.utils import scatter +import yaml + +from src.models import build_model +from src.models.simple_edge_net import SimpleEdgeNet + + +class SimpleEdgeNetDeploy(nn.Module): + """Tensor-API inference wrapper around a trained ``SimpleEdgeNet``. + + Reuses the submodules of ``full_model`` by reference (no weight copy). 
+ """ + + def __init__(self, full_model: SimpleEdgeNet): + super().__init__() + self.n_mp_layers = full_model.n_mp_layers + self.node_encoder = full_model.node_encoder + self.edge_encoder = full_model.edge_encoder + self.edge_updates = full_model.edge_updates + self.node_updates = full_model.node_updates + self.edge_head = full_model.edge_head + + def forward(self, x: torch.Tensor, edge_index: torch.Tensor, + edge_attr: torch.Tensor) -> torch.Tensor: + """Edge-logit inference. + + Parameters + ---------- + x : Tensor (N, 6) + Per-hit node features (already z-score normalised). + edge_index : Tensor (2, E) + Directed edge list. + edge_attr : Tensor (E, 8) + Per-edge features (already z-score normalised). + + Returns + ------- + edge_logits : Tensor (E,) + Raw logits. Apply sigmoid + threshold externally. + """ + h = self.node_encoder(x) + e = self.edge_encoder(edge_attr) + src, dst = edge_index + + for k in range(self.n_mp_layers): + e_in = torch.cat([h[src], h[dst], e], dim=1) + e = self.edge_updates[k](e_in) + agg = scatter(e, dst, dim=0, dim_size=h.size(0), reduce="sum") + h_in = torch.cat([h, agg], dim=1) + h = self.node_updates[k](h_in) + + edge_repr = torch.cat([h[src], h[dst], e], dim=1) + edge_logits = self.edge_head(edge_repr).squeeze(-1) + return edge_logits + + @classmethod + def from_checkpoint(cls, checkpoint_path, map_location="cpu"): + """Load a deploy wrapper from a trained SimpleEdgeNet checkpoint.""" + checkpoint_path = Path(checkpoint_path) + run_dir = checkpoint_path.parent.parent + with open(run_dir / "config.yaml") as f: + cfg = yaml.safe_load(f) + + if cfg["model"].get("name") != "SimpleEdgeNet": + raise ValueError( + f"Deploy wrapper only supports SimpleEdgeNet, got " + f"{cfg['model'].get('name')} in {run_dir / 'config.yaml'}" + ) + + full = build_model(cfg) + ckpt = torch.load(checkpoint_path, weights_only=False, + map_location=map_location) + full.load_state_dict(ckpt["model_state_dict"]) + full.eval() + + wrapper = cls(full) + 
wrapper.eval() + return wrapper diff --git a/CaloClusterGNN/tests/test_calo_cluster_net_deploy.py b/CaloClusterGNN/tests/test_calo_cluster_net_deploy.py new file mode 100644 index 0000000..c2abf67 --- /dev/null +++ b/CaloClusterGNN/tests/test_calo_cluster_net_deploy.py @@ -0,0 +1,179 @@ +"""Unit tests for CaloClusterNetDeploy — the ONNX-export wrapper.""" + +import sys +import unittest +from pathlib import Path + +sys.path.insert(0, str(Path(__file__).resolve().parents[1])) + +import torch +from torch_geometric.data import Data + +from src.models.calo_cluster_net import CaloClusterNet +from src.models.calo_cluster_net_deploy import CaloClusterNetDeploy + + +def _make_graph(n_nodes=12, n_edges=30, node_dim=6, edge_dim=8, seed=0): + """Synthetic PyG Data object for testing. No labels/masks needed.""" + gen = torch.Generator().manual_seed(seed) + x = torch.randn(n_nodes, node_dim, generator=gen) + edge_index = torch.randint(0, n_nodes, (2, n_edges), generator=gen) + edge_attr = torch.randn(n_edges, edge_dim, generator=gen) + return Data(x=x, edge_index=edge_index, edge_attr=edge_attr) + + +class TestDeployForward(unittest.TestCase): + """Shape, determinism, and tensor-in/tensor-out contract.""" + + def setUp(self): + torch.manual_seed(0) + self.full = CaloClusterNet(hidden_dim=32, n_mp_layers=2, dropout=0.0) + self.full.eval() + self.wrap = CaloClusterNetDeploy(self.full) + self.wrap.eval() + + def test_output_is_plain_tensor_not_dict(self): + g = _make_graph() + out = self.wrap(g.x, g.edge_index, g.edge_attr) + self.assertIsInstance(out, torch.Tensor) + self.assertEqual(out.shape, (g.edge_index.size(1),)) + self.assertEqual(out.dtype, torch.float32) + + def test_no_node_head_attribute(self): + """Wrapper should expose encoders/mp/edge head but not node head.""" + self.assertTrue(hasattr(self.wrap, "node_encoder")) + self.assertTrue(hasattr(self.wrap, "edge_encoder")) + self.assertTrue(hasattr(self.wrap, "mp_blocks")) + self.assertTrue(hasattr(self.wrap, "edge_head")) 
+ self.assertFalse(hasattr(self.wrap, "node_head")) + + def test_eval_mode_deterministic(self): + g = _make_graph() + out1 = self.wrap(g.x, g.edge_index, g.edge_attr) + out2 = self.wrap(g.x, g.edge_index, g.edge_attr) + self.assertTrue(torch.allclose(out1, out2)) + + def test_varied_graph_sizes(self): + """Wrapper should handle graphs with different N, E (dynamic axes).""" + for n_nodes, n_edges in [(5, 10), (20, 80), (1, 1), (50, 300)]: + g = _make_graph(n_nodes=n_nodes, n_edges=n_edges, seed=n_nodes) + out = self.wrap(g.x, g.edge_index, g.edge_attr) + self.assertEqual(out.shape, (n_edges,)) + + +class TestParityWithFullModel(unittest.TestCase): + """Wrapper edge logits must match CaloClusterNet's edge_logits exactly.""" + + def test_edge_logits_match_full_model(self): + torch.manual_seed(42) + full = CaloClusterNet(hidden_dim=48, n_mp_layers=3, dropout=0.0) + full.eval() + wrap = CaloClusterNetDeploy(full) + wrap.eval() + + g = _make_graph(n_nodes=15, n_edges=40, seed=7) + + with torch.no_grad(): + ref = full(g)["edge_logits"] + test = wrap(g.x, g.edge_index, g.edge_attr) + + self.assertTrue(torch.allclose(ref, test, atol=1e-6), + f"max diff: {(ref - test).abs().max().item():.2e}") + + def test_shared_weights_not_copy(self): + """Wrapper should reuse submodules by reference, not clone weights.""" + full = CaloClusterNet(hidden_dim=16, n_mp_layers=2, dropout=0.0) + wrap = CaloClusterNetDeploy(full) + self.assertIs(wrap.node_encoder, full.node_encoder) + self.assertIs(wrap.edge_head, full.edge_head) + + +class TestFromCheckpoint(unittest.TestCase): + """Loading from a real trained checkpoint (skipped if unavailable).""" + + CHECKPOINT = Path("outputs/runs/calo_cluster_net_v2_stage1/checkpoints/best_model.pt") + + def setUp(self): + if not self.CHECKPOINT.exists(): + self.skipTest(f"Checkpoint not present: {self.CHECKPOINT}") + + def test_loads_and_runs(self): + wrap = CaloClusterNetDeploy.from_checkpoint(self.CHECKPOINT) + self.assertFalse(wrap.training, "wrapper 
should be in eval mode") + + g = _make_graph(n_nodes=30, n_edges=120, seed=1) + with torch.no_grad(): + out = wrap(g.x, g.edge_index, g.edge_attr) + self.assertEqual(out.shape, (120,)) + self.assertTrue(torch.isfinite(out).all()) + + def test_matches_full_model_on_same_checkpoint(self): + """Wrapper and full model loaded from the same ckpt must agree.""" + import yaml + from src.models import build_model + + run_dir = self.CHECKPOINT.parent.parent + with open(run_dir / "config.yaml") as f: + cfg = yaml.safe_load(f) + + full = build_model(cfg) + ckpt = torch.load(self.CHECKPOINT, weights_only=False, map_location="cpu") + full.load_state_dict(ckpt["model_state_dict"]) + full.eval() + + wrap = CaloClusterNetDeploy.from_checkpoint(self.CHECKPOINT) + + g = _make_graph(n_nodes=25, n_edges=90, seed=3) + with torch.no_grad(): + ref = full(g)["edge_logits"] + test = wrap(g.x, g.edge_index, g.edge_attr) + + self.assertTrue(torch.allclose(ref, test, atol=1e-6), + f"max diff: {(ref - test).abs().max().item():.2e}") + + +class TestRealValGraph(unittest.TestCase): + """Parity test on a real normalised val graph (skipped if unavailable).""" + + CHECKPOINT = Path("outputs/runs/calo_cluster_net_v2_stage1/checkpoints/best_model.pt") + VAL_PT = Path("data/processed/val.pt") + + def setUp(self): + if not self.CHECKPOINT.exists() or not self.VAL_PT.exists(): + self.skipTest("checkpoint or packed val graphs not present") + + def test_parity_on_val_graph(self): + import yaml + from src.models import build_model + + run_dir = self.CHECKPOINT.parent.parent + with open(run_dir / "config.yaml") as f: + cfg = yaml.safe_load(f) + full = build_model(cfg) + ckpt = torch.load(self.CHECKPOINT, weights_only=False, map_location="cpu") + full.load_state_dict(ckpt["model_state_dict"]) + full.eval() + + wrap = CaloClusterNetDeploy.from_checkpoint(self.CHECKPOINT) + + val_graphs = torch.load(self.VAL_PT, weights_only=False) + # Check first few non-trivial graphs + n_checked = 0 + for g in 
val_graphs[:20]: + if g.edge_index.size(1) == 0: + continue + with torch.no_grad(): + ref = full(g)["edge_logits"] + test = wrap(g.x, g.edge_index, g.edge_attr) + max_diff = (ref - test).abs().max().item() + self.assertTrue(torch.allclose(ref, test, atol=1e-6), + f"graph {n_checked}: max diff {max_diff:.2e}") + n_checked += 1 + if n_checked >= 5: + break + + self.assertGreater(n_checked, 0, "no graphs exercised") + + +if __name__ == "__main__": + unittest.main() diff --git a/CaloClusterGNN/tests/test_export_norm_stats.py b/CaloClusterGNN/tests/test_export_norm_stats.py new file mode 100644 index 0000000..930d82b --- /dev/null +++ b/CaloClusterGNN/tests/test_export_norm_stats.py @@ -0,0 +1,130 @@ +"""Unit tests for the C++ deployment normalisation sidecar export.""" + +import json +import sys +import tempfile +import unittest +from pathlib import Path + +sys.path.insert(0, str(Path(__file__).resolve().parents[1])) + +import torch + +from scripts.export_norm_stats import ( + EDGE_FEATURE_NAMES, + NODE_FEATURE_NAMES, + stats_to_dict, +) + + +def _example_stats(): + """A stats dict shaped like the real `normalization_stats.pt`.""" + return { + "node_mean": torch.tensor([1.0, 2.0, 3.0, 4.0, 5.0, 6.0]), + "node_std": torch.tensor([0.1, 0.2, 0.3, 0.4, 0.5, 0.6]), + "edge_mean": torch.tensor([7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0]), + "edge_std": torch.tensor([0.7, 0.8, 0.9, 1.0, 1.1, 1.2, 1.3, 1.4]), + "node_count": 100, + "edge_count": 200, + } + + +class TestStatsToDict(unittest.TestCase): + def test_payload_contains_all_required_keys(self): + payload = stats_to_dict(_example_stats()) + for key in ( + "schema_version", "node_features", "edge_features", + "node_mean", "node_std", "edge_mean", "edge_std", + "node_count", "edge_count", + ): + self.assertIn(key, payload) + + def test_feature_names_match_canonical_lists(self): + payload = stats_to_dict(_example_stats()) + self.assertEqual(payload["node_features"], NODE_FEATURE_NAMES) + 
self.assertEqual(payload["edge_features"], EDGE_FEATURE_NAMES) + self.assertEqual(len(payload["node_features"]), 6) + self.assertEqual(len(payload["edge_features"]), 8) + + def test_values_round_trip_bit_exact(self): + """Floats must survive tensor → list → tensor with bit-exact equality.""" + stats = _example_stats() + payload = stats_to_dict(stats) + for key in ("node_mean", "node_std", "edge_mean", "edge_std"): + recovered = torch.tensor(payload[key], dtype=stats[key].dtype) + self.assertTrue( + torch.equal(recovered, stats[key]), + f"{key} did not round-trip exactly: {recovered} vs {stats[key]}", + ) + + def test_counts_are_python_ints(self): + payload = stats_to_dict(_example_stats()) + self.assertIsInstance(payload["node_count"], int) + self.assertIsInstance(payload["edge_count"], int) + self.assertEqual(payload["node_count"], 100) + self.assertEqual(payload["edge_count"], 200) + + def test_wrong_node_dim_raises(self): + bad = _example_stats() + bad["node_mean"] = torch.zeros(5) + bad["node_std"] = torch.ones(5) + with self.assertRaises(ValueError): + stats_to_dict(bad) + + def test_wrong_edge_dim_raises(self): + bad = _example_stats() + bad["edge_mean"] = torch.zeros(7) + bad["edge_std"] = torch.ones(7) + with self.assertRaises(ValueError): + stats_to_dict(bad) + + +class TestSidecarFileIfPresent(unittest.TestCase): + """If the real sidecar exists, it must match the train-split torch blob.""" + + def setUp(self): + self.repo = Path(__file__).resolve().parents[1] + self.stats_pt = self.repo / "data" / "normalization_stats.pt" + self.sidecar = self.repo / "outputs" / "onnx" / "calo_cluster_net_v2_stage1.norm.json" + + def test_sidecar_matches_torch_blob(self): + if not self.stats_pt.exists() or not self.sidecar.exists(): + self.skipTest("sidecar or stats blob not present in this checkout") + stats = torch.load(self.stats_pt, weights_only=True) + with self.sidecar.open() as f: + payload = json.load(f) + for key in ("node_mean", "node_std", "edge_mean", 
"edge_std"): + recovered = torch.tensor(payload[key], dtype=stats[key].dtype) + self.assertTrue( + torch.equal(recovered, stats[key]), + f"sidecar {key} disagrees with {self.stats_pt}", + ) + self.assertEqual(payload["node_count"], int(stats["node_count"])) + self.assertEqual(payload["edge_count"], int(stats["edge_count"])) + + +class TestEndToEndExport(unittest.TestCase): + """Round-trip via the script entry point: torch blob -> JSON -> dict -> torch.""" + + def test_export_then_load_round_trip(self): + stats = _example_stats() + with tempfile.TemporaryDirectory() as tmp: + tmp = Path(tmp) + pt_path = tmp / "stats.pt" + json_path = tmp / "stats.norm.json" + torch.save(stats, pt_path) + + loaded = torch.load(pt_path, weights_only=True) + payload = stats_to_dict(loaded) + with json_path.open("w") as f: + json.dump(payload, f) + + with json_path.open() as f: + recovered = json.load(f) + for key in ("node_mean", "node_std", "edge_mean", "edge_std"): + t = torch.tensor(recovered[key], dtype=stats[key].dtype) + self.assertTrue(torch.equal(t, stats[key])) + + +if __name__ == "__main__": + unittest.main() diff --git a/CaloClusterGNN/tests/test_export_onnx.py b/CaloClusterGNN/tests/test_export_onnx.py new file mode 100644 index 0000000..2c263ee --- /dev/null +++ b/CaloClusterGNN/tests/test_export_onnx.py @@ -0,0 +1,97 @@ +"""Tests for the metadata_props stamping in scripts/export_onnx.py. + +The C++ deployment asserts these keys at session load (FHiCL passes +the expected values). Round-trip via a tiny synthetic ONNX file — +fast, no checkpoint or torch-onnx export needed. 
+""" + +import sys +import tempfile +import unittest +from pathlib import Path + +sys.path.insert(0, str(Path(__file__).resolve().parents[1])) + +import onnx +from onnx import helper, TensorProto + +from scripts.export_onnx import ( + DEFAULT_MODEL_VERSION, + EDGE_FEATURE_NAMES, + NODE_FEATURE_NAMES, + stamp_metadata_props, +) + + +def _make_tiny_onnx(path: Path) -> None: + """Write a minimal valid ONNX model: identity over a 1-D float tensor.""" + inp = helper.make_tensor_value_info("x", TensorProto.FLOAT, [None]) + out = helper.make_tensor_value_info("y", TensorProto.FLOAT, [None]) + node = helper.make_node("Identity", inputs=["x"], outputs=["y"]) + graph = helper.make_graph([node], "tiny", [inp], [out]) + model = helper.make_model(graph, producer_name="test", opset_imports=[helper.make_opsetid("", 17)]) + onnx.save(model, str(path)) + + +class TestStampMetadataProps(unittest.TestCase): + def setUp(self): + self.tmp = tempfile.TemporaryDirectory() + self.path = Path(self.tmp.name) / "tiny.onnx" + _make_tiny_onnx(self.path) + + def tearDown(self): + self.tmp.cleanup() + + def _props(self) -> dict: + m = onnx.load(str(self.path)) + return {p.key: p.value for p in m.metadata_props} + + def test_stamps_three_keys(self): + stamp_metadata_props(self.path, "v1", ["a", "b"], ["x", "y", "z"]) + props = self._props() + self.assertEqual(props["model_version"], "v1") + self.assertEqual(props["node_features"], "a,b") + self.assertEqual(props["edge_features"], "x,y,z") + + def test_idempotent_overwrite(self): + stamp_metadata_props(self.path, "v1", ["a"], ["x"]) + stamp_metadata_props(self.path, "v2", ["a", "b"], ["x", "y"]) + props = self._props() + self.assertEqual(props["model_version"], "v2") + self.assertEqual(props["node_features"], "a,b") + self.assertEqual(props["edge_features"], "x,y") + # No duplicates. 
+ m = onnx.load(str(self.path)) + keys = [p.key for p in m.metadata_props] + self.assertEqual(len(keys), len(set(keys))) + + def test_preserves_other_metadata(self): + # Pre-stamp a foreign key the export shouldn't touch. + m = onnx.load(str(self.path)) + e = m.metadata_props.add() + e.key = "producer_pytorch_version" + e.value = "2.5.1" + onnx.save(m, str(self.path)) + + stamp_metadata_props(self.path, "v1", NODE_FEATURE_NAMES, EDGE_FEATURE_NAMES) + props = self._props() + self.assertEqual(props["producer_pytorch_version"], "2.5.1") + self.assertEqual(props["model_version"], "v1") + + def test_canonical_names_match_norm_sidecar(self): + # Same lists used by scripts/export_norm_stats.py — these have + # to agree, because the C++ side does both assertions. + from scripts.export_norm_stats import ( + EDGE_FEATURE_NAMES as SIDECAR_EDGE, + NODE_FEATURE_NAMES as SIDECAR_NODE, + ) + self.assertEqual(NODE_FEATURE_NAMES, SIDECAR_NODE) + self.assertEqual(EDGE_FEATURE_NAMES, SIDECAR_EDGE) + + def test_default_model_version_is_v2_stage1(self): + # Sanity-check the default the C++ deployment expects. + self.assertEqual(DEFAULT_MODEL_VERSION, "calo-cluster-net-v2-stage1") + + +if __name__ == "__main__": + unittest.main() From e7f7578b19e6969ad132f205e18024a33514bac5 Mon Sep 17 00:00:00 2001 From: Sam Zhou Date: Thu, 7 May 2026 12:36:38 -0500 Subject: [PATCH 3/3] README: link Mu2e/EventNtuple#366 (ancestorSimIds PR) --- CaloClusterGNN/README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/CaloClusterGNN/README.md b/CaloClusterGNN/README.md index d3f08fe..474bcd5 100644 --- a/CaloClusterGNN/README.md +++ b/CaloClusterGNN/README.md @@ -48,7 +48,7 @@ Two short pointers: energy in the same disk. This recovers true shower membership for hits split across crystals during showering. Requires the `calomcsim.ancestorSimIds` branch added in - [Mu2e/EventNtuple #](https://github.com/Mu2e/EventNtuple/pulls). 
+ [Mu2e/EventNtuple#366](https://github.com/Mu2e/EventNtuple/pull/366). See `src/data/truth_labels_primary.py`. ## For the Interested (Re)Trainer @@ -205,7 +205,7 @@ change them you'll want to re-tune and re-evaluate: The EventNtuples for v2 require the `calomcsim.ancestorSimIds` branch added in -[Mu2e/EventNtuple#TBD](https://github.com/Mu2e/EventNtuple/pulls). +[Mu2e/EventNtuple#366](https://github.com/Mu2e/EventNtuple/pull/366). ## Deployment