18 added files
lcsim/src/org/lcsim/recon/cluster/structural
diff -N GenericStructuralDriver.java
--- /dev/null 1 Jan 1970 00:00:00 -0000
+++ GenericStructuralDriver.java 1 Mar 2006 02:45:57 -0000 1.1
@@ -0,0 +1,188 @@
+package org.lcsim.recon.cluster.structural;
+
+import org.lcsim.util.Driver;
+import org.lcsim.event.EventHeader;
+import org.lcsim.event.Cluster;
+
+import java.util.List;
+import java.util.Vector;
+
+import org.lcsim.util.decision.*;
+import org.lcsim.recon.cluster.util.*;
+import org.lcsim.recon.cluster.analysis.*;
+import org.lcsim.event.MCParticle;
+
+/**
+ * Basic, abstract implementation of a structural driver.
+ * This provides the general framework and event loop for
+ * handling composite clusters which contain clumps and
+ * track segments. See the process() description for more detail.
+ *
+ * @author Mat Charles <[log in to unmask]>
+ * @version $Id: GenericStructuralDriver.java,v 1.1 2006/03/01 02:45:57 mcharles Exp $
+ */
+
+public abstract class GenericStructuralDriver extends Driver
+{
+ // Convenient way of passing the current event back and forth
+ transient protected EventHeader m_event;
+
+ // The internal interface which implementing classes must provide:
+ abstract protected void initializeEvent();
+ abstract protected List<Cluster> getListOfBigClusters();
+ abstract protected void initializeBigCluster(Cluster clus);
+ abstract protected List<Cluster> findTrackSegments(Cluster clus);
+ abstract protected List<Cluster> findClumps(Cluster clus);
+ abstract protected void compareTrackSegmentToTrackSegment(Cluster clus1, Cluster clus2);
+ abstract protected void compareTrackSegmentToClump(Cluster clus1, Cluster clus2);
+ abstract protected void compareClumpToClump(Cluster clus1, Cluster clus2);
+ abstract protected void finalizeBigCluster(Cluster clus, List<Cluster> vMIPs, List<Cluster> vClumps);
+ abstract protected void finalizeEvent();
+
+ /** Turn on/off debug printout */
+ public void setDebug(boolean debug) { m_debug = debug; }
+ boolean m_debug = false;
+
+ /** Supply a decision used to select large-scale clusters for processing; clusters failing it are ignored */
+ public void setIgnoreClusterDecision(DecisionMakerSingle<Cluster> dec) { m_ignoreClusterDecision = dec; }
+ public DecisionMakerSingle<Cluster> getIgnoreClusterDecision() { return m_ignoreClusterDecision; }
+ protected DecisionMakerSingle<Cluster> m_ignoreClusterDecision = new ClusterNotEmptyDecisionMaker();
+
+ /**
+ * Main event loop. For each large-scale cluster:
+ * <UL>
+ * <LI> Check whether the cluster should be ignored. If not:
+ * <LI> Call <code>initializeBigCluster</code> on the cluster
+ * <LI> Get the list of clumps with <code>findClumps</code>
+ * <LI> Get the list of track segments with <code>findTrackSegments</code>
+ * <LI> For each pairwise combination of track segments,
+ * call <code>compareTrackSegmentToTrackSegment</code>
+ * <LI> For each combination of one track segment and one clump,
+ * call <code>compareTrackSegmentToClump</code>
+ * <LI> For each pairwise combination of clumps
+ * call <code>compareClumpToClump</code>
+ * <LI> Call <code>finalizeBigCluster</code>
+ * </UL>
+ * After looping over all large-scale clusters, calls <code>finalizeEvent</code>
+ */
+ public void process(EventHeader event)
+ {
+ m_event = event;
+
+ initializeEvent();
+ List<Cluster> listOfBigClusters = getListOfBigClusters();
+ for (Cluster currentCluster : listOfBigClusters) {
+ boolean ignoreThisCluster = !(m_ignoreClusterDecision.valid(currentCluster));
+ if (!ignoreThisCluster) {
+ initializeBigCluster(currentCluster);
+ List<Cluster> vClumps = findClumps(currentCluster);
+ List<Cluster> vMIPs = findTrackSegments(currentCluster);
+ if (m_debug) { System.out.println("DEBUG: Found "+vClumps.size()+" clumps and "+vMIPs.size()+" track segments in this cluster."); }
+ // Consider MIP-MIP links, making sure to compare each pair
+ // once and once only:
+ if (vMIPs != null) {
+ for (int iMIP=0; iMIP<vMIPs.size(); iMIP++) {
+ Cluster track1 = vMIPs.get(iMIP);
+ // Compare to other MIPs:
+ for (int jMIP=iMIP+1; jMIP<vMIPs.size(); jMIP++) {
+ Cluster track2 = vMIPs.get(jMIP);
+ compareTrackSegmentToTrackSegment(track1, track2);
+ }
+ }
+ }
+ // Now consider MIP-Clump links:
+ if (vMIPs != null && vClumps != null) {
+ for (Cluster track : vMIPs) {
+ for (Cluster clump : vClumps) {
+ compareTrackSegmentToClump(track, clump);
+ }
+ }
+ }
+ // Now consider Clump-Clump links:
+ if (vClumps != null) {
+ for (int iClump=0; iClump<vClumps.size(); iClump++) {
+ Cluster clump1 = vClumps.get(iClump);
+ for (int jClump=iClump+1; jClump<vClumps.size(); jClump++) {
+ Cluster clump2 = vClumps.get(jClump);
+ compareClumpToClump(clump1, clump2);
+ }
+ }
+ }
+ // Finalize big cluster:
+ finalizeBigCluster(currentCluster, vMIPs, vClumps);
+ }
+ }
+ // OK, done looping. We may want to do a final step, such as writing out
+ // the results:
+ finalizeEvent();
+ }
+
+ /**
+ * Internal routine to check association, using Ron's code.
+ * This is used to compare one component (track segment or clump)
+ * to another. The cluster associator must be initialized first
+ * via <code>initializeClusterAssociator</code>.
+ */
+ protected boolean checkAssociationComponentToComponent(Cluster clus1, Cluster clus2)
+ {
+ // First make sure that the associator has been set up:
+ if (m_clusterAssociator == null) {
+ throw new java.lang.NullPointerException("ERROR: Tried to check association in class "+this.getClass().getName()+" but associator not initialized");
+ } else {
+ // Check whether the event cache is out of date
+ if (m_clusterAssociatorLastEvent != m_event) {
+ m_clusterAssociator.CreateLists(m_event);
+ m_clusterAssociatorLastEvent = m_event; // cache, so the lists are rebuilt at most once per event
+ }
+ // Get the cluster info list
+ List<ClusterMCPInfo> clusterInfoList = m_event.get(ClusterMCPInfo.class, m_clusterAssociatorOutputListClusterToMCParticle);
+ // Find the info for the two clusters:
+ ClusterMCPInfo info1 = null;
+ ClusterMCPInfo info2 = null;
+ for (ClusterMCPInfo info : clusterInfoList) {
+ Cluster clus = info.getCluster();
+ if (clus == clus1) { info1 = info; }
+ if (clus == clus2) { info2 = info; }
+ }
+ if (info1 == null || info2 == null) {
+ throw new java.lang.NullPointerException("ERROR: Info not found");
+ } else {
+ // Found both info objects OK.
+ // Use them to find the MCParticle that makes the biggest
+ // contribution to each cluster, and check if they match:
+ MCParticle max1 = info1.getMaxContributer();
+ MCParticle max2 = info2.getMaxContributer();
+ return (max1 == max2);
+ }
+ }
+ }
+
+ /**
+ * Initialize the cluster associator. Initialization strings are
+ * the same as used by <code>CreateClusterAnalysisLists</code>.
+ *
+ * @param hitListNames String array of names of List<CalorimeterHit> in the event
+ * @param clusterListNames String array of names of List<Cluster> in the event.
+ * @param mcListName Name of the self-consistent List<MCParticle> in the event
+ * @param outputListMCParticleToCluster Name to write out the List<MCPClusterInfo> to the event
+ * @param outputListClusterToMCParticle Name to write out the List<ClusterMCPInfo> to the event
+ * @see org.lcsim.recon.cluster.analysis.CreateClusterAnalysisLists
+ */
+ public void initializeClusterAssociator(String[] hitListNames,
+ String[] clusterListNames,
+ String mcListName,
+ String outputListMCParticleToCluster,
+ String outputListClusterToMCParticle
+ )
+ {
+ m_clusterAssociatorOutputListMCParticleToCluster = outputListMCParticleToCluster;
+ m_clusterAssociatorOutputListClusterToMCParticle = outputListClusterToMCParticle;
+ m_clusterAssociator = new CreateClusterAnalysisLists(hitListNames, clusterListNames, mcListName, outputListMCParticleToCluster, outputListClusterToMCParticle);
+ }
+
+ // Support for the association:
+ transient protected EventHeader m_clusterAssociatorLastEvent = null;
+ protected CreateClusterAnalysisLists m_clusterAssociator = null;
+ protected String m_clusterAssociatorOutputListMCParticleToCluster;
+ protected String m_clusterAssociatorOutputListClusterToMCParticle;
+
+}
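For illustration only (not part of this commit), a minimal concrete subclass
might look like the sketch below; the list name "BigClusters" is a placeholder,
and the comparison hooks just count pairs:

    class CountingStructuralDriver extends GenericStructuralDriver {
        int m_count;
        protected void initializeEvent() { m_count = 0; }
        protected List<Cluster> getListOfBigClusters() {
            return m_event.get(Cluster.class, "BigClusters"); // placeholder list name
        }
        protected void initializeBigCluster(Cluster clus) {}
        protected List<Cluster> findTrackSegments(Cluster clus) { return new Vector<Cluster>(); }
        protected List<Cluster> findClumps(Cluster clus) { return new Vector<Cluster>(); }
        protected void compareTrackSegmentToTrackSegment(Cluster clus1, Cluster clus2) { m_count++; }
        protected void compareTrackSegmentToClump(Cluster clus1, Cluster clus2) { m_count++; }
        protected void compareClumpToClump(Cluster clus1, Cluster clus2) { m_count++; }
        protected void finalizeBigCluster(Cluster clus, List<Cluster> vMIPs, List<Cluster> vClumps) {}
        protected void finalizeEvent() { System.out.println("Pairwise comparisons: "+m_count); }
    }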
lcsim/src/org/lcsim/recon/cluster/structural
diff -N HaloAssigner.java
--- /dev/null 1 Jan 1970 00:00:00 -0000
+++ HaloAssigner.java 1 Mar 2006 02:45:57 -0000 1.1
@@ -0,0 +1,137 @@
+package org.lcsim.recon.cluster.structural;
+
+import java.util.*;
+
+import hep.physics.vec.BasicHep3Vector;
+import hep.physics.vec.Hep3Vector;
+import hep.physics.vec.VecOp;
+
+import org.lcsim.event.EventHeader;
+import org.lcsim.util.Driver;
+import org.lcsim.event.Cluster;
+import org.lcsim.event.CalorimeterHit;
+import org.lcsim.recon.cluster.util.BasicCluster;
+
+/**
+ * Assign hits which are not part of a Cluster to a nearby Cluster.
+ *
+ * @author Mat Charles <[log in to unmask]>
+ * @version $Id: HaloAssigner.java,v 1.1 2006/03/01 02:45:57 mcharles Exp $
+ */
+
+public class HaloAssigner extends Driver
+{
+ /**
+ * Constructor.
+ *
+ * The input Clusters (inputClusterListName) and the input HitMap (inputHitMapName)
+ * must not have any hits in common.
+ *
+ * The output hit map may be empty if all hits are assigned. This is
+ * the behaviour in the default implementation.
+ *
+ * @param inputClusterListName Name of the List<Cluster> to grab from the event
+ * @param inputHitMapName Name of the HitMap to grab from the event
+ * @param outputClusterListName Name of the List<Cluster> to write to the event when finished
+ * @param outputHitMapName Name of the HitMap of leftover hits to write to the event when finished
+ */
+ public HaloAssigner(String inputClusterListName, String inputHitMapName, String outputClusterListName, String outputHitMapName) {
+ m_inputClusterListName = inputClusterListName;
+ m_inputHitMapName = inputHitMapName;
+ m_outputClusterListName = outputClusterListName;
+ m_outputHitMapName = outputHitMapName;
+ }
+
+ /** Process one event */
+ public void process(EventHeader event)
+ {
+ // Get the input clusters and hits:
+ List<Cluster> inputSkeletons = event.get(Cluster.class, m_inputClusterListName);
+ Map<Long, CalorimeterHit> inputHitMap = (Map<Long, CalorimeterHit>) (event.get(m_inputHitMapName));
+
+ // Prepare the outputs:
+ Map<Long, CalorimeterHit> outputHitMap = new HashMap<Long,CalorimeterHit>(inputHitMap); // initially full
+ List<Cluster> outputClusterList = new Vector<Cluster>();
+
+ // For all unused hits, assign the hit to a cluster.
+ // We do this as a two-step process to ensure that we're
+ // independent of the hit/cluster ordering.
+
+ // First pass: find assignments
+ Map<Cluster, List<CalorimeterHit>> foundAssignments = new HashMap<Cluster, List<CalorimeterHit>>();
+ for (CalorimeterHit hit : inputHitMap.values()) {
+ Cluster bestMatch = findBestCluster(hit, inputSkeletons);
+ List<CalorimeterHit> matchedHits = foundAssignments.get(bestMatch);
+ if (matchedHits == null) {
+ matchedHits = new Vector<CalorimeterHit>();
+ foundAssignments.put(bestMatch, matchedHits);
+ }
+ // Assigned => add to matchedHits; remove from output hitmap
+ outputHitMap.remove(hit.getCellID());
+ matchedHits.add(hit);
+ }
+
+ // Second pass: create expanded clusters
+ for (Cluster inputCluster : inputSkeletons) {
+ BasicCluster outputCluster = new BasicCluster();
+ outputCluster.addCluster(inputCluster);
+ List<CalorimeterHit> matchedHits = foundAssignments.get(inputCluster);
+ if (matchedHits != null) {
+ for (CalorimeterHit matchedHit : matchedHits) {
+ outputCluster.addHit(matchedHit);
+ }
+ }
+ outputClusterList.add(outputCluster);
+ }
+
+ event.put(m_outputClusterListName, outputClusterList);
+ event.put(m_outputHitMapName, outputHitMap);
+ }
+
+ /** Internal utility routine: Given a calorimeter hit, find the cluster which matches best. */
+ protected Cluster findBestCluster(CalorimeterHit hit, List<Cluster> clusters)
+ {
+ Cluster bestMatch = null;
+ double minDistance = 0;
+ for (Cluster subCluster : clusters) {
+ // How far to this cluster?
+ double dist = distance(subCluster, hit);
+ if (bestMatch == null || dist < minDistance) {
+ minDistance = dist;
+ bestMatch = subCluster;
+ }
+ }
+ return bestMatch;
+ }
+
+ // This belongs outside this class.
+ private double distance(Cluster clus, CalorimeterHit hit)
+ {
+ // Loop over hits...
+ boolean firstCheck = true;
+ double minDistance = Double.NaN; // Will stay NaN if clus is empty
+ List<CalorimeterHit> hits = clus.getCalorimeterHits();
+ for (CalorimeterHit hitInCluster : hits) {
+ double dist = distance(hit, hitInCluster);
+ if (firstCheck || dist<minDistance) {
+ minDistance = dist;
+ firstCheck = false;
+ }
+ }
+
+ return minDistance;
+ }
+
+ // This belongs outside this class.
+ private double distance(CalorimeterHit hit1, CalorimeterHit hit2)
+ {
+ Hep3Vector vect1 = new BasicHep3Vector(hit1.getPosition());
+ Hep3Vector vect2 = new BasicHep3Vector(hit2.getPosition());
+ Hep3Vector displacement = VecOp.sub(vect1, vect2);
+ return displacement.magnitude();
+ }
+ String m_inputClusterListName;
+ String m_inputHitMapName;
+ String m_outputClusterListName;
+ String m_outputHitMapName;
+}
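For illustration only (not part of this commit), typical wiring might look like
the sketch below; all four names are placeholders, and the input cluster list
and input hit map must not share any hits:

    Driver haloStep = new HaloAssigner("Skeletons",     // input List<Cluster>
                                       "UnusedHits",    // input HitMap
                                       "FullClusters",  // output List<Cluster>
                                       "LeftoverHits"); // output HitMap (empty if all hits assigned)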
lcsim/src/org/lcsim/recon/cluster/structural
diff -N LikelihoodFindingStructuralDriver.java
--- /dev/null 1 Jan 1970 00:00:00 -0000
+++ LikelihoodFindingStructuralDriver.java 1 Mar 2006 02:45:57 -0000 1.1
@@ -0,0 +1,116 @@
+package org.lcsim.recon.cluster.structural;
+
+import java.util.List;
+import java.util.Vector;
+import java.util.Map;
+
+import org.lcsim.event.Cluster;
+import org.lcsim.event.EventHeader;
+import org.lcsim.recon.cluster.structural.likelihood.*;
+
+/**
+ * Compute likelihoods for linking components of clusters.
+ * Requires use of truth information to construct the
+ * signal and background likelihood PDFs, so
+ * <code>initializeClusterAssociator</code> in the base
+ * class must be called.
+ *
+ * @author Mat Charles <[log in to unmask]>
+ * @version $Id: LikelihoodFindingStructuralDriver.java,v 1.1 2006/03/01 02:45:57 mcharles Exp $
+ */
+
+public class LikelihoodFindingStructuralDriver extends GenericStructuralDriver
+{
+
+ LikelihoodEvaluator m_eval = null;
+ String m_listOfBigClustersName = null;
+ String m_listOfMIPsName = null;
+ String m_listOfClumpsName = null;
+
+ /**
+ * Constructor.
+ *
+ * @param eval The LikelihoodEvaluator holding the set of likelihood variables to check
+ * @param listOfBigClusters The List<Cluster> of large-scale clusters to grab from each event
+ * @param listOfTrackSegments The List<Cluster> of track segments to grab from each event
+ * @param listOfClumps The List<Cluster> of clumps to grab from each event
+ */
+ public LikelihoodFindingStructuralDriver(LikelihoodEvaluator eval, String listOfBigClusters, String listOfTrackSegments, String listOfClumps) {
+ m_eval = eval; // This should be set up beforehand
+ m_listOfBigClustersName = listOfBigClusters;
+ m_listOfMIPsName = listOfTrackSegments;
+ m_listOfClumpsName = listOfClumps;
+ }
+
+ protected List<Cluster> getListOfBigClusters() {
+ // Grab from event store
+ List<Cluster> vBigClusters = m_event.get(Cluster.class, m_listOfBigClustersName);
+ return vBigClusters;
+ }
+
+ protected List<Cluster> recursivelyFindSubClusters(Cluster clus)
+ {
+ List<Cluster> output = new Vector<Cluster>();
+ for (Cluster dau : clus.getClusters()) {
+ output.addAll(recursivelyFindSubClusters(dau));
+ }
+ output.add(clus);
+ return output;
+ }
+
+ protected List<Cluster> findTrackSegments(Cluster clus) {
+ List<Cluster> eventMIPList = m_event.get(Cluster.class, m_listOfMIPsName);
+ List<Cluster> outputMIPList = new Vector<Cluster>();
+ List<Cluster> flatListOfSubClusters = recursivelyFindSubClusters(clus);
+ for (Cluster mip : eventMIPList) {
+ if (flatListOfSubClusters.contains(mip)) {
+ outputMIPList.add(mip);
+ }
+ }
+ return outputMIPList;
+ }
+
+ protected List<Cluster> findClumps(Cluster clus) {
+ List<Cluster> eventClumpList = m_event.get(Cluster.class, m_listOfClumpsName);
+ List<Cluster> outputClumpList = new Vector<Cluster>();
+ List<Cluster> flatListOfSubClusters = recursivelyFindSubClusters(clus);
+ for (Cluster clump : eventClumpList) {
+ if (flatListOfSubClusters.contains(clump)) {
+ outputClumpList.add(clump);
+ }
+ }
+ return outputClumpList;
+ }
+
+ protected void compareTrackSegmentToTrackSegment(Cluster clus1, Cluster clus2)
+ {
+ boolean isLinkCorrect = checkAssociationComponentToComponent(clus1, clus2);
+ List<LikelihoodDistribution> vDistributions = m_eval.getLikelihoodDistributionTrackToTrack(isLinkCorrect);
+ for ( LikelihoodDistribution dist : vDistributions ) {
+ dist.fill(clus1, clus2);
+ }
+ }
+
+ protected void compareTrackSegmentToClump(Cluster clus1, Cluster clus2)
+ {
+ boolean isLinkCorrect = checkAssociationComponentToComponent(clus1, clus2);
+ List<LikelihoodDistribution> vDistributions = m_eval.getLikelihoodDistributionTrackToClump(isLinkCorrect);
+ for ( LikelihoodDistribution dist : vDistributions ) {
+ dist.fill(clus1, clus2);
+ }
+ }
+
+ protected void compareClumpToClump(Cluster clus1, Cluster clus2)
+ {
+ boolean isLinkCorrect = checkAssociationComponentToComponent(clus1, clus2);
+ List<LikelihoodDistribution> vDistributions = m_eval.getLikelihoodDistributionClumpToClump(isLinkCorrect);
+ for ( LikelihoodDistribution dist : vDistributions ) {
+ dist.fill(clus1, clus2);
+ }
+ }
+
+ protected void initializeEvent() {}
+ protected void initializeBigCluster(Cluster bigClus) {}
+ protected void finalizeEvent() {}
+ protected void finalizeBigCluster(Cluster bigClus, List<Cluster> vMIPs, List<Cluster> vClumps) {}
+}
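For illustration only (not part of this commit), a truth-based training job
might be set up as sketched below; the list names and binning are placeholders,
hitListNames and clusterListNames are String[] defined elsewhere, and the call
to initializeClusterAssociator is required since this driver uses truth:

    LikelihoodEvaluator eval = new LikelihoodEvaluator();
    eval.addLikelihoodQuantityClumpToClump(new ClusterToClusterMinDistance(), 50, 0.0, 500.0, false, true);
    LikelihoodFindingStructuralDriver finder =
        new LikelihoodFindingStructuralDriver(eval, "BigClusters", "TrackSegments", "Clumps");
    finder.initializeClusterAssociator(hitListNames, clusterListNames, "MCParticle",
                                       "MCPToCluster", "ClusterToMCP");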
lcsim/src/org/lcsim/recon/cluster/structural
diff -N LikelihoodLinkDriver.java
--- /dev/null 1 Jan 1970 00:00:00 -0000
+++ LikelihoodLinkDriver.java 1 Mar 2006 02:45:57 -0000 1.1
@@ -0,0 +1,244 @@
+package org.lcsim.recon.cluster.structural;
+
+import java.util.List;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.Vector;
+import java.util.Map;
+import java.util.HashMap;
+
+import org.lcsim.event.Cluster;
+import org.lcsim.recon.cluster.util.BasicCluster;
+import org.lcsim.event.EventHeader;
+import org.lcsim.event.CalorimeterHit;
+import org.lcsim.recon.cluster.structural.likelihood.*;
+
+/**
+ * Examine the components of large-scale clusters and
+ * determine whether they are actually linked.
+ * This does not (directly) use truth information.
+ * The likelihood distributions are read in from the
+ * LikelihoodEvaluator.
+ *
+ * @author Mat Charles <[log in to unmask]>
+ * @version $Id: LikelihoodLinkDriver.java,v 1.1 2006/03/01 02:45:57 mcharles Exp $
+ */
+
+public class LikelihoodLinkDriver extends GenericStructuralDriver
+{
+ LikelihoodEvaluator m_eval = null;
+ double m_cutTrackToTrack = 0.5;
+ double m_cutTrackToClump = 0.5;
+ double m_cutClumpToClump = 0.5;
+ Vector<Link> m_vlinksTrackToTrack = null;
+ Vector<Link> m_vlinksTrackToClump = null;
+ Vector<Link> m_vlinksClumpToClump = null;
+ String m_listOfBigClustersName = null;
+ String m_listOfMIPsName = null;
+ String m_listOfClumpsName = null;
+ String m_outputListOfSkeletonsName = null;
+ String m_outputHitMapName = null;
+
+ /**
+ * Constructor.
+ *
+ * @param eval The LikelihoodEvaluator which contains the likelihood PDFs
+ * @param cutTrackToTrack To make a track-track link, the likelihood must be greater than this (e.g. 0.5)
+ * @param cutTrackToClump To make a track-clump link, the likelihood must be greater than this (e.g. 0.5)
+ * @param cutClumpToClump To make a clump-clump link, the likelihood must be greater than this (e.g. 0.5)
+ * @param listOfBigClusters The List<Cluster> of large-scale clusters to grab from each event
+ * @param listOfTrackSegments The List<Cluster> of track segments to grab from each event
+ * @param listOfClumps The List<Cluster> of clumps to grab from each event
+ * @param outputListOfSkeletons The List<Cluster> of linked clusters to write to the event when finished
+ * @param outputHitMap The HitMap of unused hits to write to the event when finished
+ */
+ public LikelihoodLinkDriver(LikelihoodEvaluator eval, double cutTrackToTrack, double cutTrackToClump, double cutClumpToClump, String listOfBigClusters, String listOfTrackSegments, String listOfClumps, String outputListOfSkeletons, String outputHitMap) {
+ m_eval = eval;
+ m_cutTrackToTrack = cutTrackToTrack;
+ m_cutTrackToClump = cutTrackToClump;
+ m_cutClumpToClump = cutClumpToClump;
+ m_listOfBigClustersName = listOfBigClusters;
+ m_listOfMIPsName = listOfTrackSegments;
+ m_listOfClumpsName = listOfClumps;
+ m_outputListOfSkeletonsName = outputListOfSkeletons;
+ m_outputHitMapName = outputHitMap;
+ }
+
+ protected List<Cluster> getListOfBigClusters() {
+ // Grab from event store
+ List<Cluster> vBigClusters = m_event.get(Cluster.class, m_listOfBigClustersName);
+ return vBigClusters;
+ }
+
+ // Some duplicated code...
+ protected List<Cluster> recursivelyFindSubClusters(Cluster clus)
+ {
+ List<Cluster> output = new Vector<Cluster>();
+ for (Cluster dau : clus.getClusters()) {
+ output.addAll(recursivelyFindSubClusters(dau));
+ }
+ output.add(clus);
+ return output;
+ }
+
+ protected List<Cluster> findTrackSegments(Cluster clus) {
+ List<Cluster> eventMIPList = m_event.get(Cluster.class, m_listOfMIPsName);
+ List<Cluster> outputMIPList = new Vector<Cluster>();
+ List<Cluster> flatListOfSubClusters = recursivelyFindSubClusters(clus);
+ for (Cluster mip : eventMIPList) {
+ if (flatListOfSubClusters.contains(mip)) {
+ outputMIPList.add(mip);
+ }
+ }
+ return outputMIPList;
+ }
+
+ protected List<Cluster> findClumps(Cluster clus) {
+ List<Cluster> eventClumpList = m_event.get(Cluster.class, m_listOfClumpsName);
+ List<Cluster> outputClumpList = new Vector<Cluster>();
+ List<Cluster> flatListOfSubClusters = recursivelyFindSubClusters(clus);
+ for (Cluster clump : eventClumpList) {
+ if (flatListOfSubClusters.contains(clump)) {
+ outputClumpList.add(clump);
+ }
+ }
+ return outputClumpList;
+ }
+
+ protected void compareTrackSegmentToTrackSegment(Cluster clus1, Cluster clus2) {
+ double likelihood = m_eval.getLinkLikelihoodTrackToTrack(clus1, clus2);
+ if (likelihood > m_cutTrackToTrack) {
+ m_vlinksTrackToTrack.add(new Link(clus1, clus2));
+ }
+ //System.out.println("DEBUG: "+this.getClass().getName()+": linking Track("+clus1.getCalorimeterHits().size()+") to Track("+clus2.getCalorimeterHits().size()+") likelihood="+likelihood+" (c.f. cut="+m_cutTrackToTrack+")");
+ }
+ protected void compareTrackSegmentToClump(Cluster clus1, Cluster clus2) {
+ double likelihood = m_eval.getLinkLikelihoodTrackToClump(clus1, clus2);
+ if (likelihood > m_cutTrackToClump) {
+ m_vlinksTrackToClump.add(new Link(clus1, clus2));
+ }
+ //System.out.println("DEBUG: "+this.getClass().getName()+": linking Track("+clus1.getCalorimeterHits().size()+") to Clump("+clus2.getCalorimeterHits().size()+") likelihood="+likelihood+" (c.f. cut="+m_cutTrackToClump+")");
+ }
+ protected void compareClumpToClump(Cluster clus1, Cluster clus2) {
+ double likelihood = m_eval.getLinkLikelihoodClumpToClump(clus1, clus2);
+ if (likelihood > m_cutClumpToClump) {
+ m_vlinksClumpToClump.add(new Link(clus1, clus2));
+ }
+ //System.out.println("DEBUG: "+this.getClass().getName()+": linking Clump("+clus1.getCalorimeterHits().size()+") to Clump("+clus2.getCalorimeterHits().size()+") likelihood="+likelihood+" (c.f. cut="+m_cutClumpToClump+")");
+ }
+
+ protected void initializeEvent() {
+ // Set up the output for this event.
+ // Output 1: List<Cluster> with the skeletons
+ List<Cluster> tmpList = new Vector<Cluster>();
+ m_event.put(m_outputListOfSkeletonsName, tmpList);
+ // Output 2: HitMap with halo/unused hits
+ Map<Long,CalorimeterHit> tmpMap = new HashMap<Long,CalorimeterHit>();
+ m_event.put(m_outputHitMapName, tmpMap);
+ }
+
+ protected void initializeBigCluster(Cluster bigClus) {
+ m_vlinksTrackToTrack = new Vector<Link>();
+ m_vlinksTrackToClump = new Vector<Link>();
+ m_vlinksClumpToClump = new Vector<Link>();
+ }
+
+ protected void finalizeEvent() {
+ //
+ }
+ protected void finalizeBigCluster(Cluster bigClus, List<Cluster> vMIPs, List<Cluster> vClumps) {
+ // Merge links & clusters:
+ List<Cluster> vLinkedClusters = new Vector<Cluster>();
+ Set<Cluster> unassignedClumps = new HashSet<Cluster>();
+ Set<Cluster> unassignedMIPs = new HashSet<Cluster>();
+ unassignedClumps.addAll(vClumps);
+ unassignedMIPs.addAll(vMIPs);
+ while ( !(unassignedMIPs.isEmpty() && unassignedClumps.isEmpty()) ) {
+ Set<Cluster> linkedClusters = new HashSet<Cluster>(); // A set containing a bunch of clusters, linked.
+ if ( !(unassignedMIPs.isEmpty()) ) {
+ Cluster nextMIP = unassignedMIPs.iterator().next();
+ recursivelyAddTrack(nextMIP, linkedClusters, unassignedMIPs, unassignedClumps);
+ } else {
+ Cluster nextClump = unassignedClumps.iterator().next();
+ recursivelyAddClump(nextClump, linkedClusters, unassignedMIPs, unassignedClumps);
+ }
+ BasicCluster newMergedCluster = new BasicCluster();
+ for (Cluster currentLinkedCluster : linkedClusters) {
+ newMergedCluster.addCluster(currentLinkedCluster);
+ }
+ vLinkedClusters.add(newMergedCluster);
+ }
+
+ // Output 1: List<Cluster> with the skeletons
+ List<Cluster> outputSkeletonList = m_event.get(Cluster.class, m_outputListOfSkeletonsName);
+ outputSkeletonList.addAll(vLinkedClusters);
+
+ // Output 2: HitMap with halo/unused hits
+ Map<Long,CalorimeterHit> outputHitMap = (Map<Long,CalorimeterHit>) (m_event.get(m_outputHitMapName));
+ // Find unused hits in this big cluster:
+ for (CalorimeterHit hit : bigClus.getCalorimeterHits()) {
+ boolean hitInSkeleton = false;
+ for (Cluster skeleton : vLinkedClusters) {
+ if (skeleton.getCalorimeterHits().contains(hit)) {
+ hitInSkeleton = true;
+ break;
+ }
+ }
+ if (!hitInSkeleton) {
+ // Unused hit
+ outputHitMap.put(hit.getCellID(), hit);
+ }
+ }
+ }
+
+ void recursivelyAddTrack(Cluster currentMIP, Set<Cluster> linkedClusters, Set<Cluster> unassignedMIPs, Set<Cluster> unassignedClumps) {
+ boolean removedCurrentOK = unassignedMIPs.remove(currentMIP);
+ boolean addedCurrentOK = linkedClusters.add(currentMIP);
+ if (!removedCurrentOK) { throw new AssertionError("Failed to remove MIP"); }
+ if (!addedCurrentOK) { throw new AssertionError("Failed to add MIP"); }
+ // Track-Track links:
+ for (Link currentLink : m_vlinksTrackToTrack) {
+ if (currentLink.contains(currentMIP)) {
+ Cluster nextMIP = currentLink.counterpart(currentMIP);
+ if (unassignedMIPs.contains(nextMIP)) {
+ recursivelyAddTrack(nextMIP, linkedClusters, unassignedMIPs, unassignedClumps);
+ }
+ }
+ }
+ // Track-Clump links
+ for (Link currentLink : m_vlinksTrackToClump) {
+ if (currentLink.contains(currentMIP)) {
+ Cluster nextClump = currentLink.counterpart(currentMIP);
+ if (unassignedClumps.contains(nextClump)) {
+ recursivelyAddClump(nextClump, linkedClusters, unassignedMIPs, unassignedClumps);
+ }
+ }
+ }
+ }
+
+ void recursivelyAddClump(Cluster currentClump, Set<Cluster> linkedClusters, Set<Cluster> unassignedMIPs, Set<Cluster> unassignedClumps) {
+ boolean removedCurrentOK = unassignedClumps.remove(currentClump);
+ boolean addedCurrentOK = linkedClusters.add(currentClump);
+ if (!removedCurrentOK && !addedCurrentOK) { throw new AssertionError("Failed to remove clump AND failed to add clump."); }
+ if (!removedCurrentOK) { throw new AssertionError("Failed to remove clump."); }
+ if (!addedCurrentOK) { throw new AssertionError("Failed to add clump."); }
+ // Track-Clump links
+ for (Link currentLink : m_vlinksTrackToClump) {
+ if (currentLink.contains(currentClump)) {
+ Cluster nextMIP = currentLink.counterpart(currentClump);
+ if (unassignedMIPs.contains(nextMIP)) {
+ recursivelyAddTrack(nextMIP, linkedClusters, unassignedMIPs, unassignedClumps);
+ }
+ }
+ }
+ // Clump-Clump links
+ for (Link currentLink : m_vlinksClumpToClump) {
+ if (currentLink.contains(currentClump)) {
+ Cluster nextClump = currentLink.counterpart(currentClump);
+ if (unassignedClumps.contains(nextClump)) {
+ recursivelyAddClump(nextClump, linkedClusters, unassignedMIPs, unassignedClumps);
+ }
+ }
+ }
+ }
+}
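For illustration only (not part of this commit), the reconstruction-time
counterpart of the training sketch above might read the serialised evaluator
back in and apply flat 0.5 cuts; the file and list names are placeholders:

    LikelihoodEvaluator eval = LikelihoodEvaluator.readFromFile("likelihood.bin");
    Driver linker = new LikelihoodLinkDriver(eval, 0.5, 0.5, 0.5,
        "BigClusters", "TrackSegments", "Clumps", "Skeletons", "HaloHits");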
lcsim/src/org/lcsim/recon/cluster/structural
diff -N Link.java
--- /dev/null 1 Jan 1970 00:00:00 -0000
+++ Link.java 1 Mar 2006 02:45:57 -0000 1.1
@@ -0,0 +1,35 @@
+package org.lcsim.recon.cluster.structural;
+
+import org.lcsim.event.Cluster;
+
+/**
+ * Class representing a link between two component clusters of a hadronic
+ * shower. This class is not public and is not accessible from outside
+ * org.lcsim.recon.cluster.structural
+ *
+ * @author Mat Charles <[log in to unmask]>
+ * @version $Id: Link.java,v 1.1 2006/03/01 02:45:57 mcharles Exp $
+ */
+
+class Link {
+ public Link(Cluster clus1, Cluster clus2) {
+ c1 = clus1;
+ c2 = clus2;
+ }
+ public boolean contains(Cluster clus) {
+ return (clus==c1 || clus==c2);
+ }
+ public Cluster counterpart(Cluster clus) {
+ if (clus==c1) {
+ return c2;
+ } else if (clus==c2) {
+ return c1;
+ } else {
+ return null;
+ }
+ }
+ public Cluster getFirst() { return c1; }
+ public Cluster getSecond() { return c2; }
+ protected Cluster c1 = null;
+ protected Cluster c2 = null;
+}
lcsim/src/org/lcsim/recon/cluster/structural/likelihood
diff -N ClumpToClumpDOCA.java
--- /dev/null 1 Jan 1970 00:00:00 -0000
+++ ClumpToClumpDOCA.java 1 Mar 2006 02:45:58 -0000 1.1
@@ -0,0 +1,33 @@
+package org.lcsim.recon.cluster.structural.likelihood;
+
+import org.lcsim.util.swim.Trajectory;
+import org.lcsim.util.swim.Line;
+import org.lcsim.event.Cluster;
+import hep.physics.vec.BasicHep3Vector;
+import hep.physics.vec.Hep3Vector;
+
+public class ClumpToClumpDOCA implements StructuralLikelihoodQuantity
+{
+ public ClumpToClumpDOCA() {}
+
+ public double evaluate(Cluster clus1, Cluster clus2)
+ {
+ // We used to be able to figure out which cluster was
+ // pointier from the eigenvalues, but now we can't (easily).
+ // So sad. So now we try it both ways and return the smaller
+ // of the two DOCAs.
+
+ Hep3Vector position1 = new BasicHep3Vector(clus1.getPosition());
+ Hep3Vector position2 = new BasicHep3Vector(clus2.getPosition());
+ Trajectory line1 = new Line(position1, clus1.getIPhi(), clus1.getITheta());
+ Trajectory line2 = new Line(position2, clus2.getIPhi(), clus2.getITheta());
+
+ double doca1 = Math.abs(line1.getDistanceToPoint(position2));
+ double doca2 = Math.abs(line2.getDistanceToPoint(position1));
+
+ double doca = Math.min(doca1, doca2);
+ return doca;
+ }
+}
+
+
lcsim/src/org/lcsim/recon/cluster/structural/likelihood
diff -N ClusterToClusterMinDistance.java
--- /dev/null 1 Jan 1970 00:00:00 -0000
+++ ClusterToClusterMinDistance.java 1 Mar 2006 02:45:58 -0000 1.1
@@ -0,0 +1,13 @@
+package org.lcsim.recon.cluster.structural.likelihood;
+
+import org.lcsim.event.Cluster;
+
+public class ClusterToClusterMinDistance implements StructuralLikelihoodQuantity
+{
+ public ClusterToClusterMinDistance() {}
+
+ public double evaluate(Cluster clus1, Cluster clus2) {
+ double minDist = MiscUtilities.distance(clus1, clus2);
+ return minDist;
+ }
+}
lcsim/src/org/lcsim/recon/cluster/structural/likelihood
diff -N LikelihoodDistribution.java
--- /dev/null 1 Jan 1970 00:00:00 -0000
+++ LikelihoodDistribution.java 1 Mar 2006 02:45:58 -0000 1.1
@@ -0,0 +1,170 @@
+package org.lcsim.recon.cluster.structural.likelihood;
+
+import org.lcsim.event.Cluster;
+
+public class LikelihoodDistribution implements java.io.Serializable
+{
+
+ StructuralLikelihoodQuantity m_quantity = null;
+ int m_nbins;
+ double m_min;
+ double m_max;
+ boolean m_useUnderFlow;
+ boolean m_useOverFlow;
+
+ double[] m_binContent;
+ double m_underFlow;
+ double m_overFlow;
+
+ boolean m_normalized = false;
+ double m_normalization;
+
+ public boolean useUnderFlow() { return m_useUnderFlow; }
+ public boolean useOverFlow() { return m_useOverFlow; }
+ public int getNbins() { return m_nbins; }
+ public double getMin() { return m_min; }
+ public double getMax() { return m_max; }
+ public StructuralLikelihoodQuantity getQuantity() { return m_quantity; }
+
+ public LikelihoodDistribution(StructuralLikelihoodQuantity quantity, int nbins, double min, double max, boolean useUnderFlow, boolean useOverFlow)
+ {
+ m_quantity = quantity;
+ m_nbins = nbins;
+ m_min = min;
+ m_max = max;
+ m_useUnderFlow = useUnderFlow;
+ m_useOverFlow = useOverFlow;
+
+ m_binContent = new double[m_nbins];
+ for (int i=0; i<m_nbins; i++) {
+ m_binContent[i] = 0.0;
+ }
+ m_underFlow = 0.0;
+ m_overFlow = 0.0;
+
+ m_normalized = false;
+ }
+
+ protected void fill(double value, double weight)
+ {
+ // Is it in one of the main bins?
+ if (value>=m_min && value<=m_max) {
+ // Yes -- which one? Watch out for a
+ // rounding problem...
+ double frac = ((value-m_min)/(m_max-m_min));
+ double binIndex_double = frac * ( (double)(m_nbins) );
+ int binIndex = (int) (binIndex_double);
+ if (binIndex == m_nbins) { binIndex = m_nbins-1; } // value == m_max belongs in the top bin
+ if (binIndex<0 || binIndex>=m_nbins) {
+ throw new AssertionError("Help! In range ("+m_min+","+m_max+") with "+m_nbins+" bins, a value of "+value+" got mapped to bin "+binIndex);
+ }
+ m_binContent[binIndex] += weight;
+ } else if (value < m_min) {
+ // Underflow
+ m_underFlow += weight;
+ } else {
+ // Overflow
+ m_overFlow += weight;
+ }
+
+ // We've just filled => not normalized right now.
+ m_normalized = false;
+ }
+
+ public void fill(Cluster clus1, Cluster clus2) {
+ try {
+ double quantityValue = m_quantity.evaluate(clus1, clus2);
+ fill(quantityValue);
+ } catch (QuantityNotDefinedException x) {
+ // Quantity not valid
+ System.out.println("Warning: "+x);
+ }
+ }
+
+ protected void fill(double value) {
+ fill(value, 1.0);
+ }
+
+ public void normalize() {
+ // Normalize the PDF so it sums to 1.
+ // [assert m_nbins = m_binContent.length]
+
+ // Find the total...
+ m_normalization = 0.0;
+ for (int i=0; i<m_binContent.length; i++) {
+ m_normalization += m_binContent[i];
+ }
+ if (m_useUnderFlow) {
+ m_normalization += m_underFlow;
+ }
+ if (m_useOverFlow) {
+ m_normalization += m_overFlow;
+ }
+
+ // Done
+ m_normalized = true;
+ }
+
+ public double getPDF(Cluster clus1, Cluster clus2) throws QuantityNotDefinedException
+ {
+ double value = 0;
+ try {
+ value = m_quantity.evaluate(clus1, clus2);
+ } catch (QuantityNotDefinedException x) {
+ // Not defined
+ throw x;
+ }
+ if (!m_normalized) { normalize(); }
+ if (m_normalization == 0.0) {
+ System.out.println("WARNING in "+this.getClass().getName()+": computing PDF when normalization="+m_normalization);
+ }
+ // Is it in one of the main bins?
+ if (value>=m_min && value<=m_max) {
+ // Yes -- which one? Watch out for a
+ // rounding problem...
+ double frac = ((value-m_min)/(m_max-m_min));
+ double binIndex_double = frac * ( (double)(m_nbins) );
+ int binIndex = (int) (binIndex_double);
+ if (binIndex == m_nbins) { binIndex = m_nbins-1; } // value == m_max belongs in the top bin
+ return m_binContent[binIndex]/m_normalization;
+ } else if (value < m_min) {
+ // Underflow
+ if (!m_useUnderFlow) { throw new AssertionError("Underflow for ["+m_quantity+"]: "+value+" (allowed range: "+m_min+" to "+m_max+")"); }
+ return m_underFlow/m_normalization;
+ } else {
+ // Overflow
+ if (!m_useOverFlow) { throw new AssertionError("Overflow for ["+m_quantity+"]: "+value+" (allowed range: "+m_min+" to "+m_max+")"); }
+ return m_overFlow/m_normalization;
+ }
+ }
+
+ public double getPDF(int bin)
+ {
+ if (!m_normalized) {
+ normalize();
+ }
+ if (m_normalization == 0.0) {
+ System.out.println("WARNING in "+this.getClass().getName()+": computing PDF when normalization="+m_normalization);
+ }
+
+ if (bin>=0 && bin<m_nbins) {
+ // regular bin
+ return m_binContent[bin]/m_normalization;
+ } else if (bin==-1) {
+ // underflow bin
+ if (m_useUnderFlow) {
+ return m_underFlow/m_normalization;
+ } else {
+ throw new AssertionError("Tried to use underflow bin when not enabled");
+ }
+ } else if (bin==m_nbins) {
+ // overflow bin
+ if (m_useOverFlow) {
+ return m_overFlow/m_normalization;
+ } else {
+ throw new AssertionError("Tried to use overflow bin when not enabled");
+ }
+ } else {
+ throw new AssertionError("Bin index out of range: "+bin+" not in [-1, "+m_nbins+"]");
+ }
+ }
+
+}
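For illustration only (not part of this commit), typical use and the bin
mapping, with placeholder clusters and numbers:

    // dist built with nbins=5, min=0.0, max=10.0
    dist.fill(clus1, clus2);   // evaluates the quantity, adds weight 1.0 to the matching bin
    double p = dist.getPDF(3); // normalized content of bin 3 (normalizes lazily if needed)
    // Bin mapping: a value of 7.3 gives frac = 0.73, so bin = (int)(0.73*5) = 3.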
lcsim/src/org/lcsim/recon/cluster/structural/likelihood
diff -N LikelihoodEvaluator.java
--- /dev/null 1 Jan 1970 00:00:00 -0000
+++ LikelihoodEvaluator.java 1 Mar 2006 02:45:58 -0000 1.1
@@ -0,0 +1,247 @@
+package org.lcsim.recon.cluster.structural.likelihood;
+
+import java.util.List;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.Vector;
+
+import java.io.FileOutputStream;
+import java.io.FileInputStream;
+import java.io.ObjectOutputStream;
+import java.io.ObjectInputStream;
+import java.io.Serializable;
+import java.io.IOException;
+
+import hep.aida.ITree;
+import hep.aida.IAnalysisFactory;
+import hep.aida.IHistogramFactory;
+import hep.aida.IHistogram1D;
+
+import org.lcsim.event.Cluster;
+
+public class LikelihoodEvaluator implements java.io.Serializable {
+
+ Map<Boolean, Vector<LikelihoodDistribution> > m_LikelihoodDistributionsTrackToTrack = null;
+ Map<Boolean, Vector<LikelihoodDistribution> > m_LikelihoodDistributionsTrackToClump = null;
+ Map<Boolean, Vector<LikelihoodDistribution> > m_LikelihoodDistributionsClumpToClump = null;
+ transient boolean m_debug; // default value is false
+
+ public LikelihoodEvaluator()
+ {
+ // Maps from Boolean (the tag) to Distribution
+ m_LikelihoodDistributionsTrackToTrack = new HashMap<Boolean, Vector<LikelihoodDistribution> >();
+ m_LikelihoodDistributionsTrackToClump = new HashMap<Boolean, Vector<LikelihoodDistribution> >();
+ m_LikelihoodDistributionsClumpToClump = new HashMap<Boolean, Vector<LikelihoodDistribution> >();
+ m_LikelihoodDistributionsTrackToTrack.put(Boolean.valueOf(true), new Vector<LikelihoodDistribution>());
+ m_LikelihoodDistributionsTrackToTrack.put(Boolean.valueOf(false), new Vector<LikelihoodDistribution>());
+ m_LikelihoodDistributionsTrackToClump.put(Boolean.valueOf(true), new Vector<LikelihoodDistribution>());
+ m_LikelihoodDistributionsTrackToClump.put(Boolean.valueOf(false), new Vector<LikelihoodDistribution>());
+ m_LikelihoodDistributionsClumpToClump.put(Boolean.valueOf(true), new Vector<LikelihoodDistribution>());
+ m_LikelihoodDistributionsClumpToClump.put(Boolean.valueOf(false), new Vector<LikelihoodDistribution>());
+ }
+
+ public List<StructuralLikelihoodQuantity> getLikelihoodQuantities() {
+ Vector<StructuralLikelihoodQuantity> vOut = new Vector<StructuralLikelihoodQuantity>();
+ vOut.addAll(getLikelihoodQuantitiesTrackToTrack());
+ vOut.addAll(getLikelihoodQuantitiesTrackToClump());
+ vOut.addAll(getLikelihoodQuantitiesClumpToClump());
+ return vOut;
+ }
+ public List<StructuralLikelihoodQuantity> getLikelihoodQuantitiesTrackToTrack() {
+ return getLikelihoodQuantities(m_LikelihoodDistributionsTrackToTrack.get(Boolean.valueOf(true)));
+ }
+ public List<StructuralLikelihoodQuantity> getLikelihoodQuantitiesTrackToClump() {
+ return getLikelihoodQuantities(m_LikelihoodDistributionsTrackToClump.get(Boolean.valueOf(true)));
+ }
+ public List<StructuralLikelihoodQuantity> getLikelihoodQuantitiesClumpToClump() {
+ return getLikelihoodQuantities(m_LikelihoodDistributionsClumpToClump.get(Boolean.valueOf(true)));
+ }
+ protected List<StructuralLikelihoodQuantity> getLikelihoodQuantities(Vector<LikelihoodDistribution> vect) {
+ Vector<StructuralLikelihoodQuantity> vOut = new Vector<StructuralLikelihoodQuantity>();
+ for (LikelihoodDistribution dist : vect) {
+ StructuralLikelihoodQuantity quant = dist.getQuantity();
+ vOut.add(quant);
+ }
+ return vOut;
+ }
+
+ public void addLikelihoodQuantityTrackToTrack(StructuralLikelihoodQuantity quant, int nbins, double min, double max, boolean useUnderFlow, boolean useOverFlow) {
+ Vector<LikelihoodDistribution> vTrue = m_LikelihoodDistributionsTrackToTrack.get(Boolean.valueOf(true));
+ Vector<LikelihoodDistribution> vFalse = m_LikelihoodDistributionsTrackToTrack.get(Boolean.valueOf(false));
+ vTrue .add(new LikelihoodDistribution(quant, nbins, min, max, useUnderFlow, useOverFlow));
+ vFalse.add(new LikelihoodDistribution(quant, nbins, min, max, useUnderFlow, useOverFlow));
+ }
+ public void addLikelihoodQuantityTrackToClump(StructuralLikelihoodQuantity quant, int nbins, double min, double max, boolean useUnderFlow, boolean useOverFlow) {
+ Vector<LikelihoodDistribution> vTrue = m_LikelihoodDistributionsTrackToClump.get(Boolean.valueOf(true));
+ Vector<LikelihoodDistribution> vFalse = m_LikelihoodDistributionsTrackToClump.get(Boolean.valueOf(false));
+ vTrue .add(new LikelihoodDistribution(quant, nbins, min, max, useUnderFlow, useOverFlow));
+ vFalse.add(new LikelihoodDistribution(quant, nbins, min, max, useUnderFlow, useOverFlow));
+ }
+ public void addLikelihoodQuantityClumpToClump(StructuralLikelihoodQuantity quant, int nbins, double min, double max, boolean useUnderFlow, boolean useOverFlow) {
+ Vector<LikelihoodDistribution> vTrue = m_LikelihoodDistributionsClumpToClump.get(Boolean.valueOf(true));
+ Vector<LikelihoodDistribution> vFalse = m_LikelihoodDistributionsClumpToClump.get(Boolean.valueOf(false));
+ vTrue .add(new LikelihoodDistribution(quant, nbins, min, max, useUnderFlow, useOverFlow));
+ vFalse.add(new LikelihoodDistribution(quant, nbins, min, max, useUnderFlow, useOverFlow));
+ }
+
+ public Vector<LikelihoodDistribution> getLikelihoodDistributionTrackToTrack(boolean goodLink) {
+ Boolean isLinkCorrect = Boolean.valueOf(goodLink);
+ Vector<LikelihoodDistribution> vDistributions = m_LikelihoodDistributionsTrackToTrack.get(isLinkCorrect);
+ return vDistributions;
+ }
+ public Vector<LikelihoodDistribution> getLikelihoodDistributionTrackToClump(boolean goodLink) {
+ Boolean isLinkCorrect = Boolean.valueOf(goodLink);
+ Vector<LikelihoodDistribution> vDistributions = m_LikelihoodDistributionsTrackToClump.get(isLinkCorrect);
+ return vDistributions;
+ }
+ public Vector<LikelihoodDistribution> getLikelihoodDistributionClumpToClump(boolean goodLink) {
+ Boolean isLinkCorrect = Boolean.valueOf(goodLink);
+ Vector<LikelihoodDistribution> vDistributions = m_LikelihoodDistributionsClumpToClump.get(isLinkCorrect);
+ return vDistributions;
+ }
+
+ public double getLinkLikelihoodTrackToTrack(Cluster clus1, Cluster clus2) {
+ Vector<LikelihoodDistribution> vLinked = getLikelihoodDistributionTrackToTrack(true);
+ Vector<LikelihoodDistribution> vUnlinked = getLikelihoodDistributionTrackToTrack(false);
+ return getLinkLikelihood(clus1, clus2, vLinked, vUnlinked);
+ }
+ public double getLinkLikelihoodTrackToClump(Cluster clus1, Cluster clus2) {
+ Vector<LikelihoodDistribution> vLinked = getLikelihoodDistributionTrackToClump(true);
+ Vector<LikelihoodDistribution> vUnlinked = getLikelihoodDistributionTrackToClump(false);
+ return getLinkLikelihood(clus1, clus2, vLinked, vUnlinked);
+ }
+ public double getLinkLikelihoodClumpToClump(Cluster clus1, Cluster clus2) {
+ Vector<LikelihoodDistribution> vLinked = getLikelihoodDistributionClumpToClump(true);
+ Vector<LikelihoodDistribution> vUnlinked = getLikelihoodDistributionClumpToClump(false);
+ return getLinkLikelihood(clus1, clus2, vLinked, vUnlinked);
+ }
+
+ protected double getLinkLikelihood(Cluster clus1, Cluster clus2, Vector<LikelihoodDistribution> vLinked, Vector<LikelihoodDistribution> vUnlinked)
+ {
+ double totalGoodLikelihood = 1.0;
+ double totalBadLikelihood = 1.0;
+ for (int i=0; i<vLinked.size(); i++) {
+ LikelihoodDistribution distLinked = vLinked.get(i);
+ LikelihoodDistribution distUnlinked = vUnlinked.get(i);
+ try {
+ double goodPDF = distLinked.getPDF(clus1, clus2);
+ double badPDF = distUnlinked.getPDF(clus1, clus2);
+ double goodProb = goodPDF/(goodPDF+badPDF);
+ double badProb = badPDF/(goodPDF+badPDF);
+ totalGoodLikelihood *= goodProb;
+ totalBadLikelihood *= badProb;
+ } catch (QuantityNotDefinedException x) {
+ // Have to ignore this one.
+ System.out.println("Warning: "+x);
+ }
+ }
+ double normalizedGoodLikelihood = totalGoodLikelihood / (totalGoodLikelihood+totalBadLikelihood);
+ return normalizedGoodLikelihood;
+ }
+
+ // A static routine to read from a serialised file:
+ static public LikelihoodEvaluator readFromFile(String filename)
+ {
+ try {
+ FileInputStream fis = new FileInputStream(filename);
+ ObjectInputStream ois = new ObjectInputStream(fis);
+ Object o = ois.readObject();
+ ois.close(); // release the file handle
+ LikelihoodEvaluator eval = (LikelihoodEvaluator) o;
+ return eval;
+ } catch (java.io.IOException x) {
+ throw new AssertionError("java.io.IOException: "+x);
+ } catch (java.lang.ClassNotFoundException x) {
+ throw new AssertionError("java.lang.ClassNotFoundException: "+x);
+ }
+ }
+
+ public void writeToFile(String filename)
+ {
+ // Open the file for writing:
+ try {
+ FileOutputStream fos = new FileOutputStream(filename);
+ ObjectOutputStream oos = new ObjectOutputStream(fos);
+ oos.writeObject(this);
+ oos.close(); // flush and release the file handle
+ } catch (java.io.IOException x) {
+ throw new AssertionError("java.io.IOException: "+x);
+ }
+ }
+
+ // Routines to make interesting plots:
+
+ /**
+ * Write the likelihood distributions out to the given file
+ */
+ public void makePlots(String filename)
+ {
+ IAnalysisFactory af = IAnalysisFactory.create();
+ try {
+ ITree t = af.createTreeFactory().create(filename,"xml",false,true);
+ IHistogramFactory hf = af.createHistogramFactory(t);
+ makePlots(hf, "hTrackTrack", m_LikelihoodDistributionsTrackToTrack);
+ makePlots(hf, "hTrackClump", m_LikelihoodDistributionsTrackToClump);
+ makePlots(hf, "hClumpClump", m_LikelihoodDistributionsClumpToClump);
+ t.commit();
+ } catch(IOException ioe1) {
+ ioe1.printStackTrace();
+ }
+ }
+
+ /**
+ * Make the plots for a particular set of variables (clump-clump, track-track, track-clump)
+ */
+ protected void makePlots(IHistogramFactory hf, String name1, Map<Boolean, Vector<LikelihoodDistribution> > map)
+ {
+ for (LikelihoodDistribution dist : map.get(Boolean.valueOf(true))) {
+ makePlot(hf, name1, "Signal", dist);
+ }
+ for (LikelihoodDistribution dist : map.get(Boolean.valueOf(false))) {
+ makePlot(hf, name1, "Background", dist);
+ }
+ }
+
+ /**
+ * Make one plot
+ */
+ protected void makePlot(IHistogramFactory hf, String name1, String name2, LikelihoodDistribution dist)
+ {
+ String subName = dist.getQuantity().getClass().getName();
+ String fullName = name1 + "_" + name2 + "_" + subName;
+ if (m_debug) { System.out.println("DEBUG: Making histogram for ["+fullName+"]"); }
+ int nbins = dist.getNbins();
+ double min = dist.getMin();
+ double max = dist.getMax();
+ double binsize = (max-min) / ( (double)(nbins) );
+ int histo_nbins = nbins;
+ double histo_min = min;
+ double histo_max = max;
+ if (dist.useUnderFlow()) {
+ histo_nbins++;
+ histo_min -= binsize;
+ }
+ if (dist.useOverFlow()) {
+ histo_nbins++;
+ histo_max += binsize;
+ }
+ IHistogram1D histo = hf.createHistogram1D(fullName, histo_nbins, histo_min, histo_max);
+ for (int i=0; i<nbins; i++) {
+ double binCenter = min + binsize*( (double)(i) + 0.5);
+ histo.fill(binCenter, dist.getPDF(i));
+ if (m_debug) { System.out.println(" "+i+": "+dist.getPDF(i)); }
+ }
+ if (dist.useUnderFlow()) {
+ histo.fill(min-0.5*binsize, dist.getPDF(-1));
+ if (m_debug) { System.out.println(" underflow: "+dist.getPDF(-1)); }
+ }
+ if (dist.useOverFlow()) {
+ histo.fill(max+0.5*binsize, dist.getPDF(nbins));
+ if (m_debug) { System.out.println(" overflow: "+dist.getPDF(nbins)); }
+ }
+ }
+
+ public void setDebug(boolean debug) {
+ m_debug = debug;
+ }
+}
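For illustration only (not part of this commit), the combination performed by
getLinkLikelihood, worked through with made-up PDF values for two variables:

    // (signal, background) PDF values: (0.30, 0.10) and (0.20, 0.20)
    //   good = (0.30/0.40) * (0.20/0.40) = 0.375
    //   bad  = (0.10/0.40) * (0.20/0.40) = 0.125
    //   likelihood = 0.375 / (0.375 + 0.125) = 0.75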
lcsim/src/org/lcsim/recon/cluster/structural/likelihood
diff -N LikelihoodEvaluatorCheckpointDriver.java
--- /dev/null 1 Jan 1970 00:00:00 -0000
+++ LikelihoodEvaluatorCheckpointDriver.java 1 Mar 2006 02:45:58 -0000 1.1
@@ -0,0 +1,33 @@
+package org.lcsim.recon.cluster.structural.likelihood;
+
+import org.lcsim.event.EventHeader;
+import org.lcsim.util.Driver;
+
+
+public class LikelihoodEvaluatorCheckpointDriver extends Driver
+{
+ protected LikelihoodEvaluator m_eval = null;
+ protected int m_frequency = -1;
+
+ public LikelihoodEvaluatorCheckpointDriver(LikelihoodEvaluator eval, int frequency) {
+ m_eval = eval;
+ m_frequency = frequency;
+ }
+
+ protected int m_count = 0;
+ public void process(EventHeader event) {
+ m_count++;
+ if (m_eval != null) {
+ if (m_frequency>0 && m_count % m_frequency == 0) {
+ m_eval.writeToFile("likelihood.bin");
+ m_eval.makePlots("likelihoodDistributions.aida");
+ }
+ }
+ }
+
+ public void suspend() {
+ m_eval.writeToFile("likelihood.bin");
+ m_eval.makePlots("likelihoodDistributions.aida");
+ super.suspend();
+ }
+}
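For illustration only (not part of this commit): constructed with, say, a
frequency of 500, this driver rewrites likelihood.bin and
likelihoodDistributions.aida every 500 events and again on suspend():

    Driver checkpoint = new LikelihoodEvaluatorCheckpointDriver(eval, 500);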
lcsim/src/org/lcsim/recon/cluster/structural/likelihood
diff -N MiscUtilities.java
--- /dev/null 1 Jan 1970 00:00:00 -0000
+++ MiscUtilities.java 1 Mar 2006 02:45:58 -0000 1.1
@@ -0,0 +1,329 @@
+package org.lcsim.recon.cluster.structural.likelihood;
+
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
+import hep.physics.vec.BasicHep3Vector;
+import hep.physics.vec.Hep3Vector;
+import hep.physics.vec.VecOp;
+
+import org.lcsim.event.CalorimeterHit;
+import org.lcsim.event.Cluster;
+import org.lcsim.recon.cluster.util.BasicCluster;
+import org.lcsim.recon.cluster.util.TensorClusterPropertyCalculator;
+import org.lcsim.geometry.layer.Layering;
+import org.lcsim.geometry.CylindricalSubdetector;
+//import org.lcsim.geometry.Subdetector;
+import org.lcsim.util.swim.Line;
+import org.lcsim.event.MCParticle;
+import org.lcsim.event.SimCalorimeterHit;
+import org.lcsim.event.EventHeader;
+
+class MiscUtilities
+{
+ //MiscUtilities() {}
+
+ /**
+ * Get the minimum distance between hits in the two clusters.
+ * If one or both of the clusters is empty, will return NaN.
+ */
+ static protected double distance(Cluster clus1, Cluster clus2)
+ {
+ // Loop over hits...
+ boolean firstCheck = true;
+ double minDistance = Double.NaN; // Will stay NaN if either is empty
+ List<CalorimeterHit> hits1 = clus1.getCalorimeterHits();
+ List<CalorimeterHit> hits2 = clus2.getCalorimeterHits();
+ for (CalorimeterHit hit1 : hits1) {
+ for (CalorimeterHit hit2 : hits2) {
+ double dist = distance(hit1, hit2);
+ if (firstCheck || dist<minDistance) {
+ minDistance = dist;
+ firstCheck = false;
+ }
+ }
+ }
+
+ return minDistance;
+ }
+
+ static protected double distance(Cluster clus, CalorimeterHit hit)
+ {
+ Hep3Vector point = new BasicHep3Vector(hit.getPosition());
+ return distance(clus, point);
+ }
+
+ static protected double distance(CalorimeterHit hit1, CalorimeterHit hit2)
+ {
+ Hep3Vector point2 = new BasicHep3Vector(hit2.getPosition());
+ return distance(hit1, point2);
+ }
+
+ static protected double distance(Cluster clus, Hep3Vector point)
+ {
+ boolean firstCheck = true;
+ double minDistance = Double.NaN; // Will stay NaN if clus is empty
+ List<CalorimeterHit> hits = clus.getCalorimeterHits();
+ for (CalorimeterHit hitInCluster : hits) {
+ double dist = distance(hitInCluster, point);
+ if (firstCheck || dist<minDistance) {
+ minDistance = dist;
+ firstCheck = false;
+ }
+ }
+ return minDistance;
+ }
+
+ static protected double distance(CalorimeterHit hit, Hep3Vector point) {
+ Hep3Vector hitPoint = new BasicHep3Vector(hit.getPosition());
+ Hep3Vector displacement = VecOp.sub(hitPoint, point);
+ return displacement.magnitude();
+ }
+
+ // Make a line from a cluster
+ // Called by TrackToTrackDOCA.evaluate()
+ static protected Line makeLine(Cluster clus) {
+ Hep3Vector[] posAndDir1 = MiscUtilities.getPositionAndDirection(clus);
+ Line line = new Line(posAndDir1[0], posAndDir1[1]);
+ return line;
+ }
+
+ // Find the direction of a cluster
+ // Called by makeLine(), which is called by TrackToTrackDOCA.evaluate()
+ static protected Hep3Vector[] getPositionAndDirection(Cluster clus)
+ {
+ if (clus.getCalorimeterHits().size() < 4) {
+ // Too few hits to calculate the tensor
+ return null;
+ }
+
+ BasicCluster copy = new BasicCluster();
+ copy.addCluster(clus);
+ TensorClusterPropertyCalculator calc = new TensorClusterPropertyCalculator();
+ copy.setPropertyCalculator(calc);
+ copy.calculateProperties();
+ double[][]axes = calc.getPrincipleAxis();
+ if (axes == null) {
+ throw new AssertionError("Principal axes not calculated");
+ }
+ Hep3Vector[] posAndDir = new Hep3Vector[2];
+ posAndDir[0] = new BasicHep3Vector(copy.getPosition());
+ posAndDir[1] = new BasicHep3Vector(axes[0][0], axes[0][1], axes[0][2]);
+ return posAndDir;
+ }
+
+ static protected org.lcsim.geometry.Subdetector findComponent(Hep3Vector point, org.lcsim.geometry.Detector det)
+ {
+ if (det==null) { System.out.println("WARNING in MiscUtilities: det is null. I will crash..."); }
+ Map<String, org.lcsim.geometry.compact.Subdetector> subdetectorsMap = det.getSubdetectors();
+ Set<Map.Entry<String, org.lcsim.geometry.compact.Subdetector>> subdetectorsSet = subdetectorsMap.entrySet();
+ for (Map.Entry<String, org.lcsim.geometry.compact.Subdetector> subdetectorPair : subdetectorsSet) {
+ String name = subdetectorPair.getKey();
+ org.lcsim.geometry.compact.Subdetector subdet = subdetectorPair.getValue();
+ // Try to cast it to a cylinder...
+ if (subdet instanceof CylindricalSubdetector) {
+ CylindricalSubdetector cylinder = (CylindricalSubdetector) (subdet);
+ double innerR = cylinder.getInnerRadius();
+ double outerR = cylinder.getOuterRadius();
+ double minZ = cylinder.getZMin();
+ double maxZ = cylinder.getZMax();
+ // Are we inside?
+ double pointR = Math.sqrt(point.x()*point.x() + point.y()*point.y());
+ double pointZ = point.z();
+ if (pointR > innerR && pointR < outerR && pointZ > minZ && pointZ < maxZ) {
+ return subdet;
+ }
+ }
+ }
+
+ return null; // Couldn't find a cylindrical detector containing this point
+ }
+
+ /*
+ *
+ * Broken by changes to the Subdetector interface...
+
+ static protected org.lcsim.geometry.layer.Layer findLayer(Hep3Vector point, org.lcsim.geometry.Detector det)
+ {
+ org.lcsim.geometry.Subdetector subdet = MiscUtilities.findComponent(point, det);
+ if (subdet == null) {
+ // Point doesn't fall inside a cylindrical subdetector
+ return null;
+ }
+ if ( ! (subdet instanceof CylindricalSubdetector) ) {
+ throw new AssertionError("Asked to find layer info for subdetector "+subdet+" of class "+subdet.getClass().getName()+", which is not a cylinder");
+ }
+ if (!subdet.isLayered()) {
+ throw new AssertionError("Asked to find layer info for subdetector "+subdet+" of class "+subdet.getClass().getName()+", which is not layered");
+ }
+ CylindricalSubdetector cylinder = (CylindricalSubdetector) (subdet);
+ Layering layers = subdet.getLayering();
+ // Barrel: Spans from -DeltaZ/2 to +DeltaZ/2
+ // Endcap: Spans from +z to +z+DeltaZ or from -z to -Z-Deltaz
+ boolean isEndcap = (cylinder.getZMin() * cylinder.getZMax() > 0.0);
+ // Now, if this is a barrel-type detector, the layers are at constant r and go outwards in steps of z.
+ // If this is an endcap-type detector, the layers are at constant z and go outwards in steps of r.
+ double longitudinalCoord = 0.0;
+ double sign = 1.0;
+ if (isEndcap) {
+ longitudinalCoord = Math.sqrt(point.x()*point.x() + point.y()*point.y());
+ } else {
+ longitudinalCoord = point.z();
+ if (longitudinalCoord<0.0) {
+ sign = -1.0;
+ }
+ }
+ // Which layer are we in?
+ // Note that we have to be careful about the sign in the case where z<0...
+ int numLayers = layers.getLayerCount();
+ org.lcsim.geometry.layer.Layer matchingLayer = null;
+ for (int iLayer=0; iLayer<numLayers; iLayer++) {
+ double layerAbsMin = sign*layers.getDistanceToLayer(iLayer);
+ double layerAbsMax = layerAbsMin + layers.getLayer(iLayer).getThickness();
+ if (sign*longitudinalCoord>=layerAbsMin && sign*longitudinalCoord<layerAbsMax) {
+ matchingLayer = layers.getLayer(iLayer);
+ break;
+ }
+ }
+ // OK, we've found the layer (or not). Do some sanity checks:
+ // Total thickness must match sum of thicknesses, and layer distances
+ // must be monotonic increasing:
+ for (int iLayer=0; iLayer<numLayers-1; iLayer++) {
+ double thickness = layers.getLayer(iLayer).getThickness();
+ double thisLayerAbsMin = sign*layers.getDistanceToLayer(iLayer);
+ double thisLayerAbsMax = thisLayerAbsMin + thickness;
+ double nextLayerAbsMin = sign*layers.getDistanceToLayer(iLayer+1);
+ if (thisLayerAbsMin > thisLayerAbsMax) {
+ throw new AssertionError("Layer "+iLayer+" min = "+thisLayerAbsMin+" and layer "+iLayer+" max = "+thisLayerAbsMax+" -- not allowed.");
+ }
+ if (thisLayerAbsMax > nextLayerAbsMin) {
+ throw new AssertionError("Layer "+(iLayer+1)+" min = "+nextLayerAbsMin+" and layer "+iLayer+" max = "+thisLayerAbsMax+" -- not allowed.");
+ }
+ // Check that there are no holes:
+ double gap = (nextLayerAbsMin - thisLayerAbsMax);
+ if ( Math.abs(gap) > 0.001) {
+ throw new AssertionError("Layer "+iLayer+" max = "+thisLayerAbsMax+" and layer "+iLayer+" min = "+nextLayerAbsMin+" so gap = "+gap+" -- not allowed.");
+ }
+ }
+ return matchingLayer;
+ }
+
+ *
+ */
+
+ /*
+ * // Blah... belongs in Line.
+ * static protected double findDOCAToPoint(Hep3Vector linePoint, Hep3Vector lineDirection, Hep3Vector point)
+ * {
+ * // The first line is kind of ugly.
+ * Hep3Vector displacement = new BasicHep3Vector(point.x() - linePoint.x(), point.y() - linePoint.y(), point.z() - linePoint.z());
+ * double dotProduct = VecOp.dot(displacement, lineDirection);
+ * double doca = Math.sqrt(displacement.magnitudeSquared() - dotProduct*dotProduct);
+ * return doca;
+ * }
+ */
+
+ /*
+ * static protected double correctedEnergyInClusterECAL(MCParticle part, Cluster clus)
+ * {
+ * double energyECAL = 0.0;
+ * for (CalorimeterHit hit : clus.getCalorimeterHits()) {
+ * boolean useThisHit = true;
+ * if (part != null) {
+ * useThisHit = hitMatch(part, (SimCalorimeterHit)(hit));
+ * }
+ * if (useThisHit) {
+ * org.lcsim.geometry.Subdetector subdet = hit.getSubdetector();
+ * if ( ! subdet.isCalorimeter() ) { throw new AssertionError("Cluster hit outside calorimeter"); }
+ * String name = subdet.getName();
+ * if (name.compareTo("EMBarrel") == 0 || name.compareTo("EMEndcap") == 0) {
+ * // ECAL
+ * SimCalorimeterHit simHit = (SimCalorimeterHit) hit;
+ * double rawEnergy = hitMatchEnergy(part, simHit);
+ * double rawEnergyTotal = hit.getRawEnergy();
+ * double frac = (rawEnergy / rawEnergyTotal);
+ * energyECAL += frac * hit.getCorrectedEnergy();
+ * }
+ * }
+ * }
+ * return energyECAL;
+ * }
+ */
+
+ /*
+ * static protected int countHitsInClusterHCAL(MCParticle part, Cluster clus)
+ * {
+ * int countHitsHCAL = 0;
+ * for (CalorimeterHit hit : clus.getCalorimeterHits()) {
+ * boolean useThisHit = true;
+ * if (part != null) {
+ * useThisHit = hitMatch(part, (SimCalorimeterHit)(hit));
+ * }
+ * if (useThisHit) {
+ * org.lcsim.geometry.Subdetector subdet = hit.getSubdetector();
+ * if ( ! subdet.isCalorimeter() ) { throw new AssertionError("Cluster hit outside calorimeter"); }
+ * String name = subdet.getName();
+ * if (name.compareTo("HADBarrel") == 0 || name.compareTo("HADEndcap") == 0) {
+ * // HCAL
+ * if (hit.getTime() < 100) {
+ * // Within first 100 ns => OK
+ * countHitsHCAL++;
+ * }
+ * }
+ * }
+ * }
+ * return countHitsHCAL;
+ * }
+ */
+
+ /*
+ * static protected boolean hitMatch(MCParticle part, SimCalorimeterHit hit) {
+ * int nContributingParticles = hit.getMCParticleCount();
+ * for (int i=0; i<nContributingParticles; i++) {
+ * MCParticle hitPart = hit.getMCParticle(i);
+ * if (part == hitPart) {
+ * return true;
+ * }
+ * }
+ * return false;
+ * }
+ */
+
+ /*
+ * static protected double hitMatchEnergy(MCParticle part, SimCalorimeterHit hit) {
+ * int nContributingParticles = hit.getMCParticleCount();
+ * double energySum = 0.0;
+ * for (int i=0; i<nContributingParticles; i++) {
+ * MCParticle hitPart = hit.getMCParticle(i);
+ * if (part == hitPart || part==null) {
+ * energySum += hit.getContributedEnergy(i);
+ * }
+ * }
+ * return energySum;
+ * }
+ */
+
+ /*
+ * static protected int countHitsInCluster(MCParticle part, Cluster clus)
+ * {
+ * int countHits = 0;
+ * for (CalorimeterHit hit : clus.getCalorimeterHits()) {
+ * if (hit instanceof SimCalorimeterHit) {
+ * SimCalorimeterHit simHit = (SimCalorimeterHit) (hit);
+ * int numContributingParticles = simHit.getMCParticleCount();
+ * for (int i=0; i<numContributingParticles; i++) {
+ * MCParticle contributingParticle = simHit.getMCParticle(i);
+ * if (part == contributingParticle) {
+ * // Match
+ * countHits++;
+ * }
+ * }
+ * } else {
+ * throw new AssertionError("Non-simulated calorimeter hit");
+ * }
+ * }
+ * return countHits;
+ * }
+ */
+}
lcsim/src/org/lcsim/recon/cluster/structural/likelihood
diff -N QuantityNotDefinedException.java
--- /dev/null 1 Jan 1970 00:00:00 -0000
+++ QuantityNotDefinedException.java 1 Mar 2006 02:45:58 -0000 1.1
@@ -0,0 +1,6 @@
+package org.lcsim.recon.cluster.structural.likelihood;
+
+/** Thrown when a likelihood quantity is not well-defined for a given pair of clusters. */
+public class QuantityNotDefinedException extends Exception {
+ public QuantityNotDefinedException(String m) { super(m); }
+}
lcsim/src/org/lcsim/recon/cluster/structural/likelihood
diff -N StructuralLikelihoodQuantity.java
--- /dev/null 1 Jan 1970 00:00:00 -0000
+++ StructuralLikelihoodQuantity.java 1 Mar 2006 02:45:58 -0000 1.1
@@ -0,0 +1,21 @@
+package org.lcsim.recon.cluster.structural.likelihood;
+
+// Note: all likelihood quantities are assumed to be real-valued; quantities
+// that are conceptually integers should be returned as doubles.
+
+import org.lcsim.event.Cluster;
+
+/**
+ * Interface for individual likelihood quantities used in structural algorithm.
+ */
+
+public interface StructuralLikelihoodQuantity extends java.io.Serializable
+{
+ /**
+ * Evaluate the quantity for a pair of clusters. (For example, this could return
+ * the separation between two clusters.) If the quantity is not well-defined for
+ * these two clusters, a QuantityNotDefinedException is thrown. (For example, if
+ * the algorithm tries to calculate the moments of a cluster with only one hit.)
+ */
+ public double evaluate(Cluster clus1, Cluster clus2) throws QuantityNotDefinedException;
+}
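
For illustration, a minimal implementing class might look like the sketch
below. ClusterCentroidSeparation is a hypothetical name for this note, not a
file in this commit; it returns the distance between the two cluster
centroids and throws QuantityNotDefinedException for empty clusters.

    package org.lcsim.recon.cluster.structural.likelihood;

    import hep.physics.vec.BasicHep3Vector;
    import hep.physics.vec.Hep3Vector;
    import hep.physics.vec.VecOp;
    import org.lcsim.event.Cluster;

    public class ClusterCentroidSeparation implements StructuralLikelihoodQuantity
    {
        public double evaluate(Cluster clus1, Cluster clus2) throws QuantityNotDefinedException
        {
            // The centroid is undefined for a cluster with no hits.
            if (clus1.getCalorimeterHits().isEmpty() || clus2.getCalorimeterHits().isEmpty()) {
                throw new QuantityNotDefinedException("Need at least one hit in each cluster");
            }
            Hep3Vector p1 = new BasicHep3Vector(clus1.getPosition());
            Hep3Vector p2 = new BasicHep3Vector(clus2.getPosition());
            return VecOp.sub(p1, p2).magnitude();
        }
    }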
lcsim/src/org/lcsim/recon/cluster/structural/likelihood
diff -N StructuralLikelihoodQuantityWithEventInfo.java
--- /dev/null 1 Jan 1970 00:00:00 -0000
+++ StructuralLikelihoodQuantityWithEventInfo.java 1 Mar 2006 02:45:58 -0000 1.1
@@ -0,0 +1,22 @@
+package org.lcsim.recon.cluster.structural.likelihood;
+
+import org.lcsim.event.Cluster;
+import org.lcsim.event.EventHeader;
+
+/**
+ * This is an extension of the StructuralLikelihoodQuantity interface to cover the
+ * case where a quantity needs per-event information. (For example, if a quantity
+ * needs access to geometry information, or to the list of all hits in the event.)
+ * Once per event, the method setEventInfo() must be called; the object then caches
+ * this information in an implementation-dependent way. The value of the quantity
+ * for a given pair of clusters is still calculated with the parent interface's
+ * evaluate() method.
+ */
+public interface StructuralLikelihoodQuantityWithEventInfo extends StructuralLikelihoodQuantity
+{
+ /**
+ * Cache the per-event information. Must be done every event before any calls
+ * to evaluate().
+ */
+ public void setEventInfo(EventHeader event);
+}
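
The caller is responsible for honoring this contract. The sketch below shows
a hypothetical calling pattern (the helper class and its method are invented
for this note, not part of this commit): cache the event info once, then
evaluate cluster pairs, skipping pairs for which the quantity is undefined.

    package org.lcsim.recon.cluster.structural.likelihood;

    import java.util.List;
    import org.lcsim.event.Cluster;
    import org.lcsim.event.EventHeader;

    // Hypothetical driver-side helper -- not part of this commit.
    public class LikelihoodQuantityCaller
    {
        private final StructuralLikelihoodQuantityWithEventInfo m_quantity =
            new TrackToTrackPOCAInCalorimeter();

        public void processEvent(EventHeader event, List<Cluster> tracks)
        {
            // Cache per-event info (here, geometry) once, before any evaluate() calls:
            m_quantity.setEventInfo(event);
            for (int i = 0; i < tracks.size(); i++) {
                for (int j = i + 1; j < tracks.size(); j++) {
                    try {
                        double value = m_quantity.evaluate(tracks.get(i), tracks.get(j));
                        // ... feed value into the likelihood here ...
                    } catch (QuantityNotDefinedException e) {
                        // Undefined for this pair (e.g. too few hits): skip it.
                    }
                }
            }
        }
    }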
lcsim/src/org/lcsim/recon/cluster/structural/likelihood
diff -N TrackToClumpDOCA.java
--- /dev/null 1 Jan 1970 00:00:00 -0000
+++ TrackToClumpDOCA.java 1 Mar 2006 02:45:58 -0000 1.1
@@ -0,0 +1,28 @@
+package org.lcsim.recon.cluster.structural.likelihood;
+
+import org.lcsim.util.swim.Trajectory;
+import org.lcsim.util.swim.Line;
+import org.lcsim.event.Cluster;
+import hep.physics.vec.BasicHep3Vector;
+import hep.physics.vec.Hep3Vector;
+
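+/**
+ * This quantity calculates the distance of closest approach (DOCA) from a
+ * track-like cluster, treated as a straight line through the cluster position
+ * along its intrinsic direction (iPhi, iTheta), to the centroid of a clump.
+ */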
+public class TrackToClumpDOCA implements StructuralLikelihoodQuantity
+{
+ public TrackToClumpDOCA() {}
+
+ public double evaluate(Cluster track, Cluster clump)
+ {
+ Hep3Vector positionTrack = new BasicHep3Vector(track.getPosition());
+ Hep3Vector positionClump = new BasicHep3Vector(clump.getPosition());
+ Trajectory line = new Line(positionTrack, track.getIPhi(), track.getITheta());
+ double doca = Math.abs(line.getDistanceToPoint(positionClump));
+ return doca;
+ }
+
+
+}
lcsim/src/org/lcsim/recon/cluster/structural/likelihood
diff -N TrackToTrackDOCA.java
--- /dev/null 1 Jan 1970 00:00:00 -0000
+++ TrackToTrackDOCA.java 1 Mar 2006 02:45:58 -0000 1.1
@@ -0,0 +1,51 @@
+package org.lcsim.recon.cluster.structural.likelihood;
+
+import org.lcsim.util.swim.Trajectory;
+import org.lcsim.util.swim.Line;
+import org.lcsim.event.Cluster;
+import hep.physics.vec.BasicHep3Vector;
+import hep.physics.vec.Hep3Vector;
+import hep.physics.vec.VecOp;
+
+/**
+ * This quantity calculates the distance of closest approach (DOCA)
+ * of two track-like clusters. The clusters are treated as straight
+ * lines, with position and direction derived from the energy tensor.
+ *
+ * Note that here the DOCA is the distance from one track to the
+ * other, not the distance from each track to the POCA.
+ */
+
+public class TrackToTrackDOCA implements StructuralLikelihoodQuantity
+{
+ public TrackToTrackDOCA() {}
+
+ /**
+ * Find the DOCA. Each cluster must have at least 4 hits to calculate the
+ * position and direction properly; if not this will fail and throw a
+ * QuantityNotDefinedException.
+ */
+ public double evaluate(Cluster track1, Cluster track2) throws QuantityNotDefinedException
+ {
+ if (track1.getCalorimeterHits().size()<4 || track2.getCalorimeterHits().size()<4) {
+ throw new QuantityNotDefinedException("Need 4+ hits in each cluster to get direction, but they have "
+ +track1.getCalorimeterHits().size()
+ +" and "
+ +track2.getCalorimeterHits().size()
+ +" respectively.");
+ }
+
+ // Need to find directions of the two clusters.
+ Line line1 = MiscUtilities.makeLine(track1);
+ Line line2 = MiscUtilities.makeLine(track2);
+
+ double[] distancesAlongLinesToPOCAs = Line.getPOCAOfLines(line1, line2);
+
+ Hep3Vector poca1 = line1.getPointAtDistance(distancesAlongLinesToPOCAs[0]);
+ Hep3Vector poca2 = line2.getPointAtDistance(distancesAlongLinesToPOCAs[1]);
+ double doca = VecOp.sub(poca1,poca2).magnitude();
+ return doca;
+ }
+
+
+}
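
For reference, the two distances returned by Line.getPOCAOfLines presumably
follow the standard closest-approach construction for a pair of straight
lines. The sketch below is that textbook construction (PocaSketch and
pocaOfLines are names invented for this note; this is not necessarily how
Line implements it):

    package org.lcsim.recon.cluster.structural.likelihood;

    import hep.physics.vec.Hep3Vector;
    import hep.physics.vec.VecOp;

    // Hypothetical helper -- not part of this commit.
    final class PocaSketch
    {
        // Returns {t1, t2} such that p1 + t1*d1 and p2 + t2*d2 are the
        // mutually closest points of the two lines (d1, d2 unit vectors).
        static double[] pocaOfLines(Hep3Vector p1, Hep3Vector d1,
                                    Hep3Vector p2, Hep3Vector d2)
        {
            Hep3Vector w = VecOp.sub(p1, p2);  // offset between reference points
            double b = VecOp.dot(d1, d2);
            double d = VecOp.dot(d1, w);
            double e = VecOp.dot(d2, w);
            double denom = 1.0 - b*b;          // vanishes for parallel lines
            if (Math.abs(denom) < 1e-12) {
                return new double[] { 0.0, e }; // parallel: any matched pair
            }
            return new double[] { (b*e - d) / denom, (e - b*d) / denom };
        }
    }

The DOCA in evaluate() is then the separation of those two points, as
computed above with VecOp.sub(poca1, poca2).magnitude().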
lcsim/src/org/lcsim/recon/cluster/structural/likelihood
diff -N TrackToTrackPOCAInCalorimeter.java
--- /dev/null 1 Jan 1970 00:00:00 -0000
+++ TrackToTrackPOCAInCalorimeter.java 1 Mar 2006 02:45:58 -0000 1.1
@@ -0,0 +1,92 @@
+package org.lcsim.recon.cluster.structural.likelihood;
+
+import hep.physics.vec.BasicHep3Vector;
+import hep.physics.vec.Hep3Vector;
+import hep.physics.vec.VecOp;
+
+import org.lcsim.event.Cluster;
+import org.lcsim.recon.cluster.util.TensorClusterPropertyCalculator;
+import org.lcsim.recon.cluster.util.BasicCluster;
+import org.lcsim.util.swim.Line;
+import org.lcsim.event.EventHeader;
+
+/**
+ * This quantity indicates whether the point of closest approach (POCA) of two track-like clusters
+ * is within the calorimeter. In the current implementation, the tracks are treated
+ * as straight lines; their position and direction are determined from the energy
+ * tensor of the cluster.
+ *
+ * The implementation uses geometry information derived from EventHeader. The
+ * method setEventInfo() MUST be called before evaluate() so that the geometry
+ * information can be cached. In general, setEventInfo() SHOULD be called
+ * at the start of every event.
+ */
+
+public class TrackToTrackPOCAInCalorimeter implements StructuralLikelihoodQuantityWithEventInfo
+{
+ /**
+ * Constructor.
+ */
+ public TrackToTrackPOCAInCalorimeter() {
+ }
+
+ /**
+ * Cache the per-event geometry info.
+ */
+ public void setEventInfo(EventHeader event) {
+ // Pick up geometry info from event
+ m_det = event.getDetector();
+ assert(m_det != null);
+ }
+
+ /**
+ * Find the POCA of two track-like clusters, and determine whether it's in the
+ * calorimeter (returns 1) or outside the calorimeter (returns 0). Currently,
+ * the calorimeter is defined as the set of all cylindrical subdetectors for which isCalorimeter()
+ * is true, but this definition may be updated later.
+ */
+ public double evaluate(Cluster track1, Cluster track2) throws QuantityNotDefinedException
+ {
+ if (track1.getCalorimeterHits().size()<4 || track2.getCalorimeterHits().size()<4) {
+ throw new QuantityNotDefinedException("Need 4+ hits in each cluster to get direction, but they have "
+ +track1.getCalorimeterHits().size()
+ +" and "
+ +track2.getCalorimeterHits().size()
+ +" respectively.");
+ }
+
+ // Need to find directions of the two clusters.
+ Line line1 = MiscUtilities.makeLine(track1);
+ Line line2 = MiscUtilities.makeLine(track2);
+
+ // Find the POCA:
+ double[] distancesAlongLinesToPOCAs = Line.getPOCAOfLines(line1, line2);
+ Hep3Vector nearestPointOnLine1 = line1.getPointAtDistance(distancesAlongLinesToPOCAs[0]);
+ Hep3Vector nearestPointOnLine2 = line2.getPointAtDistance(distancesAlongLinesToPOCAs[1]);
+ // Find the mean of these two points -- that's the POCA
+ Hep3Vector poca = VecOp.mult(0.5, VecOp.add(nearestPointOnLine1, nearestPointOnLine2));
+
+ boolean pocaInCalorimeter = isPointInCalorimeter(poca);
+ if (pocaInCalorimeter) {
+ return 1.0;
+ } else {
+ return 0.0;
+ }
+ }
+
+ /**
+ * Determine whether a point is inside the calorimeter or not.
+ */
+ protected boolean isPointInCalorimeter(Hep3Vector point) {
+ if (m_det==null) { System.out.println("WARNING: "+this.getClass().getName()+": m_det is null -- was setEventInfo() called for this event? Crash likely."); }
+ org.lcsim.geometry.Subdetector subdet = MiscUtilities.findComponent(point, m_det);
+ return (subdet != null && subdet.isCalorimeter());
+ }
+
+ /**
+ * Cached per-event geometry information. Transient since
+ * we do not expect to resume from the same event when serialized.
+ */
+ transient private org.lcsim.geometry.Detector m_det = null;
+
+}
lcsim/src/org/lcsim/recon/cluster/structural/likelihood
diff -N TrackToTrackSmallestDistanceToPOCA.java
--- /dev/null 1 Jan 1970 00:00:00 -0000
+++ TrackToTrackSmallestDistanceToPOCA.java 1 Mar 2006 02:45:59 -0000 1.1
@@ -0,0 +1,39 @@
+package org.lcsim.recon.cluster.structural.likelihood;
+
+import hep.physics.vec.BasicHep3Vector;
+import hep.physics.vec.Hep3Vector;
+import hep.physics.vec.VecOp;
+
+import org.lcsim.event.Cluster;
+import org.lcsim.recon.cluster.util.TensorClusterPropertyCalculator;
+import org.lcsim.recon.cluster.util.BasicCluster;
+import org.lcsim.util.swim.Line;
+
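+/**
+ * This quantity treats two track-like clusters as straight lines and returns
+ * the smaller of the two distances along each line from its reference point
+ * to the mutual point of closest approach (POCA).
+ */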
+public class TrackToTrackSmallestDistanceToPOCA implements StructuralLikelihoodQuantity
+{
+ public TrackToTrackSmallestDistanceToPOCA() {}
+
+ public double evaluate(Cluster track1, Cluster track2) throws QuantityNotDefinedException
+ {
+ if (track1.getCalorimeterHits().size()<4 || track2.getCalorimeterHits().size()<4) {
+ throw new QuantityNotDefinedException("Need 4+ hits in each cluster to get direction, but they have "
+ +track1.getCalorimeterHits().size()
+ +" and "
+ +track2.getCalorimeterHits().size()
+ +" respectively.");
+ }
+
+ // Need to find directions of the two clusters.
+ Line line1 = MiscUtilities.makeLine(track1);
+ Line line2 = MiscUtilities.makeLine(track2);
+
+ double[] distancesAlongLinesToPOCAs = Line.getPOCAOfLines(line1, line2);
+
+ return Math.min(Math.abs(distancesAlongLinesToPOCAs[0]), Math.abs(distancesAlongLinesToPOCAs[1]));
+ }
+}
CVSspam 0.2.8