Commit in lcsim/src/org/lcsim/contrib/uiowa/structural on MAIN
ClusterAssociator.java+8added 1.1
ExamplePFA.java+60added 1.1
GenericStructuralDriver.java+74added 1.1
LikelihoodFindingStructuralDriver.java+177added 1.1
LikelihoodLinkDriver.java+176added 1.1
LikelihoodLinkPlotterDriver.java+98added 1.1
Link.java+24added 1.1
likelihood/ClumpToClumpDOCA.java+33added 1.1
          /ClusterToClusterMinDistance.java+13added 1.1
          /IntermediateHitFinder.java+28added 1.1
          /LikelihoodDistribution.java+164added 1.1
          /LikelihoodEvaluator.java+129added 1.1
          /MiscUtilities.java+60added 1.1
          /QuantityNotDefinedException.java+7added 1.1
          /StructuralLikelihoodQuantity.java+11added 1.1
          /TrackToClumpDOCA.java+23added 1.1
          /TrackToTrackDOCA.java+25added 1.1
          /TrackToTrackIntermediateHits.java+35added 1.1
          /TrackToTrackIntermediateHitsCount.java+24added 1.1
          /TrackToTrackIntermediateHitsFraction.java+29added 1.1
          /TrackToTrackPOCAInCalorimeter.java+15added 1.1
          /TrackToTrackSmallestDistanceToPOCA.java+13added 1.1
+1226
22 added files
Preliminary version of a structural algorithm which uses a likelihood-based selector to link track segments and clumps within clusters

lcsim/src/org/lcsim/contrib/uiowa/structural
ClusterAssociator.java added at 1.1
diff -N ClusterAssociator.java
--- /dev/null	1 Jan 1970 00:00:00 -0000
+++ ClusterAssociator.java	29 Sep 2005 21:04:48 -0000	1.1
@@ -0,0 +1,8 @@
+package structural; // package org.lcsim.recon.cluster.structural;
+
+import org.lcsim.event.Cluster;
+
+/**
+ * Truth oracle used while training and evaluating the likelihood-based
+ * structural linker: implementations decide whether a candidate link
+ * between two sub-clusters (MIP segments or clumps) is "correct", i.e.
+ * should be counted as signal rather than background when filling the
+ * likelihood distributions.
+ */
+public interface ClusterAssociator
+{
+    public boolean isLinkCorrect(Cluster clus1, Cluster clus2);
+}

lcsim/src/org/lcsim/contrib/uiowa/structural
ExamplePFA.java added at 1.1
diff -N ExamplePFA.java
--- /dev/null	1 Jan 1970 00:00:00 -0000
+++ ExamplePFA.java	29 Sep 2005 21:04:48 -0000	1.1
@@ -0,0 +1,60 @@
+package structural; // package org.lcsim.recon.cluster.structural;
+
+import org.lcsim.event.EventHeader;
+import org.lcsim.util.Driver;
+import org.lcsim.recon.cluster.nn.NearestNeighborClusterDriver;
+
+import structural.likelihood.*;
+
+/**
+ * Example particle-flow driver that wires up the likelihood-based
+ * structural linker.  In "write" mode it runs the histogram-collecting
+ * (training) driver; otherwise it reads previously written likelihood
+ * histograms from "likelihood.bin", registers the quantities to
+ * evaluate, and runs the performance-plotting driver.
+ */
+public class ExamplePFA extends Driver
+{
+    /**
+     * @param writeLikelihood true to accumulate and write likelihood
+     *        histograms; false to read them back and plot performance.
+     */
+    public ExamplePFA(boolean writeLikelihood) 
+    {
+	// Begin with a big-scale cluster set
+	// This will output the following lists:
+	//    EcalBarrHitsNNClusters
+	//    EcalEndcapHitsNNClusters
+	//    ForwardEcalEndcapHitsNNClusters
+	//    HcalBarrHitsNNClusters
+	//    HcalEndcapHitsNNClusters
+	//Driver nnDriver = new NearestNeighborClusterDriver(6, 6, 8, 1);
+	//add(nnDriver);
+
+	// ... or, make cluster lists with MST.
+	// MSTClusterDriver linkDriver = new MSTClusterDriver("user");
+        // ...
+
+	// Need to merge those lists/clusters somehow
+	// PairDecisionMaker dec = new LinkClusterDecision();
+
+	// Find MIPs within clusters
+
+	// Find clumps within clusters
+
+	// Run likelihood structural analysis
+	// NOTE(review): assoc stays null here, but both drivers below call
+	// assoc.isLinkCorrect(...) via determineIfLinkIsCorrect, which would
+	// throw a NullPointerException -- presumably a real associator must
+	// be plugged in before running.  TODO confirm.
+	ClusterAssociator assoc = null;
+	if (writeLikelihood) {
+	    // Obtain and write likelihood histograms
+	    LikelihoodEvaluator eval = new LikelihoodEvaluator();
+	    LikelihoodFindingStructuralDriver likelihoodWriter = new LikelihoodFindingStructuralDriver(eval, assoc);
+	    add(likelihoodWriter);
+	} else {
+	    // Use pre-existing likelihood histograms to check clusters
+	    // Each registration gives: quantity, nbins, min, max, useUnderFlow, useOverFlow.
+	    LikelihoodEvaluator eval = LikelihoodEvaluator.readFromFile("likelihood.bin");
+	    eval.addLikelihoodQuantityTrackToTrack(new TrackToTrackDOCA(), 10, 0.0, 5.0, false, true);
+	    eval.addLikelihoodQuantityTrackToTrack(new TrackToTrackPOCAInCalorimeter(), 2, -0.5, 1.5, false, false);
+	    eval.addLikelihoodQuantityTrackToTrack(new TrackToTrackSmallestDistanceToPOCA(), 5, 0.0, 5.0, false, true);
+	    eval.addLikelihoodQuantityTrackToTrack(new TrackToTrackIntermediateHitsCount(), 10, -0.5, 9.5, false, true);
+	    eval.addLikelihoodQuantityTrackToTrack(new TrackToTrackIntermediateHitsFraction(), 11, -0.05, 1.05, false, false);
+	    eval.addLikelihoodQuantityTrackToClump(new TrackToClumpDOCA(), 5, 0.0, 25.0, false, true);
+	    eval.addLikelihoodQuantityTrackToClump(new ClusterToClusterMinDistance(), 5, 0.0, 25.0, false, true);
+	    eval.addLikelihoodQuantityClumpToClump(new ClumpToClumpDOCA(), 5, 0.0, 50.0, false, true);
+	    eval.addLikelihoodQuantityClumpToClump(new ClusterToClusterMinDistance(), 5, 0.0, 50.0, false, true);
+	    
+	    LikelihoodLinkPlotterDriver likelihoodPlotter = new LikelihoodLinkPlotterDriver(eval, 0.5, 0.5, 0.5, assoc);
+	    likelihoodPlotter.initPlots("likelihoodPerformance.aida");
+	    add(likelihoodPlotter);
+	}
+    }
+
+}

lcsim/src/org/lcsim/contrib/uiowa/structural
GenericStructuralDriver.java added at 1.1
diff -N GenericStructuralDriver.java
--- /dev/null	1 Jan 1970 00:00:00 -0000
+++ GenericStructuralDriver.java	29 Sep 2005 21:04:48 -0000	1.1
@@ -0,0 +1,74 @@
+package structural; // package org.lcsim.recon.cluster.structural;
+
+import org.lcsim.util.Driver;
+import org.lcsim.event.EventHeader;
+import org.lcsim.event.Cluster;
+
+import java.util.List;
+import java.util.Vector;
+
+/**
+ * Skeleton for structural-analysis drivers.  Subclasses supply the
+ * event hooks (initialize/finalize), the source of "big" clusters, the
+ * decomposition of each big cluster into MIP track segments and clumps,
+ * and the three pairwise comparison callbacks; this base class owns the
+ * looping logic so every unordered pair is visited exactly once.
+ **/
+
+public abstract class GenericStructuralDriver extends Driver {
+
+    // Current event, cached so subclass callbacks can reach it during process().
+    transient protected EventHeader m_event;
+
+    abstract public void            initializeEvent();
+    abstract public List<Cluster>   getListOfBigClusters();
+    abstract public boolean         ignoreCluster(Cluster clus);
+    abstract public void            initializeBigCluster(Cluster clus);
+    abstract public Vector<Cluster> findTrackSegments(Cluster clus);
+    abstract public Vector<Cluster> findClumps(Cluster clus);
+    abstract public void            compareTrackSegmentToTrackSegment(Cluster clus1, Cluster clus2);
+    abstract public void            compareTrackSegmentToClump(Cluster clus1, Cluster clus2);
+    abstract public void            compareClumpToClump(Cluster clus1, Cluster clus2);
+    abstract public void            finalizeBigCluster(Cluster clus, Vector<Cluster> vMIPs, Vector<Cluster> vClumps);
+    abstract public void            finalizeEvent();
+
+
+    public void process(EventHeader event)
+    {
+	m_event = event;
+	
+	initializeEvent();
+	List<Cluster> listOfBigClusters = getListOfBigClusters();
+	for (Cluster currentCluster : listOfBigClusters) {
+	    boolean ignoreThisCluster = ignoreCluster(currentCluster);
+	    if (!ignoreThisCluster) {
+		initializeBigCluster(currentCluster);
+                Vector<Cluster> vClumps = findClumps(currentCluster);
+                Vector<Cluster> vMIPs = findTrackSegments(currentCluster);
+		// Consider MIP-MIP links, making sure to compare each pair
+		// once and once only:
+		for (int iMIP=0; iMIP<vMIPs.size(); iMIP++) {
+                    Cluster track1 = (Cluster) (vMIPs.get(iMIP));
+                    // Compare to other MIPs:
+                    for (int jMIP=iMIP+1; jMIP<vMIPs.size(); jMIP++) {
+                        Cluster track2 = (Cluster) (vMIPs.get(jMIP));
+                        compareTrackSegmentToTrackSegment(track1, track2);
+                    }
+                }
+                // Now consider MIP-Clump links:
+		for (Cluster track : vMIPs) {
+		    for (Cluster clump : vClumps) {
+                        compareTrackSegmentToClump(track, clump);
+                    }
+                }
+                // Now consider Clump-Clump links.  As with MIP-MIP, compare
+                // each pair once and once only.  (The previous nested
+                // for-each visited every ordered pair, so each clump was
+                // compared against itself and every pair was processed
+                // twice, double-counting entries in subclasses.)
+		for (int iClump=0; iClump<vClumps.size(); iClump++) {
+                    Cluster clump1 = vClumps.get(iClump);
+                    for (int jClump=iClump+1; jClump<vClumps.size(); jClump++) {
+                        Cluster clump2 = vClumps.get(jClump);
+                        compareClumpToClump(clump1, clump2);
+                    }
+                }
+		// Finalize big cluster:
+		finalizeBigCluster(currentCluster, vMIPs, vClumps);
+            }
+        }
+        // OK, done looping. We may want to do a final step, such as writing out
+        // the results:
+        finalizeEvent();
+    }
+
+}

lcsim/src/org/lcsim/contrib/uiowa/structural
LikelihoodFindingStructuralDriver.java added at 1.1
diff -N LikelihoodFindingStructuralDriver.java
--- /dev/null	1 Jan 1970 00:00:00 -0000
+++ LikelihoodFindingStructuralDriver.java	29 Sep 2005 21:04:48 -0000	1.1
@@ -0,0 +1,177 @@
+package structural; // package org.lcsim.recon.cluster.structural;
+
+import java.util.List;
+import java.util.Vector;
+import java.util.Map;
+
+import org.lcsim.event.Cluster;
+import org.lcsim.event.EventHeader;
+import structural.likelihood.*;
+
+/**
+ * Training driver: for every pair of sub-clusters (track-track,
+ * track-clump, clump-clump) inside each big cluster, asks the
+ * ClusterAssociator whether the link is correct and fills the matching
+ * signal or background likelihood distributions in the evaluator.
+ * NOTE(review): ignoreCluster() still throws AssertionError for any
+ * cluster that survives the size cut, so this driver is not yet
+ * runnable end-to-end.
+ */
+public class LikelihoodFindingStructuralDriver extends GenericStructuralDriver {
+
+    // Truth oracle deciding signal vs. background for each candidate link.
+    ClusterAssociator m_clusterAssociator = null;
+    // Holds the likelihood distributions being filled; configured by the caller.
+    LikelihoodEvaluator m_eval = null;
+
+    /**
+     * @param eval  pre-configured evaluator whose distributions are filled
+     * @param assoc truth oracle; must be non-null or comparisons will NPE
+     */
+    public LikelihoodFindingStructuralDriver(LikelihoodEvaluator eval, ClusterAssociator assoc) {
+	m_eval = eval; // This should be set up beforehand
+	m_clusterAssociator = assoc;
+
+        // m_eval.addLikelihoodQuantityTrackToTrack(new TrackToTrackDOCA(), 10, 0.0, 5.0, false, true);
+//         m_eval.addLikelihoodQuantityTrackToTrack(new TrackToTrackPOCAInCalorimeter(), 2, -0.5, 1.5, false, 
+// false);
+//         m_eval.addLikelihoodQuantityTrackToTrack(new TrackToTrackSmallestDistanceToPOCA(), 5, 0.0, 5.0, fal
+// se, true);
+//         m_eval.addLikelihoodQuantityTrackToTrack(new TrackToTrackIntermediateHitsCount(), 10, -0.5, 9.5, fa
+// lse, true);
+//         m_eval.addLikelihoodQuantityTrackToTrack(new TrackToTrackIntermediateHitsFraction(), 11, -0.05, 1.0
+// 5, false, false);
+//         m_eval.addLikelihoodQuantityTrackToClump(new TrackToClumpDOCA(), 5, 0.0, 25.0, false, true);
+//         m_eval.addLikelihoodQuantityTrackToClump(new TrackToClumpMinDistance(), 5, 0.0, 25.0, false, true);
+//         m_eval.addLikelihoodQuantityClumpToClump(new ClumpToClumpDOCA(), 5, 0.0, 50.0, false, true);
+//         m_eval.addLikelihoodQuantityClumpToClump(new ClumpToClumpMinDistance(), 5, 0.0, 50.0, false, true);
+    }
+
+    // Big clusters come from the event store under a hard-coded list name.
+    public List<Cluster> getListOfBigClusters() {
+        // Grab from event store
+	List<Cluster> vBigClusters = m_event.get(Cluster.class, "MSTCluster Merged");
+        return vBigClusters;
+    }
+
+    // Returns true when the cluster should be skipped for training.
+    // Currently: skips clusters with <= 10 hits, then always throws
+    // because the MC-truth based filtering below is not implemented yet.
+    public boolean ignoreCluster(Cluster clus) {
+        // Ignore if < 10 hits
+        if (clus.getCalorimeterHits().size() <= 10) {
+            return true; // ignore because too small
+        }
+
+	throw new AssertionError("Not implemented yet!");
+
+        // Ignore if dominant particle is not a charged hadron
+        // with > 500 MeV of energy. This is a bit arbitrary,
+	// but helps ensure we don't train the algorithm on
+	// 10 MeV photons, electrons, etc.
+
+	// Not implemented yet!
+
+        //MCParticle alphaParticle = findAlphaParticle(clus);
+        //if (!isCharged(alphaParticle)) {
+	//return true; // ignore because neutral
+	//}
+        //if (isLepton(alphaParticle)) {
+	//return true; // ignore because not hadron
+	//}
+	//// Check momentum...
+        //double[] mom = alphaParticle.getMomentum();
+        //double momMagnitude = Math.sqrt(mom[0]*mom[0] + mom[1]*mom[1] + mom[2]*mom[2]);
+        //if (momMagnitude<0.5) {
+	//return true;
+	//} else {
+	//return false; // don't ignore
+	//}
+    }
+    
+    // Looks up the MIP segments previously found for this cluster via a
+    // Map stored in the event under "MapClustersToMIPs".
+    public Vector<Cluster> findTrackSegments(Cluster clus) {
+	// FIX: This should be done through a parameter instead of a hard-coded name
+	// FIX: This is just ugly.
+	List<Object> dummyObjectList = m_event.get(Object.class, "MapClustersToMIPs");
+	Object dummyObject = dummyObjectList.iterator().next();
+	Map<Cluster, Vector<Cluster> > mapClustersToMIPs = (Map<Cluster, Vector<Cluster> >) (dummyObject);
+        Vector<Cluster> vMIPs = mapClustersToMIPs.get(clus);
+        return vMIPs;
+    }
+
+    // Same lookup pattern as findTrackSegments, for clumps
+    // ("MapClustersToClumps").
+    public Vector<Cluster> findClumps(Cluster clus) {
+	// FIX: This should be done through a parameter instead of a hard-coded name
+	// FIX: This is just ugly.
+	List<Object> dummyObjectList = m_event.get(Object.class, "MapClustersToClumps");
+	Object dummyObject = dummyObjectList.iterator().next();
+	Map<Cluster, Vector<Cluster> > mapClustersToClumps = (Map<Cluster, Vector<Cluster> >) (dummyObject);
+        Vector<Cluster> vClumps = mapClustersToClumps.get(clus);
+        return vClumps;
+    }
+
+    // Fill the track-track distributions for signal or background,
+    // depending on the associator's verdict.
+    public void compareTrackSegmentToTrackSegment(Cluster clus1, Cluster clus2)
+    {
+        boolean isLinkCorrect = determineIfLinkIsCorrect(clus1, clus2);
+        List<LikelihoodDistribution> vDistributions = m_eval.getLikelihoodDistributionTrackToTrack(isLinkCorrect);
+	for ( LikelihoodDistribution dist : vDistributions ) {
+            dist.fill(clus1, clus2);
+        }
+    }
+
+    // As above, for track-clump pairs.
+    public void compareTrackSegmentToClump(Cluster clus1, Cluster clus2)
+    {
+        boolean isLinkCorrect = determineIfLinkIsCorrect(clus1, clus2);
+        List<LikelihoodDistribution> vDistributions = m_eval.getLikelihoodDistributionTrackToClump(isLinkCorrect);
+	for ( LikelihoodDistribution dist : vDistributions ) {
+            dist.fill(clus1, clus2);
+        }
+    }
+
+    // As above, for clump-clump pairs.
+    public void compareClumpToClump(Cluster clus1, Cluster clus2) 
+    {
+        boolean isLinkCorrect = determineIfLinkIsCorrect(clus1, clus2);
+        List<LikelihoodDistribution> vDistributions = m_eval.getLikelihoodDistributionClumpToClump(isLinkCorrect);
+	for ( LikelihoodDistribution dist : vDistributions ) {
+            dist.fill(clus1, clus2);
+        }
+    }
+
+    // Delegates the signal/background decision to the associator.
+    protected boolean determineIfLinkIsCorrect(Cluster clus1, Cluster clus2)
+    {
+        return m_clusterAssociator.isLinkCorrect(clus1, clus2);
+    }
+
+//     // Find the dominant particles:
+//     MCParticle findAlphaParticle(Cluster cluster) {
+//         MCParticle[] particles = cluster.getMCParticles(); 
+//         double[] energies = cluster.getContributedEnergy(); 
+//         if (particles.length != energies.length) { throw new AssertionError("length mismatch"); }
+
+//         double maxEnergy = 0.0;
+//         MCParticle alphaParticle = null;
+//         for (int iParticle=0; iParticle<particles.length; iParticle++) {
+//             if (particles[iParticle] == null) { throw new AssertionError("Null particle can confuse algorit
+// hm"); }
+//             double energy = energies[iParticle];
+//             MCParticle particle = particles[iParticle];
+//             if (energy>maxEnergy || alphaParticle==null) {
+//                 maxEnergy = energy;
+//                 alphaParticle = particle;
+//             }
+//         }
+//         return alphaParticle;
+//     }
+
+ //    boolean isCharged(MCParticle part) {
+//         try {
+//             ParticleType type = part.getType();
+//             double charge = type.getCharge();
+//             return (Math.abs(charge)>0.5);
+//         } catch (Exception x) {
+//             // Er...
+//             return false;
+//         }
+//     }
+    
+//     boolean isLepton(MCParticle part) {
+//         try {
+//             ParticleType type = part.getType();
+//             int pdg = Math.abs(type.getPDGID());
+//             if (pdg>0 && pdg<20) {
+//                 return true;
+//             } else {
+//                 return false;
+//             }
+//         } catch (Exception x) {
+//             // Er...
+//             return true; // !
+//         }
+//     }
+
+    // No per-event or per-cluster setup/teardown needed for training.
+    public void initializeEvent() {}
+    public void initializeBigCluster(Cluster bigClus) {}
+    public void finalizeEvent() {}
+    public void finalizeBigCluster(Cluster bigClus, Vector<Cluster> vMIPs, Vector<Cluster> vClumps) {}
+
+}

lcsim/src/org/lcsim/contrib/uiowa/structural
LikelihoodLinkDriver.java added at 1.1
diff -N LikelihoodLinkDriver.java
--- /dev/null	1 Jan 1970 00:00:00 -0000
+++ LikelihoodLinkDriver.java	29 Sep 2005 21:04:48 -0000	1.1
@@ -0,0 +1,176 @@
+package structural; // package org.lcsim.recon.cluster.structural;
+
+import java.util.List;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.Vector;
+import java.util.Map;
+
+import org.lcsim.event.Cluster;
+import org.lcsim.recon.cluster.util.BasicCluster;
+import org.lcsim.event.EventHeader;
+import structural.likelihood.*;
+
+/**
+ * Application driver: evaluates the likelihood of each candidate link
+ * (track-track, track-clump, clump-clump) inside a big cluster, keeps
+ * links above the configured cuts, then merges linked sub-clusters into
+ * BasicClusters by following the links transitively.
+ * NOTE(review): initializeEvent() and finalizeBigCluster() still throw
+ * AssertionError -- the event-store output step is not implemented yet.
+ */
+public class LikelihoodLinkDriver extends GenericStructuralDriver 
+{
+    // Evaluator providing per-pair link likelihoods.
+    LikelihoodEvaluator m_eval = null;
+    // Likelihood thresholds above which a link is accepted.
+    double m_cutTrackToTrack = 0.5;
+    double m_cutTrackToClump = 0.5;
+    double m_cutClumpToClump = 0.5;
+    // Accepted links for the big cluster currently being processed.
+    Vector<Link> m_vlinksTrackToTrack = null;
+    Vector<Link> m_vlinksTrackToClump = null;
+    Vector<Link> m_vlinksClumpToClump = null;
+
+    /**
+     * @param eval            evaluator supplying link likelihoods
+     * @param cutTrackToTrack minimum likelihood to accept a track-track link
+     * @param cutTrackToClump minimum likelihood to accept a track-clump link
+     * @param cutClumpToClump minimum likelihood to accept a clump-clump link
+     */
+    public LikelihoodLinkDriver(LikelihoodEvaluator eval, double cutTrackToTrack, double cutTrackToClump, double cutClumpToClump) {
+        m_eval = eval;
+	m_cutTrackToTrack = cutTrackToTrack;
+	m_cutTrackToClump = cutTrackToClump;
+	m_cutClumpToClump = cutClumpToClump;
+    }
+
+    public List<Cluster> getListOfBigClusters() {
+        // Grab from event store
+	List<Cluster> vBigClusters = m_event.get(Cluster.class, "MSTCluster Merged");
+        return vBigClusters;
+    }
+
+    public boolean ignoreCluster(Cluster clus) {
+        // Ignore if < 10 hits
+        return (clus.getCalorimeterHits().size() <= 10);
+    }
+    
+    // MIP segments are looked up via a Map stored in the event.
+    public Vector<Cluster> findTrackSegments(Cluster clus) {
+	// FIX: This should be done through a parameter instead of a hard-coded name
+	// FIX: This is just ugly.
+	List<Object> dummyObjectList = m_event.get(Object.class, "MapClustersToMIPs");
+	Object dummyObject = dummyObjectList.iterator().next();
+	Map<Cluster, Vector<Cluster> > mapClustersToMIPs = (Map<Cluster, Vector<Cluster> >) (dummyObject);
+        Vector<Cluster> vMIPs = mapClustersToMIPs.get(clus);
+        return vMIPs;
+    }
+    
+    // Clumps are looked up the same way.
+    public Vector<Cluster> findClumps(Cluster clus) {
+	// FIX: This should be done through a parameter instead of a hard-coded name
+	// FIX: This is just ugly.
+	List<Object> dummyObjectList = m_event.get(Object.class, "MapClustersToClumps");
+	Object dummyObject = dummyObjectList.iterator().next();
+	Map<Cluster, Vector<Cluster> > mapClustersToClumps = (Map<Cluster, Vector<Cluster> >) (dummyObject);
+        Vector<Cluster> vClumps = mapClustersToClumps.get(clus);
+        return vClumps;
+    }
+
+    // Keep the pair as a link when its likelihood exceeds the cut.
+    public void compareTrackSegmentToTrackSegment(Cluster clus1, Cluster clus2)  {
+        double likelihood = m_eval.getLinkLikelihoodTrackToTrack(clus1, clus2);
+        if (likelihood > m_cutTrackToTrack) {
+            m_vlinksTrackToTrack.add(new Link(clus1, clus2));
+        }
+    }
+    public void compareTrackSegmentToClump(Cluster clus1, Cluster clus2)  {
+        double likelihood = m_eval.getLinkLikelihoodTrackToClump(clus1, clus2);
+        if (likelihood > m_cutTrackToClump) {
+            m_vlinksTrackToClump.add(new Link(clus1, clus2));
+        }
+    }
+    public void compareClumpToClump(Cluster clus1, Cluster clus2)  {
+        double likelihood = m_eval.getLinkLikelihoodClumpToClump(clus1, clus2);
+        if (likelihood > m_cutClumpToClump) {
+            m_vlinksClumpToClump.add(new Link(clus1, clus2));
+        }
+    }
+
+    public void initializeEvent() {
+        //Map tmpMap = new HashMap();
+        //m_event.put("MapClustersToSkeletons", tmpMap);
+	throw new AssertionError("Not implemented");
+    }
+
+    // Reset the per-cluster link collections.
+    public void initializeBigCluster(Cluster bigClus) {
+        m_vlinksTrackToTrack = new Vector<Link>();
+        m_vlinksTrackToClump = new Vector<Link>();
+        m_vlinksClumpToClump = new Vector<Link>();
+    }
+
+    public void finalizeEvent() {
+        //
+    }
+    // Partition the MIPs and clumps into connected components under the
+    // accepted links, merging each component into one BasicCluster.
+    public void finalizeBigCluster(Cluster bigClus, Vector<Cluster> vMIPs, Vector<Cluster> vClumps) {
+        // Merge links & clusters:
+        Vector<Cluster> vLinkedClusters = new Vector<Cluster>();
+        Set<Cluster> unassignedClumps = new HashSet<Cluster>();
+        Set<Cluster> unassignedMIPs = new HashSet<Cluster>();
+        unassignedClumps.addAll(vClumps);
+        unassignedMIPs.addAll(vMIPs);
+        while ( !(unassignedMIPs.isEmpty() && unassignedClumps.isEmpty()) ) {
+            Set<Cluster> linkedClusters = new HashSet<Cluster>(); // A set containing a bunch of clusters, linked.
+            if ( !(unassignedMIPs.isEmpty()) ) {
+                Cluster nextMIP = unassignedMIPs.iterator().next();
+                recursivelyAddTrack(nextMIP, linkedClusters, unassignedMIPs, unassignedClumps);
+            } else {
+                Cluster nextClump = unassignedClumps.iterator().next();
+                recursivelyAddClump(nextClump, linkedClusters, unassignedMIPs, unassignedClumps);
+            }
+	    BasicCluster newMergedCluster = new BasicCluster();
+	    for (Cluster currentLinkedCluster : linkedClusters) {
+		newMergedCluster.addCluster(currentLinkedCluster);
+	    }
+            vLinkedClusters.add(newMergedCluster);
+        }
+	
+	throw new AssertionError("Not implemented");
+        //// Output is a vector of MergedCluster objects for this big cluster
+        //Map tmpMap = (Map) (m_event.get("MapClustersToSkeletons"));
+        //tmpMap.put(bigClus, vLinkedClusters);
+        //m_event.put("MapClustersToSkeletons", tmpMap);
+    }
+
+    // Depth-first walk: claim this MIP, then follow its accepted
+    // track-track and track-clump links to unassigned neighbours.
+    void recursivelyAddTrack(Cluster currentMIP, Set<Cluster> linkedClusters, Set<Cluster> unassignedMIPs, Set<Cluster> unassignedClumps) {
+        boolean removedCurrentOK = unassignedMIPs.remove(currentMIP);
+        boolean addedCurrentOK = linkedClusters.add(currentMIP);
+        if (!removedCurrentOK) { throw new AssertionError("Failed to remove MIP"); }
+        if (!addedCurrentOK) { throw new AssertionError("Failed to add MIP"); }
+        // Track-Track links:
+	for (Link currentLink : m_vlinksTrackToTrack) {
+            if (currentLink.contains(currentMIP)) {
+                Cluster nextMIP = currentLink.counterpart(currentMIP);
+                if (unassignedMIPs.contains(nextMIP)) {
+                    recursivelyAddTrack(nextMIP, linkedClusters, unassignedMIPs, unassignedClumps);
+                }
+            }
+        }
+        // Track-Clump links
+	for (Link currentLink : m_vlinksTrackToClump) {
+            if (currentLink.contains(currentMIP)) {
+                Cluster nextClump = currentLink.counterpart(currentMIP);
+                if (unassignedClumps.contains(nextClump)) {
+                    recursivelyAddClump(nextClump, linkedClusters, unassignedMIPs, unassignedClumps);
+                }
+            }
+        }
+    }
+
+    // Depth-first walk: claim this clump, then follow its accepted
+    // track-clump and clump-clump links to unassigned neighbours.
+    void recursivelyAddClump(Cluster currentClump, Set<Cluster> linkedClusters, Set<Cluster> unassignedMIPs, Set<Cluster> unassignedClumps) {
+        boolean removedCurrentOK = unassignedClumps.remove(currentClump);
+        boolean addedCurrentOK = linkedClusters.add(currentClump);
+        // (Message previously said "add ... AND ... add"; first clause is the remove.)
+        if (!removedCurrentOK && !addedCurrentOK) { throw new AssertionError("Failed to remove clump AND failed to add clump."); }
+        if (!removedCurrentOK) { throw new AssertionError("Failed to remove clump."); }
+        if (!addedCurrentOK) { throw new AssertionError("Failed to add clump."); }
+        // Track-Clump links
+	for (Link currentLink : m_vlinksTrackToClump) {
+            if (currentLink.contains(currentClump)) {
+                Cluster nextMIP = currentLink.counterpart(currentClump);
+                if (unassignedMIPs.contains(nextMIP)) {
+                    recursivelyAddTrack(nextMIP, linkedClusters, unassignedMIPs, unassignedClumps);
+                }
+            }
+        }
+        // Clump-Clump links
+	for (Link currentLink : m_vlinksClumpToClump) {
+            if (currentLink.contains(currentClump)) {
+                Cluster nextClump = currentLink.counterpart(currentClump);
+                if (unassignedClumps.contains(nextClump)) {
+                    recursivelyAddClump(nextClump, linkedClusters, unassignedMIPs, unassignedClumps);
+                }
+            }
+        }
+    }
+}

lcsim/src/org/lcsim/contrib/uiowa/structural
LikelihoodLinkPlotterDriver.java added at 1.1
diff -N LikelihoodLinkPlotterDriver.java
--- /dev/null	1 Jan 1970 00:00:00 -0000
+++ LikelihoodLinkPlotterDriver.java	29 Sep 2005 21:04:48 -0000	1.1
@@ -0,0 +1,98 @@
+package structural; // package org.lcsim.recon.cluster.structural;
+
+import java.io.IOException; 
+import org.lcsim.event.Cluster;
+import hep.aida.ITree;
+import hep.aida.IAnalysisFactory; 
+import hep.aida.IHistogramFactory; 
+import hep.aida.IHistogram1D; 
+
+import structural.likelihood.*;
+
+/**
+ * Diagnostic variant of LikelihoodLinkDriver: in addition to the
+ * superclass's link selection, fills signal/background histograms of
+ * the link likelihood for each pair type, using a ClusterAssociator as
+ * the truth oracle.  Call initPlots() before processing and
+ * finalizePlots() afterwards to commit the AIDA tree.
+ */
+public class LikelihoodLinkPlotterDriver extends LikelihoodLinkDriver
+{
+
+    // Truth oracle deciding whether each candidate link is signal.
+    ClusterAssociator m_clusterAssociator = null;
+    ITree m_tree = null;
+    IHistogramFactory m_histoFactory = null;
+
+    // Likelihood histograms, one signal/background pair per link type.
+    IHistogram1D m_hTrackTrackSignal = null;
+    IHistogram1D m_hTrackTrackBckgnd = null;
+    IHistogram1D m_hTrackClumpSignal = null;
+    IHistogram1D m_hTrackClumpBckgnd = null;
+    IHistogram1D m_hClumpClumpSignal = null;
+    IHistogram1D m_hClumpClumpBckgnd = null;
+
+    public LikelihoodLinkPlotterDriver(LikelihoodEvaluator eval, double cutTrackToTrack, double cutTrackToClump, double cutClumpToClump, ClusterAssociator assoc) {
+        super(eval, cutTrackToTrack, cutTrackToClump, cutClumpToClump);
+        m_clusterAssociator = assoc;
+    }
+
+    /** Creates the AIDA tree and the six likelihood histograms. */
+    public void initPlots(String filename) {
+        IAnalysisFactory af = IAnalysisFactory.create();
+        try {
+            m_tree = af.createTreeFactory().create(filename,"xml",false,true); 
+            m_histoFactory = af.createHistogramFactory(m_tree); 
+            int nbins = 20;
+            m_hTrackTrackSignal = m_histoFactory.createHistogram1D("hTrackTrackSignal", nbins, 0.0, 1.0);
+            m_hTrackTrackBckgnd = m_histoFactory.createHistogram1D("hTrackTrackBckgnd", nbins, 0.0, 1.0);
+            m_hTrackClumpSignal = m_histoFactory.createHistogram1D("hTrackClumpSignal", nbins, 0.0, 1.0);
+            m_hTrackClumpBckgnd = m_histoFactory.createHistogram1D("hTrackClumpBckgnd", nbins, 0.0, 1.0);
+            m_hClumpClumpSignal = m_histoFactory.createHistogram1D("hClumpClumpSignal", nbins, 0.0, 1.0);
+            m_hClumpClumpBckgnd = m_histoFactory.createHistogram1D("hClumpClumpBckgnd", nbins, 0.0, 1.0);
+        } catch(IOException ioe1) {
+            ioe1.printStackTrace(); 
+        }
+    }
+    
+    /** Writes the histograms out by committing the AIDA tree. */
+    public void finalizePlots() {
+        try {
+            m_tree.commit();
+        } catch(IOException ioe1) {
+            ioe1.printStackTrace(); 
+        }
+    }
+
+    // Each compare method fills the signal or background histogram and
+    // then delegates the actual link selection to the superclass, so the
+    // cut/threshold logic lives in exactly one place.
+    public void compareTrackSegmentToTrackSegment(Cluster clus1, Cluster clus2)
+    {
+        double likelihood = m_eval.getLinkLikelihoodTrackToTrack(clus1, clus2);
+        if (determineIfLinkIsCorrect(clus1, clus2)) {
+            m_hTrackTrackSignal.fill(likelihood);
+        } else {
+            m_hTrackTrackBckgnd.fill(likelihood);
+        }
+	super.compareTrackSegmentToTrackSegment(clus1, clus2);
+    }
+    public void compareTrackSegmentToClump(Cluster clus1, Cluster clus2)
+    {
+        double likelihood = m_eval.getLinkLikelihoodTrackToClump(clus1, clus2);
+        if (determineIfLinkIsCorrect(clus1, clus2)) {
+            m_hTrackClumpSignal.fill(likelihood);
+        } else {
+            m_hTrackClumpBckgnd.fill(likelihood);
+        }
+	super.compareTrackSegmentToClump(clus1, clus2);
+    }
+    public void compareClumpToClump(Cluster clus1, Cluster clus2)
+    {
+        double likelihood = m_eval.getLinkLikelihoodClumpToClump(clus1, clus2);
+        if (determineIfLinkIsCorrect(clus1, clus2)) {
+            m_hClumpClumpSignal.fill(likelihood);
+        } else {
+            m_hClumpClumpBckgnd.fill(likelihood);
+        }
+	super.compareClumpToClump(clus1, clus2);
+    }
+
+    // Delegates the signal/background decision to the associator.
+    protected boolean determineIfLinkIsCorrect(Cluster clus1, Cluster clus2)
+    {
+        return m_clusterAssociator.isLinkCorrect(clus1, clus2);
+    }
+}

lcsim/src/org/lcsim/contrib/uiowa/structural
Link.java added at 1.1
diff -N Link.java
--- /dev/null	1 Jan 1970 00:00:00 -0000
+++ Link.java	29 Sep 2005 21:04:48 -0000	1.1
@@ -0,0 +1,24 @@
+package structural; // package org.lcsim.recon.cluster.structural;
+
+import org.lcsim.event.Cluster;
+
+/**
+ * An unordered pairing of two clusters selected for linking.
+ * Endpoint tests use reference identity (==), not equals().
+ */
+public class Link {
+    protected Cluster c1 = null;
+    protected Cluster c2 = null;
+
+    public Link(Cluster clus1, Cluster clus2) {
+        this.c1 = clus1;
+        this.c2 = clus2;
+    }
+
+    /** True when clus is one of the two endpoints of this link. */
+    public boolean contains(Cluster clus) {
+        if (clus == c1) { return true; }
+        return clus == c2;
+    }
+
+    /** The other endpoint of the link, or null when clus is not an endpoint. */
+    public Cluster counterpart(Cluster clus) {
+        if (clus == c1) { return c2; }
+        if (clus == c2) { return c1; }
+        return null;
+    }
+}

lcsim/src/org/lcsim/contrib/uiowa/structural/likelihood
ClumpToClumpDOCA.java added at 1.1
diff -N ClumpToClumpDOCA.java
--- /dev/null	1 Jan 1970 00:00:00 -0000
+++ ClumpToClumpDOCA.java	29 Sep 2005 21:04:49 -0000	1.1
@@ -0,0 +1,33 @@
+package structural.likelihood; // package org.lcsim.recon.cluster.structural.likelihood;
+
+import org.lcsim.util.swim.Trajectory;
+import org.lcsim.util.swim.Line;
+import org.lcsim.event.Cluster;
+import hep.physics.vec.BasicHep3Vector;
+import hep.physics.vec.Hep3Vector;
+
+/**
+ * Likelihood quantity: distance of closest approach (DOCA) between two
+ * clumps.  Each clump is approximated by a Line built from its position
+ * and its iPhi/iTheta values; the smaller of the two line-to-centroid
+ * distances is returned.
+ */
+public class ClumpToClumpDOCA implements StructuralLikelihoodQuantity
+{
+    public ClumpToClumpDOCA() {}
+
+    public double evaluate(Cluster clus1, Cluster clus2)
+    {
+	// We used to be able to figure out which cluster was
+	// pointier from the eigenvalues, but now we can't (easily).
+	// So sad. So now we try it both ways and return the smaller
+	// of the two DOCAs.
+
+	Hep3Vector position1 = new BasicHep3Vector(clus1.getPosition());
+	Hep3Vector position2 = new BasicHep3Vector(clus2.getPosition());
+	Trajectory line1 = new Line(position1, clus1.getIPhi(), clus1.getITheta());
+	Trajectory line2 = new Line(position2, clus2.getIPhi(), clus2.getITheta());
+
+	// Distance from each clump's line to the other clump's position.
+	double doca1 = Math.abs(line1.getDistanceToPoint(position2));
+	double doca2 = Math.abs(line2.getDistanceToPoint(position1));
+
+	double doca = Math.min(doca1, doca2);
+	return doca;
+    }
+}
+
+

lcsim/src/org/lcsim/contrib/uiowa/structural/likelihood
ClusterToClusterMinDistance.java added at 1.1
diff -N ClusterToClusterMinDistance.java
--- /dev/null	1 Jan 1970 00:00:00 -0000
+++ ClusterToClusterMinDistance.java	29 Sep 2005 21:04:49 -0000	1.1
@@ -0,0 +1,13 @@
+package structural.likelihood; // package org.lcsim.recon.cluster.structural.likelihood;
+
+import org.lcsim.event.Cluster;
+
+/**
+ * Likelihood quantity: the minimum distance between two clusters, as
+ * computed by MiscUtilities.distance.
+ */
+public class ClusterToClusterMinDistance implements StructuralLikelihoodQuantity
+{
+    public ClusterToClusterMinDistance() {}
+
+    /** Delegates directly to MiscUtilities.distance(clus1, clus2). */
+    public double evaluate(Cluster clus1, Cluster clus2) {
+        return MiscUtilities.distance(clus1, clus2);
+    }
+}

lcsim/src/org/lcsim/contrib/uiowa/structural/likelihood
IntermediateHitFinder.java added at 1.1
diff -N IntermediateHitFinder.java
--- /dev/null	1 Jan 1970 00:00:00 -0000
+++ IntermediateHitFinder.java	29 Sep 2005 21:04:49 -0000	1.1
@@ -0,0 +1,28 @@
+package structural.likelihood; // package org.lcsim.recon.cluster.structural.likelihood;
+
+import org.lcsim.event.Cluster;
+import hep.physics.vec.Hep3Vector;
+
+
+/**
+ * Placeholder for the helper that searches for calorimeter hits lying
+ * between a MIP track segment and a given point.  The search itself is
+ * not implemented yet; calling lookForIntermediateHits always throws.
+ */
+public class IntermediateHitFinder {
+
+    public IntermediateHitFinder() {
+	//
+    }
+
+    // Not implemented yet: always throws AssertionError.
+    InfoLookForHits lookForIntermediateHits(Cluster track, Hep3Vector point, double distanceOfStartingPointOnTrack)
+    {
+	throw new AssertionError("Not implemented");
+    }
+
+   // Just something to pass back the hit info...
+    protected class InfoLookForHits {	
+        public InfoLookForHits() {}
+        public int countMIPHitsFound = 0;
+        public int countNewHitsFound = 0;
+        public int countHitsNotFound = 0;
+        public int countMIPNeighbourHitsFound = 0;
+        public int countNewNeighbourHitsFound = 0;
+        public int countNeighbourHitsNotFound = 0;
+    }
+}

lcsim/src/org/lcsim/contrib/uiowa/structural/likelihood
LikelihoodDistribution.java added at 1.1
diff -N LikelihoodDistribution.java
--- /dev/null	1 Jan 1970 00:00:00 -0000
+++ LikelihoodDistribution.java	29 Sep 2005 21:04:49 -0000	1.1
@@ -0,0 +1,164 @@
+package structural.likelihood; // package org.lcsim.recon.cluster.structural.likelihood;
+
+import org.lcsim.event.Cluster;
+
+/**
+ * A binned one-dimensional distribution (histogram / normalized PDF) of a
+ * StructuralLikelihoodQuantity, filled from training pairs of clusters and
+ * queried via getPDF(...).  Serializable so trained distributions can be
+ * written to disk and reloaded.
+ */
+public class LikelihoodDistribution implements java.io.Serializable
+{
+
+    StructuralLikelihoodQuantity m_quantity = null;
+    int m_nbins;
+    double m_min;
+    double m_max;
+    boolean m_useUnderFlow;
+    boolean m_useOverFlow;
+
+    double[] m_binContent;
+    double m_underFlow;
+    double m_overFlow;
+
+    // Lazily-maintained normalization cache; invalidated by every fill().
+    boolean m_normalized = false;
+    double m_normalization;
+
+    public boolean useUnderFlow() { return m_useUnderFlow; }
+    public boolean useOverFlow() { return m_useOverFlow; }
+    public int getNbins() { return m_nbins; }
+    public double getMin() { return m_min; }
+    public double getMax() { return m_max; }
+    public StructuralLikelihoodQuantity getQuantity() { return m_quantity; }
+
+    /**
+     * @param quantity     the quantity this distribution histograms
+     * @param nbins        number of equal-width bins covering [min, max]
+     * @param min          lower edge of the binned range
+     * @param max          upper edge of the binned range
+     * @param useUnderFlow if true, the underflow bin takes part in normalization and queries
+     * @param useOverFlow  if true, the overflow bin takes part in normalization and queries
+     */
+    public LikelihoodDistribution(StructuralLikelihoodQuantity quantity, int nbins, double min, double max, boolean useUnderFlow, boolean useOverFlow)
+    {
+        m_quantity = quantity;
+        m_nbins = nbins;
+        m_min = min;
+        m_max = max;
+        m_useUnderFlow = useUnderFlow;
+        m_useOverFlow = useOverFlow;
+
+        m_binContent = new double[m_nbins]; // Java zero-initializes the array
+        m_underFlow = 0.0;
+        m_overFlow = 0.0;
+
+        m_normalized = false;
+    }
+
+    /**
+     * Map an in-range value (m_min <= value <= m_max) to a bin index in
+     * [0, m_nbins-1].  A value exactly equal to m_max would naively map to
+     * index m_nbins; it is clamped into the top bin instead.  (Fix: the
+     * previous code failed on value == m_max -- fill() threw the "Help!"
+     * AssertionError and getPDF() read past the end of the bin array.)
+     */
+    protected int binIndexFor(double value)
+    {
+        double frac = (value - m_min) / (m_max - m_min);
+        int binIndex = (int) (frac * ((double) m_nbins));
+        if (binIndex == m_nbins) {
+            binIndex = m_nbins - 1; // value == m_max lands in the top bin
+        }
+        if (binIndex < 0 || binIndex >= m_nbins) {
+            throw new AssertionError("Help! In range ("+m_min+","+m_max+") with "+m_nbins+" bins, a value of "+value+" got mapped to bin "+binIndex);
+        }
+        return binIndex;
+    }
+
+    /** Add {@code weight} to the bin containing {@code value} (or to under/overflow). */
+    protected void fill(double value, double weight)
+    {
+        if (value>=m_min && value<=m_max) {
+            m_binContent[binIndexFor(value)] += weight;
+        } else if (value < m_min) {
+            // Underflow
+            m_underFlow += weight;
+        } else {
+            // Overflow.  Note: NaN values also land here, since NaN fails
+            // both comparisons above.
+            m_overFlow += weight;
+        }
+
+        // We've just filled => the cached normalization is stale.
+        m_normalized = false;
+    }
+
+    /**
+     * Evaluate the quantity on the pair of clusters and fill with unit weight.
+     * Pairs for which the quantity is undefined are skipped with a warning.
+     */
+    public void fill(Cluster clus1, Cluster clus2) {
+        try {
+            double quantityValue = m_quantity.evaluate(clus1, clus2);
+            fill(quantityValue);
+        } catch (QuantityNotDefinedException x) {
+            // Quantity not valid for this pair -- skip it.
+            System.out.println("Warning: "+x);
+        }
+    }
+
+    /** Fill with unit weight. */
+    protected void fill(double value) {
+        fill(value, 1.0);
+    }
+
+    /** Recompute the total used to normalize the PDF so it sums to 1. */
+    public void normalize() {
+        // Sum the regular bins...
+        m_normalization = 0.0;
+        for (int i=0; i<m_binContent.length; i++) {
+            m_normalization += m_binContent[i];
+        }
+        // ...plus whichever extreme bins are enabled.
+        if (m_useUnderFlow) {
+            m_normalization += m_underFlow;
+        }
+        if (m_useOverFlow) {
+            m_normalization += m_overFlow;
+        }
+
+        m_normalized = true;
+    }
+
+    /**
+     * Evaluate the quantity on the pair of clusters and return the normalized
+     * PDF value of the bin it falls in.
+     * @throws QuantityNotDefinedException if the quantity is undefined for this pair
+     */
+    public double getPDF(Cluster clus1, Cluster clus2) throws QuantityNotDefinedException
+    {
+        // Propagates QuantityNotDefinedException directly (the old
+        // catch-and-rethrow added nothing).
+        double value = m_quantity.evaluate(clus1, clus2);
+        if (!m_normalized) { normalize(); }
+        if (value>=m_min && value<=m_max) {
+            return m_binContent[binIndexFor(value)]/m_normalization;
+        } else if (value < m_min) {
+            // Underflow
+            if (!m_useUnderFlow) { throw new AssertionError("Underflow"); }
+            return m_underFlow/m_normalization;
+        } else {
+            // Overflow
+            if (!m_useOverFlow) { throw new AssertionError("Overflow"); }
+            return m_overFlow/m_normalization;
+        }
+    }
+
+    /**
+     * Normalized PDF content of a bin by index: -1 is the underflow bin,
+     * 0..nbins-1 the regular bins, nbins the overflow bin.
+     */
+    public double getPDF(int bin)
+    {
+        if (!m_normalized) {
+            normalize();
+        }
+
+        if (bin>=0 && bin<m_nbins) {
+            // regular bin
+            return m_binContent[bin]/m_normalization;
+        } else if (bin==-1) {
+            // underflow bin
+            if (m_useUnderFlow) {
+                return m_underFlow/m_normalization;
+            } else {
+                throw new AssertionError("Tried to use underflow bin when not enabled");
+            }
+        } else if (bin==m_nbins) {
+            // overflow bin
+            if (m_useOverFlow) {
+                return m_overFlow/m_normalization;
+            } else {
+                throw new AssertionError("Tried to use overflow bin when not enabled");
+            }
+        } else {
+            throw new AssertionError("Bin index out of range: "+bin+" not in (-1, "+m_nbins+")");
+        }
+    }
+
+}

lcsim/src/org/lcsim/contrib/uiowa/structural/likelihood
LikelihoodEvaluator.java added at 1.1
diff -N LikelihoodEvaluator.java
--- /dev/null	1 Jan 1970 00:00:00 -0000
+++ LikelihoodEvaluator.java	29 Sep 2005 21:04:50 -0000	1.1
@@ -0,0 +1,129 @@
+package structural.likelihood; // package org.lcsim.recon.cluster.structural.likelihood;
+
+import java.util.Map;
+import java.util.HashMap;
+import java.util.Vector;
+
+import java.io.FileOutputStream;
+import java.io.FileInputStream;
+import java.io.ObjectOutputStream;
+import java.io.ObjectInputStream;
+import java.io.Serializable;
+import java.io.IOException; 
+
+import org.lcsim.event.Cluster;
+
+/**
+ * Holds, for each link category (track-track, track-clump, clump-clump), a
+ * pair of LikelihoodDistribution vectors keyed by whether the link is correct
+ * (Boolean.TRUE) or not (Boolean.FALSE), and combines them into a normalized
+ * link likelihood.  Serializable so a trained evaluator can be written to and
+ * read back from disk (see readFromFile).
+ */
+public class LikelihoodEvaluator implements java.io.Serializable {
+
+    Map<Boolean, Vector<LikelihoodDistribution> > m_LikelihoodDistributionsTrackToTrack = null;
+    Map<Boolean, Vector<LikelihoodDistribution> > m_LikelihoodDistributionsTrackToClump = null;
+    Map<Boolean, Vector<LikelihoodDistribution> > m_LikelihoodDistributionsClumpToClump = null;
+
+    public LikelihoodEvaluator()
+    {
+        // Maps from Boolean (the "is this link correct?" tag) to distributions.
+        m_LikelihoodDistributionsTrackToTrack = newDistributionMap();
+        m_LikelihoodDistributionsTrackToClump = newDistributionMap();
+        m_LikelihoodDistributionsClumpToClump = newDistributionMap();
+    }
+
+    /** Build an empty tag map with one (empty) distribution vector per tag. */
+    private static Map<Boolean, Vector<LikelihoodDistribution> > newDistributionMap()
+    {
+        Map<Boolean, Vector<LikelihoodDistribution> > map = new HashMap<Boolean, Vector<LikelihoodDistribution> >();
+        map.put(Boolean.TRUE,  new Vector<LikelihoodDistribution>());
+        map.put(Boolean.FALSE, new Vector<LikelihoodDistribution>());
+        return map;
+    }
+
+    /** Append identically-binned distributions for the quantity under both tags. */
+    private static void addQuantity(Map<Boolean, Vector<LikelihoodDistribution> > map, StructuralLikelihoodQuantity quant, int nbins, double min, double max, boolean useUnderFlow, boolean useOverFlow)
+    {
+        map.get(Boolean.TRUE) .add(new LikelihoodDistribution(quant, nbins, min, max, useUnderFlow, useOverFlow));
+        map.get(Boolean.FALSE).add(new LikelihoodDistribution(quant, nbins, min, max, useUnderFlow, useOverFlow));
+    }
+
+    public void addLikelihoodQuantityTrackToTrack(StructuralLikelihoodQuantity quant, int nbins, double min, double max, boolean useUnderFlow, boolean useOverFlow) {
+        addQuantity(m_LikelihoodDistributionsTrackToTrack, quant, nbins, min, max, useUnderFlow, useOverFlow);
+    }
+    public void addLikelihoodQuantityTrackToClump(StructuralLikelihoodQuantity quant, int nbins, double min, double max, boolean useUnderFlow, boolean useOverFlow) {
+        addQuantity(m_LikelihoodDistributionsTrackToClump, quant, nbins, min, max, useUnderFlow, useOverFlow);
+    }
+    public void addLikelihoodQuantityClumpToClump(StructuralLikelihoodQuantity quant, int nbins, double min, double max, boolean useUnderFlow, boolean useOverFlow) {
+        addQuantity(m_LikelihoodDistributionsClumpToClump, quant, nbins, min, max, useUnderFlow, useOverFlow);
+    }
+
+    /** @param goodLink true for the "correct link" distributions, false for the "wrong link" ones */
+    public Vector<LikelihoodDistribution> getLikelihoodDistributionTrackToTrack(boolean goodLink) {
+        return m_LikelihoodDistributionsTrackToTrack.get(Boolean.valueOf(goodLink));
+    }
+    public Vector<LikelihoodDistribution> getLikelihoodDistributionTrackToClump(boolean goodLink) {
+        return m_LikelihoodDistributionsTrackToClump.get(Boolean.valueOf(goodLink));
+    }
+    public Vector<LikelihoodDistribution> getLikelihoodDistributionClumpToClump(boolean goodLink) {
+        return m_LikelihoodDistributionsClumpToClump.get(Boolean.valueOf(goodLink));
+    }
+
+    public double getLinkLikelihoodTrackToTrack(Cluster clus1, Cluster clus2) {
+        return getLinkLikelihood(clus1, clus2, getLikelihoodDistributionTrackToTrack(true), getLikelihoodDistributionTrackToTrack(false));
+    }
+    public double getLinkLikelihoodTrackToClump(Cluster clus1, Cluster clus2) {
+        return getLinkLikelihood(clus1, clus2, getLikelihoodDistributionTrackToClump(true), getLikelihoodDistributionTrackToClump(false));
+    }
+    public double getLinkLikelihoodClumpToClump(Cluster clus1, Cluster clus2) {
+        return getLinkLikelihood(clus1, clus2, getLikelihoodDistributionClumpToClump(true), getLikelihoodDistributionClumpToClump(false));
+    }
+
+    /**
+     * Naive-Bayes-style combination: for each quantity the per-quantity good
+     * and bad PDF values are turned into probabilities and multiplied up; the
+     * product is then renormalized.  Quantities undefined for this pair are
+     * skipped with a warning.
+     * NOTE(review): if goodPDF+badPDF is zero for some quantity the result is
+     * NaN (0/0), exactly as in the original implementation -- confirm whether
+     * callers need a guard.
+     */
+    protected double getLinkLikelihood(Cluster clus1, Cluster clus2, Vector<LikelihoodDistribution> vLinked, Vector<LikelihoodDistribution> vUnlinked)
+    {
+        double totalGoodLikelihood = 1.0;
+        double totalBadLikelihood = 1.0;
+        for (int i=0; i<vLinked.size(); i++) {
+            LikelihoodDistribution distLinked = vLinked.get(i);     // generics: no cast needed
+            LikelihoodDistribution distUnlinked = vUnlinked.get(i);
+            try {
+                double goodPDF = distLinked.getPDF(clus1, clus2);
+                double badPDF = distUnlinked.getPDF(clus1, clus2);
+                double goodProb = goodPDF/(goodPDF+badPDF);
+                double badProb = badPDF/(goodPDF+badPDF);
+                totalGoodLikelihood *= goodProb;
+                totalBadLikelihood *= badProb;
+            } catch (QuantityNotDefinedException x) {
+                // Quantity undefined for this pair -- leave it out of the product.
+                System.out.println("Warning: "+x);
+            }
+        }
+        double normalizedGoodLikelihood = totalGoodLikelihood / (totalGoodLikelihood+totalBadLikelihood);
+        return normalizedGoodLikelihood;
+    }
+
+    /**
+     * Read a serialized LikelihoodEvaluator back from {@code filename}.
+     * Fix: the input stream is now closed (the previous version leaked both
+     * the FileInputStream and the ObjectInputStream).
+     */
+    static public LikelihoodEvaluator readFromFile(String filename)
+    {
+        try {
+            FileInputStream fis = new FileInputStream(filename);
+            try {
+                ObjectInputStream ois = new ObjectInputStream(fis);
+                return (LikelihoodEvaluator) ois.readObject();
+            } finally {
+                fis.close(); // closes the underlying file descriptor
+            }
+        } catch (java.io.IOException x) {
+            throw new AssertionError("java.io.IOException: "+x);
+        } catch (java.lang.ClassNotFoundException x) {
+            throw new AssertionError("java.lang.ClassNotFoundException: "+x);
+        }
+    }
+
+}

lcsim/src/org/lcsim/contrib/uiowa/structural/likelihood
MiscUtilities.java added at 1.1
diff -N MiscUtilities.java
--- /dev/null	1 Jan 1970 00:00:00 -0000
+++ MiscUtilities.java	29 Sep 2005 21:04:50 -0000	1.1
@@ -0,0 +1,60 @@
+package structural.likelihood; // package org.lcsim.recon.cluster.structural.likelihood;
+
+import org.lcsim.event.CalorimeterHit;
+import org.lcsim.event.Cluster;
+import hep.physics.vec.BasicHep3Vector;
+import hep.physics.vec.Hep3Vector;
+import hep.physics.vec.VecOp;
+import java.util.List;
+
+/** Package-private geometry helpers shared by the likelihood quantities. */
+class MiscUtilities
+{
+    /**
+     * Get the minimum distance between hits in the two clusters.
+     * If one or both of the clusters is empty, will return NaN.
+     */
+    static protected double distance(Cluster clus1, Cluster clus2)
+    {
+        double best = Double.NaN; // stays NaN if either cluster is empty
+        boolean haveAny = false;
+        for (CalorimeterHit h1 : clus1.getCalorimeterHits()) {
+            for (CalorimeterHit h2 : clus2.getCalorimeterHits()) {
+                double d = distance(h1, h2);
+                if (!haveAny || d < best) {
+                    best = d;
+                    haveAny = true;
+                }
+            }
+        }
+        return best;
+    }
+
+    /**
+     * Get the minimum distance between the given hit and any hit of the
+     * cluster.  Returns NaN if the cluster is empty.
+     */
+    static protected double distance(Cluster clus, CalorimeterHit hit)
+    {
+        double best = Double.NaN; // stays NaN if clus is empty
+        boolean haveAny = false;
+        for (CalorimeterHit hitInCluster : clus.getCalorimeterHits()) {
+            double d = distance(hit, hitInCluster);
+            if (!haveAny || d < best) {
+                best = d;
+                haveAny = true;
+            }
+        }
+        return best;
+    }
+
+    /** Euclidean distance between the positions of two hits. */
+    static protected double distance(CalorimeterHit hit1, CalorimeterHit hit2)
+    {
+        Hep3Vector displacement = VecOp.sub(new BasicHep3Vector(hit1.getPosition()),
+                                            new BasicHep3Vector(hit2.getPosition()));
+        return displacement.magnitude();
+    }
+}

lcsim/src/org/lcsim/contrib/uiowa/structural/likelihood
QuantityNotDefinedException.java added at 1.1
diff -N QuantityNotDefinedException.java
--- /dev/null	1 Jan 1970 00:00:00 -0000
+++ QuantityNotDefinedException.java	29 Sep 2005 21:04:50 -0000	1.1
@@ -0,0 +1,7 @@
+package structural.likelihood; // package org.lcsim.recon.cluster.structural.likelihood;
+
+import java.lang.String;
+
+/**
+ * Thrown by StructuralLikelihoodQuantity.evaluate(...) when the quantity
+ * cannot be computed for the given pair of clusters (e.g. a fraction whose
+ * denominator would be zero).  Checked, so callers must decide how to skip
+ * the undefined quantity.
+ */
+public class QuantityNotDefinedException extends java.lang.Exception {
+    public QuantityNotDefinedException(String m) { super(m); }
+}

lcsim/src/org/lcsim/contrib/uiowa/structural/likelihood
StructuralLikelihoodQuantity.java added at 1.1
diff -N StructuralLikelihoodQuantity.java
--- /dev/null	1 Jan 1970 00:00:00 -0000
+++ StructuralLikelihoodQuantity.java	29 Sep 2005 21:04:50 -0000	1.1
@@ -0,0 +1,11 @@
+package structural.likelihood; // package org.lcsim.recon.cluster.structural.likelihood;
+
+// Hm. What if some quantities are integer?
+// For now, assume everything is double.
+
+import org.lcsim.event.Cluster;
+
+/**
+ * A scalar quantity computed from a pair of clusters, used as an input to the
+ * likelihood-based link selector.  Extends Serializable so that trained
+ * LikelihoodDistributions holding a quantity can be persisted.
+ */
+public interface StructuralLikelihoodQuantity extends java.io.Serializable
+{
+    /**
+     * Evaluate this quantity for the given pair of clusters.
+     * @throws QuantityNotDefinedException if the quantity is undefined for this pair
+     */
+    public double evaluate(Cluster clus1, Cluster clus2) throws QuantityNotDefinedException;
+}

lcsim/src/org/lcsim/contrib/uiowa/structural/likelihood
TrackToClumpDOCA.java added at 1.1
diff -N TrackToClumpDOCA.java
--- /dev/null	1 Jan 1970 00:00:00 -0000
+++ TrackToClumpDOCA.java	29 Sep 2005 21:04:50 -0000	1.1
@@ -0,0 +1,23 @@
+package structural.likelihood; // package org.lcsim.recon.cluster.structural.likelihood;
+
+import org.lcsim.util.swim.Trajectory;
+import org.lcsim.util.swim.Line;
+import org.lcsim.event.Cluster;
+import hep.physics.vec.BasicHep3Vector;
+import hep.physics.vec.Hep3Vector;
+
+/**
+ * Likelihood quantity: distance of closest approach between the straight-line
+ * extrapolation of a track-like cluster (built from its position and
+ * iPhi/iTheta direction) and the position of a clump cluster.
+ */
+public class TrackToClumpDOCA implements StructuralLikelihoodQuantity
+{
+    public TrackToClumpDOCA() {}
+
+    /** Absolute distance from the clump position to the track's line. */
+    public double evaluate(Cluster track, Cluster clump)
+    {
+        Trajectory trackLine = new Line(new BasicHep3Vector(track.getPosition()),
+                                        track.getIPhi(), track.getITheta());
+        Hep3Vector clumpPosition = new BasicHep3Vector(clump.getPosition());
+        return Math.abs(trackLine.getDistanceToPoint(clumpPosition));
+    }
+}

lcsim/src/org/lcsim/contrib/uiowa/structural/likelihood
TrackToTrackDOCA.java added at 1.1
diff -N TrackToTrackDOCA.java
--- /dev/null	1 Jan 1970 00:00:00 -0000
+++ TrackToTrackDOCA.java	29 Sep 2005 21:04:50 -0000	1.1
@@ -0,0 +1,25 @@
+package structural.likelihood; // package org.lcsim.recon.cluster.structural.likelihood;
+
+import org.lcsim.util.swim.Trajectory;
+import org.lcsim.util.swim.Line;
+import org.lcsim.event.Cluster;
+import hep.physics.vec.BasicHep3Vector;
+import hep.physics.vec.Hep3Vector;
+
+/**
+ * Likelihood quantity: distance of closest approach between the straight-line
+ * extrapolations of two track-like clusters.
+ *
+ * NOT YET IMPLEMENTED: always returns NaN.  Note that NaN fails every range
+ * comparison, so LikelihoodDistribution.fill(...) files NaN under overflow.
+ */
+public class TrackToTrackDOCA implements StructuralLikelihoodQuantity
+{
+    public TrackToTrackDOCA() {}
+
+    public double evaluate(Cluster track1, Cluster track2)
+    {
+        // A proper line-to-line DOCA routine doesn't exist yet; until it does,
+        // report "undefined" as NaN.  (Fix: the previous version also built
+        // two Line trajectories here and then discarded them -- dead code.)
+        return Double.NaN;
+    }
+}

lcsim/src/org/lcsim/contrib/uiowa/structural/likelihood
TrackToTrackIntermediateHits.java added at 1.1
diff -N TrackToTrackIntermediateHits.java
--- /dev/null	1 Jan 1970 00:00:00 -0000
+++ TrackToTrackIntermediateHits.java	29 Sep 2005 21:04:50 -0000	1.1
@@ -0,0 +1,35 @@
+package structural.likelihood; // package org.lcsim.recon.cluster.structural.likelihood;
+
+import org.lcsim.util.swim.Trajectory;
+import org.lcsim.util.swim.Line;
+import org.lcsim.event.Cluster;
+import hep.physics.vec.BasicHep3Vector;
+import hep.physics.vec.Hep3Vector;
+
+/**
+ * Base class for likelihood quantities derived from intermediate-hit searches
+ * between two track-like clusters.  Subclasses call areTracksParallel(...)
+ * and then the matching handle*Tracks(...) method, which is expected to
+ * populate the m_info* holders that the subclass's evaluate(...) reads.
+ *
+ * NOT YET IMPLEMENTED: all three helpers currently throw AssertionError.
+ * The m_info* fields are transient, so they are not written out when a
+ * quantity is serialized.
+ */
+abstract public class TrackToTrackIntermediateHits implements StructuralLikelihoodQuantity
+{
+    public TrackToTrackIntermediateHits() {}
+
+    // Per-call scratch results: m_info1/m_info2 for divergent tracks,
+    // m_infoParallel for parallel tracks.
+    transient protected IntermediateHitFinder.InfoLookForHits m_info1 = null;
+    transient protected IntermediateHitFinder.InfoLookForHits m_info2 = null;
+    transient protected IntermediateHitFinder.InfoLookForHits m_infoParallel = null;
+
+    // Figure out if the two tracks are parallel.
+    protected boolean areTracksParallel(Cluster clus1, Cluster clus2) 
+    {
+	throw new AssertionError("Not implemented");
+    }
+
+    // Fill m_info1/m_info2 for divergent (non-parallel) tracks.
+    protected void handleDivergentTracks(Cluster clus1, Cluster clus2) 
+    {
+	throw new AssertionError("Not implemented");
+    }
+
+    // Fill m_infoParallel for parallel tracks.
+      protected void handleParallelTracks(Cluster clus1, Cluster clus2) 
+    {
+	throw new AssertionError("Not implemented");
+    }
+
+}

lcsim/src/org/lcsim/contrib/uiowa/structural/likelihood
TrackToTrackIntermediateHitsCount.java added at 1.1
diff -N TrackToTrackIntermediateHitsCount.java
--- /dev/null	1 Jan 1970 00:00:00 -0000
+++ TrackToTrackIntermediateHitsCount.java	29 Sep 2005 21:04:50 -0000	1.1
@@ -0,0 +1,24 @@
+package structural.likelihood; // package org.lcsim.recon.cluster.structural.likelihood;
+
+import org.lcsim.event.Cluster;
+
+/**
+ * Likelihood quantity: the number of trajectory layers between two track-like
+ * clusters where an expected intermediate hit was NOT found.
+ * NOTE(review): layersWithFoundHit is computed but never used in the return
+ * value -- mirrors the sibling Fraction quantity, so possibly intentional;
+ * confirm before removing.
+ */
+public class TrackToTrackIntermediateHitsCount extends TrackToTrackIntermediateHits {
+ 
+    public TrackToTrackIntermediateHitsCount() {}
+
+    // Returns the count of layers with a missing hit (parallel tracks use the
+    // single combined search; divergent tracks sum both per-track searches).
+    public double evaluate(Cluster track1, Cluster track2) 
+    {
+	double layersWithMissingHit = 0;
+        double layersWithFoundHit = 0;
+        if (areTracksParallel(track1, track2)) {
+            handleParallelTracks(track1, track2);
+            layersWithMissingHit = m_infoParallel.countHitsNotFound;
+            layersWithFoundHit = m_infoParallel.countNewHitsFound;
+	} else {
+	    handleDivergentTracks(track1, track2);
+            layersWithMissingHit = m_info1.countHitsNotFound + m_info2.countHitsNotFound;
+            layersWithFoundHit = m_info1.countNewHitsFound + m_info2.countNewHitsFound;
+	}
+	return layersWithMissingHit;
+    }
+}

lcsim/src/org/lcsim/contrib/uiowa/structural/likelihood
TrackToTrackIntermediateHitsFraction.java added at 1.1
diff -N TrackToTrackIntermediateHitsFraction.java
--- /dev/null	1 Jan 1970 00:00:00 -0000
+++ TrackToTrackIntermediateHitsFraction.java	29 Sep 2005 21:04:50 -0000	1.1
@@ -0,0 +1,29 @@
+package structural.likelihood; // package org.lcsim.recon.cluster.structural.likelihood;
+
+import org.lcsim.event.Cluster;
+
+/**
+ * Likelihood quantity: the fraction of intermediate trajectory layers between
+ * two track-like clusters where an expected hit was NOT found, i.e.
+ * missing / (missing + found).
+ */
+public class TrackToTrackIntermediateHitsFraction extends TrackToTrackIntermediateHits
+{
+    public TrackToTrackIntermediateHitsFraction() {}
+
+    /**
+     * @throws QuantityNotDefinedException if no layers were searched at all
+     *         (missing + found == 0), which would make the fraction 0/0
+     */
+    public double evaluate(Cluster track1, Cluster track2) throws QuantityNotDefinedException
+    {
+        double layersWithMissingHit = 0;
+        double layersWithFoundHit = 0;
+        if (areTracksParallel(track1, track2)) {
+            handleParallelTracks(track1, track2);
+            layersWithMissingHit = m_infoParallel.countHitsNotFound;
+            layersWithFoundHit = m_infoParallel.countNewHitsFound;
+	} else {
+            handleDivergentTracks(track1, track2);
+            layersWithMissingHit = m_info1.countHitsNotFound + m_info2.countHitsNotFound;
+            layersWithFoundHit = m_info1.countNewHitsFound + m_info2.countNewHitsFound;
+        }
+        
+        if (layersWithMissingHit+layersWithFoundHit > 0) {
+            return layersWithMissingHit/(layersWithMissingHit+layersWithFoundHit);
+        } else {
+            throw new QuantityNotDefinedException("Can't compute fraction: division by zero: layersWithMissingHit+layersWithFoundHit = "+layersWithMissingHit+" + "+layersWithFoundHit+" = "+(layersWithMissingHit+layersWithFoundHit));
+        }
+    }
+}

lcsim/src/org/lcsim/contrib/uiowa/structural/likelihood
TrackToTrackPOCAInCalorimeter.java added at 1.1
diff -N TrackToTrackPOCAInCalorimeter.java
--- /dev/null	1 Jan 1970 00:00:00 -0000
+++ TrackToTrackPOCAInCalorimeter.java	29 Sep 2005 21:04:50 -0000	1.1
@@ -0,0 +1,15 @@
+package structural.likelihood; // package org.lcsim.recon.cluster.structural.likelihood;
+
+import org.lcsim.event.Cluster;
+
+/**
+ * Likelihood quantity intended to test whether the point of closest approach
+ * of two track-like clusters lies inside the calorimeter.
+ *
+ * NOT YET IMPLEMENTED: evaluate always throws AssertionError.
+ */
+public class TrackToTrackPOCAInCalorimeter implements StructuralLikelihoodQuantity
+{
+    public TrackToTrackPOCAInCalorimeter() {
+        // Maybe configure geometry here
+    }
+
+    // NOTE(review): not implemented yet -- always throws.
+    public double evaluate(Cluster track1, Cluster track2) 
+    {
+	throw new AssertionError("Not implemented");
+    }
+}

lcsim/src/org/lcsim/contrib/uiowa/structural/likelihood
TrackToTrackSmallestDistanceToPOCA.java added at 1.1
diff -N TrackToTrackSmallestDistanceToPOCA.java
--- /dev/null	1 Jan 1970 00:00:00 -0000
+++ TrackToTrackSmallestDistanceToPOCA.java	29 Sep 2005 21:04:50 -0000	1.1
@@ -0,0 +1,13 @@
+package structural.likelihood; // package org.lcsim.recon.cluster.structural.likelihood;
+
+import org.lcsim.event.Cluster;
+
+/**
+ * Likelihood quantity intended to measure the smallest distance from the two
+ * track-like clusters to their point of closest approach.
+ *
+ * NOT YET IMPLEMENTED: evaluate always throws AssertionError.
+ */
+public class TrackToTrackSmallestDistanceToPOCA implements StructuralLikelihoodQuantity
+{
+    public TrackToTrackSmallestDistanceToPOCA() {}
+
+    // NOTE(review): not implemented yet -- always throws.
+    public double evaluate(Cluster track1, Cluster track2) 
+    {
+	throw new AssertionError("Not implemented");
+    }
+}
CVSspam 0.2.8