diff --git a/AnalysisHowTo/ThrownTopology/plotTopology.C b/AnalysisHowTo/ThrownTopology/plotTopology.C index 6249bffe..0ec5ab88 100644 --- a/AnalysisHowTo/ThrownTopology/plotTopology.C +++ b/AnalysisHowTo/ThrownTopology/plotTopology.C @@ -12,7 +12,7 @@ void plotTopology() { TH1F *hThrownTopologies = (TH1F*)f->Get("hThrownTopologies"); hThrownTopologies->GetXaxis()->LabelsOption(">"); // order by most common topology hThrownTopologies->GetXaxis()->SetRangeUser(0, 10); // only plot first 20 topologies - hThrownTopologies->Scale(100./hThrownTopologies->GetEntries()); // turn histogram into percentage + hThrownTopologies->Scale(100./hThrownTopologies->Integral()); // turn histogram into percentage hThrownTopologies->GetYaxis()->SetTitle("Thrown Topology %"); hThrownTopologies->Draw("htext"); diff --git a/FlattenForFSRoot/Documentation/GlueXFSRootFormat.pdf b/FlattenForFSRoot/Documentation/GlueXFSRootFormat.pdf index 3202008c..0e148649 100644 Binary files a/FlattenForFSRoot/Documentation/GlueXFSRootFormat.pdf and b/FlattenForFSRoot/Documentation/GlueXFSRootFormat.pdf differ diff --git a/FlattenForFSRoot/Documentation/GlueXFSRootFormat.tex b/FlattenForFSRoot/Documentation/GlueXFSRootFormat.tex index 13f00751..93d65c00 100644 --- a/FlattenForFSRoot/Documentation/GlueXFSRootFormat.tex +++ b/FlattenForFSRoot/Documentation/GlueXFSRootFormat.tex @@ -118,6 +118,16 @@ \section{Track Information} TkNDFP(n): number of degrees of freedom for the track fit \end{verbatim} +If PID information is requested, the following variables are added to the tree: +\begin{verbatim} + TkTOFBetaP(n): beta from the TOF (using kinematic fit if available) + TkTOFChi2P(n): chi2 from the TOF (using kinematic fit if available) + TkDEDXCDCP(n): DEDX from the CDC + TkDEDXFDCP(n): DEDX from the FDC + TkDEDXChi2P(n): chi2 for the DEDX measurement + TkDEDXNDFP(n): NDF for the DEDX measurement +\end{verbatim} + \section{Shower Information} \label{sec:nt:shower} diff --git a/FlattenForFSRoot/flatten.cc b/FlattenForFSRoot/flatten.cc index 9a73f5d8..e8df9306 100644 --- a/FlattenForFSRoot/flatten.cc +++ b/FlattenForFSRoot/flatten.cc @@ -77,6 +77,7 @@ int main(int argc, char** argv){ cout << " -numUnusedNeutrals [optional cut (<= cut)] (default: -1 (no cut))" << endl; cout << " -numNeutralHypos [optional cut (<= cut)] (default: -1 (no cut))" << endl; cout << " -usePolarization [get polarization angle from RCDB? 0 or 1] (default: 0)" << endl; + cout << " -addPID [include PID info in the output tree? 0 or 1] (default: 0)" << endl; cout << " -mcChecks [check for baryon number violation, etc.," << endl; cout << " when parsing truth information? 0 or 1] (default: 1)" << endl; cout << " -safe [check array sizes? 
0 or 1] (default: 1)" << endl; @@ -121,6 +122,7 @@ int main(int argc, char** argv){ int gNumNeutralHyposCut = -1; bool gSafe = true; bool gUsePolarization = false; + bool gAddPID = false; bool gMCChecks = true; int gPrint = 0; { @@ -130,7 +132,7 @@ int main(int argc, char** argv){ if ((argi == "-in")||(argi == "-out")||(argi == "-mc")||(argi == "-mctag") ||(argi == "-chi2")||(argi == "-shQuality")||(argi == "-massWindows") ||(argi == "-numUnusedTracks")||(argi == "-usePolarization")||(argi == "-numUnusedNeutrals") - ||(argi == "-mcChecks") + ||(argi == "-mcChecks")||(argi == "-addPID") ||(argi == "-numNeutralHypos")||(argi == "-safe")||(argi == "-print")){ flag = argi; continue; @@ -147,6 +149,7 @@ int main(int argc, char** argv){ if (flag == "-numUnusedNeutrals"){ gNumUnusedNeutralsCut = atoi(argi); } if (flag == "-numNeutralHypos"){ gNumNeutralHyposCut = atoi(argi); } if (flag == "-usePolarization"){ if (argi == "1") gUsePolarization = true; } + if (flag == "-addPID"){ if (argi == "1") gAddPID = true; } if (flag == "-mcChecks"){ if (argi == "0") gMCChecks = false; } if (flag == "-safe"){ if (argi == "0") gSafe = false; } if (flag == "-print"){ gPrint = atoi(argi); } @@ -182,6 +185,7 @@ int main(int argc, char** argv){ cout << " numUnusedNeutrals cut: " << gNumUnusedNeutralsCut << endl; cout << " numNeutralHypos cut: " << gNumNeutralHyposCut << endl; cout << " use polarization? " << gUsePolarization << endl; + cout << " use PID? " << gAddPID << endl; cout << " MC checks? " << gMCChecks << endl; cout << " safe mode? " << gSafe << endl; cout << endl; @@ -642,6 +646,14 @@ int main(int argc, char** argv){ if (gUseParticles) gInTree->SetBranchAddress("ChargedHypo__ChiSq_Tracking", inChargedHypo__ChiSq_Tracking); UInt_t inChargedHypo__NDF_Tracking[MAXTRACKS] = {}; if (gUseParticles) gInTree->SetBranchAddress("ChargedHypo__NDF_Tracking", inChargedHypo__NDF_Tracking); + UInt_t inChargedHypo__NDF_DCdEdx[MAXTRACKS] = {}; + if (gUseParticles && gAddPID) gInTree->SetBranchAddress("ChargedHypo__NDF_DCdEdx", inChargedHypo__NDF_DCdEdx); + Float_t inChargedHypo__ChiSq_DCdEdx[MAXTRACKS] = {}; + if (gUseParticles && gAddPID) gInTree->SetBranchAddress("ChargedHypo__ChiSq_DCdEdx", inChargedHypo__ChiSq_DCdEdx); + Float_t inChargedHypo__dEdx_CDC[MAXTRACKS] = {}; + if (gUseParticles && gAddPID) gInTree->SetBranchAddress("ChargedHypo__dEdx_CDC", inChargedHypo__dEdx_CDC); + Float_t inChargedHypo__dEdx_FDC[MAXTRACKS] = {}; + if (gUseParticles && gAddPID) gInTree->SetBranchAddress("ChargedHypo__dEdx_FDC", inChargedHypo__dEdx_FDC); // *** Neutral Particle Hypotheses (indexed using __NeutralIndex) *** @@ -693,6 +705,8 @@ int main(int argc, char** argv){ TClonesArray *inP4_KinFit[MAXPARTICLES] = {}; Int_t inChargedIndex[MAXPARTICLES][MAXCOMBOS] = {}; + Float_t inBeta_Timing[MAXPARTICLES][MAXCOMBOS] = {}; + Float_t inChiSq_Timing[MAXPARTICLES][MAXCOMBOS] = {}; Int_t inNeutralIndex[MAXPARTICLES][MAXCOMBOS] = {}; TClonesArray *inX4[MAXPARTICLES] = {}; Float_t inPathLengthSigma[MAXPARTICLES][MAXCOMBOS] = {}; @@ -711,6 +725,14 @@ int main(int argc, char** argv){ if (gUseParticles && gUseKinFit) gInTree->SetBranchAddress(var_P4_KinFit,&(inP4_KinFit[pIndex])); TString var_ChargedIndex(name); var_ChargedIndex += "__ChargedIndex"; if (gUseParticles) gInTree->SetBranchAddress(var_ChargedIndex,inChargedIndex[pIndex]); + TString var_Beta_Timing(name); + if (gUseParticles && !gUseKinFit) var_Beta_Timing += "__Beta_Timing_Measured"; + if (gUseParticles && gUseKinFit) var_Beta_Timing += "__Beta_Timing_KinFit"; + if (gUseParticles && 
gAddPID) gInTree->SetBranchAddress(var_Beta_Timing,inBeta_Timing[pIndex]); + TString var_ChiSq_Timing(name); + if (gUseParticles && !gUseKinFit) var_ChiSq_Timing += "__ChiSq_Timing_Measured"; + if (gUseParticles && gUseKinFit) var_ChiSq_Timing += "__ChiSq_Timing_KinFit"; + if (gUseParticles && gAddPID) gInTree->SetBranchAddress(var_ChiSq_Timing,inChiSq_Timing[pIndex]); } // *** Combo Neutrals *** @@ -813,6 +835,9 @@ int main(int argc, char** argv){ double outMCPx[MAXPARTICLES]={}, outMCPy[MAXPARTICLES]={}, outMCPz[MAXPARTICLES]={}, outMCEn[MAXPARTICLES]={}; double outVeeL[MAXPARTICLES]={}, outVeeLSigma[MAXPARTICLES]={}; double outTkChi2[MAXPARTICLES]={}, outTkNDF[MAXPARTICLES]={}; + double outTkDEDXChi2[MAXPARTICLES]={}, outTkDEDXNDF[MAXPARTICLES]={}; + double outTkDEDXCDC[MAXPARTICLES]={}, outTkDEDXFDC[MAXPARTICLES]={}; + double outTkTOFBeta[MAXPARTICLES]={}, outTkTOFChi2[MAXPARTICLES]={}; double outShQuality[MAXPARTICLES]={}; { for (unsigned int im = 0; im < gOrderedParticleNames.size(); im++){ @@ -835,6 +860,14 @@ int main(int argc, char** argv){ TString vTkNDF("TkNDFP"); vTkNDF += fsIndex; gOutTree->Branch(vTkNDF, &outTkNDF [pIndex]); TString vTkChi2("TkChi2P"); vTkChi2 += fsIndex; gOutTree->Branch(vTkChi2,&outTkChi2[pIndex]); } + if (gAddPID && GlueXParticleClass(name) == "Charged"){ + TString vTkTOFBeta ("TkTOFBetaP"); vTkTOFBeta += fsIndex; gOutTree->Branch(vTkTOFBeta, &outTkTOFBeta[pIndex]); + TString vTkTOFChi2 ("TkTOFChi2P"); vTkTOFChi2 += fsIndex; gOutTree->Branch(vTkTOFChi2, &outTkTOFChi2[pIndex]); + TString vTkDEDXChi2("TkDEDXChi2P"); vTkDEDXChi2 += fsIndex; gOutTree->Branch(vTkDEDXChi2,&outTkDEDXChi2[pIndex]); + TString vTkDEDXNDF("TkDEDXNDFP"); vTkDEDXNDF += fsIndex; gOutTree->Branch(vTkDEDXNDF, &outTkDEDXNDF [pIndex]); + TString vTkDEDXCDC("TkDEDXCDCP"); vTkDEDXCDC += fsIndex; gOutTree->Branch(vTkDEDXCDC, &outTkDEDXCDC [pIndex]); + TString vTkDEDXFDC("TkDEDXFDCP"); vTkDEDXFDC += fsIndex; gOutTree->Branch(vTkDEDXFDC, &outTkDEDXFDC [pIndex]); + } if (GlueXParticleClass(name) == "Neutral"){ TString vQual("ShQualityP"); vQual += fsIndex; gOutTree->Branch(vQual, &outShQuality[pIndex]); } @@ -1134,6 +1167,14 @@ int main(int argc, char** argv){ outREn[pIndex] = p4->E(); outTkNDF [pIndex] = inChargedHypo__NDF_Tracking [(inChargedIndex[pIndex][ic])]; outTkChi2[pIndex] = inChargedHypo__ChiSq_Tracking[(inChargedIndex[pIndex][ic])]; + if (gAddPID){ + outTkTOFBeta[pIndex] = inBeta_Timing[pIndex][ic]; + outTkTOFChi2[pIndex] = inChiSq_Timing[pIndex][ic]; + outTkDEDXChi2[pIndex] = inChargedHypo__ChiSq_DCdEdx[(inChargedIndex[pIndex][ic])]; + outTkDEDXNDF [pIndex] = inChargedHypo__NDF_DCdEdx [(inChargedIndex[pIndex][ic])]; + outTkDEDXCDC [pIndex] = inChargedHypo__dEdx_CDC [(inChargedIndex[pIndex][ic])]; + outTkDEDXFDC [pIndex] = inChargedHypo__dEdx_FDC [(inChargedIndex[pIndex][ic])]; + } } if (gUseMCParticles && outMCSignal > 0.1){ p4 = (TLorentzVector*)inThrown__P4->At(tIndex); @@ -1897,7 +1938,8 @@ map > GlueXDecayProductMap(int fsCode1, int fsCode2){ tmp = "Photon"; tmp += (pNumber++); names.push_back(tmp); } if (name.Contains("KShort")){ tmp = "PiPlus"; tmp += (pNumber++); names.push_back(tmp); tmp = "PiMinus"; tmp += (pNumber++); names.push_back(tmp); } - if (name.Contains("Lambda")){ tmp = "Proton"; tmp += (pNumber++); names.push_back(tmp); + if (name.Contains("Lambda")&&!name.Contains("AntiLambda")) + { tmp = "Proton"; tmp += (pNumber++); names.push_back(tmp); tmp = "PiMinus"; tmp += (pNumber++); names.push_back(tmp); } if (name.Contains("AntiLambda")){ tmp = "AntiProton"; tmp += 
(pNumber++); names.push_back(tmp); tmp = "PiPlus"; tmp += (pNumber++); names.push_back(tmp); }
diff --git a/PWA_scripts/benchmark/README.md b/PWA_scripts/benchmark/README.md
new file mode 100644
index 00000000..d6129ba0
--- /dev/null
+++ b/PWA_scripts/benchmark/README.md
@@ -0,0 +1,14 @@
+# Introduction
+This directory contains scripts for submitting batch jobs with MPI (and GPU). They are meant to determine how your fit speed improves (or not) by adding additional resources.
+* submit.py -- submits MPI jobs with various # of cores
+* submitGPU.py -- submits MPI+GPU jobs with various # of GPUs
+* plot_benchmark.C -- plots fit speed for the benchmark jobs
+
+# Required user modifications
+* In submit.py and submitGPU.py you should replace the MyEnv, MyConfig and MyOutDir variables with your own environment setup script, AmpTools fit configuration and output directory location
+* You can change the MyCPUs or MyGPUs list to contain different numbers of cores or GPUs if you want to benchmark with more or fewer resources
+
+# Notes:
+* These fits require the MPI-compiled versions of AmpTools and halld_sim; see https://halldweb.jlab.org/wiki/index.php/HOWTO_use_AmpTools_on_the_JLab_farm_with_MPI for more details
+* Only run the GPU version of the fitter if your fit utilizes a GPU-accelerated amplitude and you've compiled that amplitude with the GPU (CUDA) libraries on one of the sciml nodes; see https://halldweb.jlab.org/wiki/index.php/HOWTO_use_AmpTools_on_the_JLab_farm_GPUs for more details
+* Some of these default benchmarks request many CPUs or GPUs, so it may take some time for those nodes to become available on the ifarm/sciml partitions; be patient.
\ No newline at end of file
diff --git a/PWA_scripts/benchmark/plot_benchmark.C b/PWA_scripts/benchmark/plot_benchmark.C
new file mode 100644
index 00000000..51364612
--- /dev/null
+++ b/PWA_scripts/benchmark/plot_benchmark.C
@@ -0,0 +1,123 @@
+#include <iostream>
+#include <fstream>
+#include <string>
+#include <vector>
+#include <cstdlib>
+#include <cmath>
+
+void plot_benchmark(TString dir = "./") {
+
+  gStyle->SetOptStat(0);
+
+  // initialize list of nCores to plot
+  vector<int> numThreadsCPU = {1,2,4,8,16,32,64,96,128};
+  int numTestCPU = numThreadsCPU.size();
+
+  // for GPU fits, only add if desired
+  vector<int> numThreadsGPUT4 = {1,2,3,4,6,8,10,12};
+  vector<int> numThreadsGPURTX = {};
+
+  // names of directories containing benchmark results
+  vector<TString> types = {"cpu"};
+  vector<TGraphErrors*> grBenchmarkScan;
+  if(numThreadsGPUT4.size() > 0) types.push_back("gpuT4");
+  if(numThreadsGPURTX.size() > 0) types.push_back("gpuTitanRTX");
+
+  TH1F *hBenchmarkScan = new TH1F("hBenchmarkScan","; Number of GPUs or CPUs; Fit speed (Likelihood function call rate [Hz])", 200, 0, 200);
+  double maxRate = 0;
+
+  // loop over different architecture types to plot results
+  for(int itype=0; itype<types.size(); itype++) {
+
+    vector<int> numThreads = numThreadsCPU;
+    if(types[itype] == "gpuT4") numThreads = numThreadsGPUT4;
+    if(types[itype] == "gpuTitanRTX") numThreads = numThreadsGPURTX;
+    grBenchmarkScan.push_back(new TGraphErrors(numThreads.size()));
+
+    // loop over number of threads in test
+    for(int ithread=0; ithread<numThreads.size(); ithread++) {
+
+      // open the benchmark fit log for this core/GPU count (directory naming follows submit.py/submitGPU.py)
+      int nThreads = numThreads[ithread];
+      ifstream file(Form("%s/%s%03d/log/fit.out", dir.Data(), types[itype].Data(), nThreads));
+      string read_line;
+      double parValue = 0, parAvg = 0;
+      vector<double> parSq;
+      int nValues = 0;
+      while (std::getline(file, read_line)) {
+
+        TString line = read_line;
+        if(line.Contains("time ")) {
+          line.ReplaceAll("average time per function call: ","");
+          line.ReplaceAll(" ms.","");
+          parValue = 1./(atof(line)/1000);
+          parAvg += parValue;
+          parSq.push_back(parValue*parValue);
+          nValues++;
+        }
+        else continue;
+
+      }
+
+      if(nValues > 0) {
+        parAvg /= float(nValues);
+        double parRms = 0;
+        for(uint ip=0; ip<parSq.size(); ip++) parRms += parSq[ip];
+        parRms = sqrt(parRms/float(nValues) - parAvg*parAvg);
+        if(parAvg > maxRate) maxRate = parAvg;
+        //cout << nThreads << " " << parAvg << " " << parRms << endl;
+        grBenchmarkScan[itype]->SetPoint(ithread, nThreads, parAvg);
+        grBenchmarkScan[itype]->SetPointError(ithread, 0, parRms);
+      }
+    }
+  }
+
+  TCanvas *cc = new TCanvas("cc","cc",800,400);
+  auto legend = new TLegend(0.47,0.17,0.9,0.42);
+
+  hBenchmarkScan->SetMaximum(maxRate*2.5);
+  hBenchmarkScan->SetMinimum(0.1);
+  hBenchmarkScan->Draw();
+  vector<TF1*> fit;
+  for(int itype=0; itype<types.size(); itype++) {
+
+    grBenchmarkScan[itype]->SetMarkerStyle(20);
+    grBenchmarkScan[itype]->SetMarkerColor(kBlack+itype);
+    grBenchmarkScan[itype]->Draw("same pl");
+
+    fit.push_back(new TF1(types[itype],"pol1",1,200));
+    fit[itype]->FixParameter(0,0);
+    grBenchmarkScan[itype]->Fit(fit[itype],"N","",0.5,2);
+    fit[itype]->SetLineColor(kBlack+itype); fit[itype]->SetLineStyle(kDashed);
+    fit[itype]->Draw("same");
+
+    if(itype==0)
+      legend->AddEntry(grBenchmarkScan[0],"ifarm19 CPU (2 thread/core)","pl");
+    if(types[itype] == "gpuT4")
+      legend->AddEntry(grBenchmarkScan[itype],"sciml21 T4 GPU","pl");
+    if(types[itype] == "gpuTitanRTX")
+      legend->AddEntry(grBenchmarkScan[itype],"sciml19 Titan RTX GPU","pl");
+  }
+
+  gPad->SetLeftMargin(0.09);
+  gPad->SetBottomMargin(0.15);
+  gPad->SetTopMargin(0.05);
+  gPad->SetRightMargin(0.05);
+  gPad->SetLogx(); gPad->SetLogy();
+  gPad->SetGridy(); gPad->SetGridx();
+  hBenchmarkScan->GetXaxis()->SetTitleSize(0.05);
+  hBenchmarkScan->GetYaxis()->SetTitleSize(0.05);
+  hBenchmarkScan->GetXaxis()->SetTitleOffset(1.3);
+  hBenchmarkScan->GetYaxis()->SetTitleOffset(0.8);
+
+  legend->SetFillColor(0);
+  legend->Draw();
+
+  cc->Print("benchmark.png");
+
+  return;
+}
diff --git a/PWA_scripts/benchmark/submit.py b/PWA_scripts/benchmark/submit.py
new file mode 100755
index 00000000..f9d1c66d
--- /dev/null
+++ b/PWA_scripts/benchmark/submit.py
@@ -0,0 +1,72 @@
+#!/usr/bin/env python
+
+import sys
+import os
+import subprocess
+import math
+import pwd
+from optparse import OptionParser
+
+########################################################## MAIN ##########################################################
+def main(argv):
+
+  # SLURM INFO (see options at https://scicomp.jlab.org/scicomp/slurmJob/slurmInfo)
+  PARTITION = "ifarm"
+  CONSTRAINT = "farm19"
+  TIMELIMIT = "24:00:00" # Max walltime
+  MyCPUs = [1,2,4,8,16,32,64,96,128,192] # List of CPU cores to use in benchmark fits
+
+  # User provided environment, fit configuration and options
+  MyEnv = "/work/halld2/home/jrsteven/analysisGluexI/builds/setup_gluex_scanParam.csh"
+  MyConfig = "/work/halld2/home/jrsteven/forBenchmark/benchmark.cfg"
+  MyMPIOpt = "--mca btl_openib_allow_ib 1"
+  MyFitOpt = "-m 100000 -r 5"
+  MyOutDir = "/volatile/halld/home/" + pwd.getpwuid( os.getuid() )[0] + "/benchmark/"
+
+  # LOOP OVER # OF CORES FOR BENCHMARK
+  for nCores in MyCPUs:
+    # nodes used in fit (for every 64 CPUs allow an additional node)
+    nNodes = nCores/64 + 1
+
+    # create output directories
+    MyRunningDir = MyOutDir + "cpu%03d" % nCores
+    MyLogOutDir = MyRunningDir + "/log"
+    if not os.path.exists(MyOutDir):
+      os.makedirs(MyOutDir)
+    if not os.path.exists(MyRunningDir):
+      os.makedirs(MyRunningDir)
+    if not os.path.exists(MyLogOutDir):
+      os.makedirs(MyLogOutDir)
+
+    # create slurm submission script
+    slurmOut = open("tempSlurm.txt",'w')
+    slurmOut.write("#!/bin/csh \n")
+    slurmOut.write("#SBATCH --nodes=%d \n" % nNodes)
+    slurmOut.write("#SBATCH --partition=%s \n" % PARTITION)
+    slurmOut.write("#SBATCH --constraint=%s \n" % CONSTRAINT)
+    slurmOut.write("#SBATCH --cpus-per-task=1 \n")
+    slurmOut.write("#SBATCH --ntasks-per-core=1 \n")
+    slurmOut.write("#SBATCH --threads-per-core=1 \n")
+    slurmOut.write("#SBATCH 
--mem=%dGB \n" % nCores) # 1 GB per core + slurmOut.write("#SBATCH --time=%s \n" % TIMELIMIT) + slurmOut.write("#SBATCH --ntasks=%d \n" % (nCores+1)) + + slurmOut.write("#SBATCH --chdir=%s \n" % MyRunningDir) + slurmOut.write("#SBATCH --error=%s/fit.err \n" % (MyLogOutDir)) + slurmOut.write("#SBATCH --output=%s/fit.out \n" % (MyLogOutDir)) + slurmOut.write("#SBATCH --job-name=benchfit_%03d \n\n\n" % nCores) + + # commands to execute during job + slurmOut.write("pwd \n") + slurmOut.write("source %s \n" % MyEnv) + slurmOut.write("mpirun %s fitMPI -c %s %s \n" % (MyMPIOpt, MyConfig, MyFitOpt)) + slurmOut.close() + + # submit individual job + print("Submitting %d core job on %d nodes" % (nCores, nNodes)) + subprocess.call(["sbatch", "tempSlurm.txt"]) + os.remove("tempSlurm.txt") + + +if __name__ == "__main__": + main(sys.argv[1:]) diff --git a/PWA_scripts/benchmark/submitGPU.py b/PWA_scripts/benchmark/submitGPU.py new file mode 100755 index 00000000..71fd66df --- /dev/null +++ b/PWA_scripts/benchmark/submitGPU.py @@ -0,0 +1,78 @@ +#!/usr/bin/env python + +import sys +import os +import subprocess +import math +import pwd +from optparse import OptionParser + +########################################################## MAIN ########################################################## +def main(argv): + + # SLURM INFO (see options at https://scicomp.jlab.org/scicomp/slurmJob/slurmInfo) + PARTITION = "gpu" + GPUTYPE = "T4" + TIMELIMIT = "24:00:00" # Max walltime + MyGPUs = [1,2,3,4] # List of GPU cards to use in benchmark fits + + # User provided environment, fit configuration and options + MyEnv = "/work/halld2/home/jrsteven/2021-amptools/builds_gpu/setup_gluex_dev.csh" + MyConfig = "/work/halld2/home/jrsteven/forBenchmark/benchmark.cfg" + MyMPIOpt = "--mca btl_openib_allow_ib 1" + MyFitOpt = "-m 100000 -r 5" + MyOutDir = "/volatile/halld/home/" + pwd.getpwuid( os.getuid() )[0] + "/benchmark/" + + # LOOP OVER # OF GPUs FOR BENCHMARK + for nGPUs in MyGPUs: + + # Two types of nodes/GPUs (sciml19 and sciml21), both with 3 each + nNodes = 1 + if GPUTYPE=="T4": # 16 allowed in a single job + if nGPUs > 8: nNodes=2 + if GPUTYPE=="TitanRTX": # 4 allowed in a single job + if nGPUs > 4: nNodes=2 + if nGPUs > 8: nNodes=3 + + # create output directories + MyRunningDir = MyOutDir + "gpu%s%03d" % (GPUTYPE,nGPUs) + MyLogOutDir = MyRunningDir + "/log" + if not os.path.exists(MyOutDir): + os.makedirs(MyOutDir) + if not os.path.exists(MyRunningDir): + os.makedirs(MyRunningDir) + if not os.path.exists(MyLogOutDir): + os.makedirs(MyLogOutDir) + + # create slurm submission script + slurmOut = open("tempSlurm.txt",'w') + slurmOut.write("#!/bin/csh \n") + slurmOut.write("#SBATCH --nodes=%d \n" % nNodes) + slurmOut.write("#SBATCH --partition=%s \n" % PARTITION) + slurmOut.write("#SBATCH --gres=gpu:%s:%d \n" % (GPUTYPE,nGPUs)) + slurmOut.write("#SBATCH --cpus-per-task=1 \n") + slurmOut.write("#SBATCH --ntasks-per-core=1 \n") + slurmOut.write("#SBATCH --threads-per-core=1 \n") + slurmOut.write("#SBATCH --mem=20GB \n") # multiplied by nGPUs in slurm? 
+ slurmOut.write("#SBATCH --time=%s \n" % TIMELIMIT) + slurmOut.write("#SBATCH --ntasks=%d \n" % (nGPUs+1)) + + slurmOut.write("#SBATCH --chdir=%s \n" % MyRunningDir) + slurmOut.write("#SBATCH --error=%s/fit.err \n" % (MyLogOutDir)) + slurmOut.write("#SBATCH --output=%s/fit.out \n" % (MyLogOutDir)) + slurmOut.write("#SBATCH --job-name=benchfitgpu_%03d \n\n\n" % nGPUs) + + # commands to execute during job + slurmOut.write("pwd \n") + slurmOut.write("source %s \n" % MyEnv) + slurmOut.write("mpirun %s fitMPI -c %s %s \n" % (MyMPIOpt, MyConfig, MyFitOpt)) + slurmOut.close() + + # submit individual job + print("Submitting %d GPU job on %d %s nodes" % (nGPUs, nNodes, GPUTYPE)) + subprocess.call(["sbatch", "tempSlurm.txt"]) + os.remove("tempSlurm.txt") + + +if __name__ == "__main__": + main(sys.argv[1:]) diff --git a/PythonTools/gluupy/EXAMPLE_histmaker_eta_3pi0.py b/PythonTools/gluupy/EXAMPLE_histmaker_eta_3pi0.py new file mode 100755 index 00000000..6aad98d9 --- /dev/null +++ b/PythonTools/gluupy/EXAMPLE_histmaker_eta_3pi0.py @@ -0,0 +1,70 @@ +#!/usr/bin/env python + +from gluupy_histmaker import * + +# CUTS! +# # Each cut on single line is a list: branch name, cut string, value +# # Cut is applied by parsing a cut string. It expects the format: "cut stringname [operator] " followed by float +# # # 1: cut quantity (this is for human readability only, has no impact on code) +# # # 2: arithmetic operator (examples: ">", "<", "==", "!=" for greater than, less than, equals, and not equals ) +# # # If a cut is supplied that doesn't follow this form, we exit early +ALL_CUTS_LIST = [ + ["beam_p4_meas__E","Beam E > ",6.0], + ["p_p4_pmag" ,"Proton momentum > ",0.350], + ["chi2_ndf","chi^2/ndf < ",5.0], +] + +def main(argv): + + # Usage: + # >> thisScript.py [OUTFILE.root] [INFILE1.root] [INFILE2.root ...] + # Additional options: -M [num] to process only first [num] entries, -V verbose output, -f force overwrite output, --max-entries-perfile [num] process first [num] entries from each file + args = gluupy_setup(argv,ALL_CUTS_LIST) # Initial standardized setup, returns all parsed "args" from argparse module. It also checks that cuts in ALL_CUTS_LIST appear valid + + # Set from arguments read in + infile_name = args.infile_list[0] + max_entries=int(args.max_entries) # Convert float to int (float was desirable in options parsing so that scientific notation like 1e6 for 1,000,000 was accepted) + + nbins = 500 + # Create, fill histograms + h_chi2_ndf_nocuts = TH1F("h_chi2_ndf_nocuts","",nbins,0.,10.) + h_eta_kin_nocuts = TH1F("h_eta_kin_nocuts","",nbins,0.,1.) + # Histograms filled after cuts + h_chi2_ndf_postcuts = TH1F("h_chi2_ndf_postcuts","",nbins,0.,10.) + h_eta_kin = TH1F("h_eta_kin","",nbins,0.,1.) + h_eta_kin_doublecountallowed = TH1F("h_eta_kin_doublecountallowed","",nbins,0.,1.) + h_NumFS_ThisBeam = TH1F("h_NumFS_ThisBeam","",nbins,0.,20.) 
+ + # Reaction independent (assuming you reconstruct a proton) + # branch_names_to_use = [] # Empty list fetches all branches (will be slower) + branch_names_to_use = ["event","chi2_ndf", "accidweight","beam_p4_meas__E","p_p4_meas_px","p_p4_meas_py","p_p4_meas_pz",] + branch_names_to_use.extend(["eta_mass_meas","eta_mass_kin","pi0_1_mass_meas","pi0_2_mass_meas","pi0_3_mass_meas","pi0_1_mass_kin","pi0_2_mass_kin","pi0_3_mass_kin",]) + + # Get branches using uproot + branches_dict = GetBranchesUproot(args.infile_list[0],max_entries,branch_names_to_use) # Arguments: filename, OPTIONAL: max entries to parse (default=-1 => all entries), OPTIONAL: list of string branchnames to retrieve (default=get all branches) + # If you want to define any of your own branches, do so here + # branches_dict["x4_prot_meas_R"] = np.sqrt( branches_dict["x4_prot_meas_x"]**2 + branches_dict["x4_prot_meas_y"]**2 ) + branches_dict["p_p4_pmag"] = np.sqrt( branches_dict["p_p4_meas_px"]**2 + branches_dict["p_p4_meas_py"]**2 + branches_dict["p_p4_meas_pz"]**2) + + # Fill these histograms before any cuts (neglect FS weighting, some double counting will occur) + FillHistFromBranchDict(h_chi2_ndf_nocuts,branches_dict,"chi2_ndf",DoFSWeighting=False) + FillHistFromBranchDict(h_eta_kin_nocuts,branches_dict,"eta_mass_kin",DoFSWeighting=False) + + # Apply cuts (and add final state weight factors as new branch "FS_weight") + branches_dict = ApplyCutsReduceArrays(branches_dict,ALL_CUTS_LIST) + branches_dict["Num_FS"] = 1./branches_dict["FS_weight"] + + # Fill histograms after cuts. Adding FS weight factors prevents double counting. + FillHistFromBranchDict(h_chi2_ndf_postcuts,branches_dict,"chi2_ndf") + FillHistFromBranchDict(h_eta_kin,branches_dict,"eta_mass_kin") + FillHistFromBranchDict(h_eta_kin_doublecountallowed,branches_dict,"eta_mass_kin",DoFSWeighting=False) + FillHistFromBranchDict(h_NumFS_ThisBeam,branches_dict,"Num_FS",DoAccidentalSub=False,DoFSWeighting=False) + + SaveAllHists(args.outfile) #Saves ALL histograms opened/created to this point. Print overall processing rate + + + + +if __name__ == "__main__": + main(sys.argv[1:]) + diff --git a/PythonTools/gluupy/README.md b/PythonTools/gluupy/README.md new file mode 100644 index 00000000..c1b84a06 --- /dev/null +++ b/PythonTools/gluupy/README.md @@ -0,0 +1,43 @@ +# gluupy: +A python-based tool for parsing ROOT files, applying cuts, and creating histograms in the GlueX context. + +* Author: Jon Zarling (jzarling@jlab.org, Jonathan.Zarling@uregina.ca) + +## Preliminary version + +Last updated 8/17/2021 + +## Requirements: +* Python 2.7.X (no python 3 yet, sorry) +* uproot (https://uproot.readthedocs.io/) +* PyROOT (python 2 compatible) +* Additional python modules: numpy, xxhash, and lz4 + + +## Installation instructions with pip and python 2.7 + +### Initial setup (for use on JLab ifarm, or similar cluster) +1. Move ~/.local to desired location, then make symbolic link back to ~/.local + * This is where pip will put its packages + * Default home directory can fill up fast, this way we won't contribute to limited storage space there +2. 
Self update pip (to latest python 2.7.X compatible version) +> pip install --upgrade "pip < 21.0" --user + * Add this alias in your login script to use this newer pip version + > alias pip '~/.local/bin/pip' + * Verify pip version with: (after sourcing environment again, of course) + > pip show pip + * Version should be 20.3.4 or similar + +### Installing/upgrading packages (numpy, lz4, xxhash, and uproot): +> pip install numpy lz4 xxhash uproot --upgrade --user + +# Environment setup: +Add PyROOT and gluupy lib to your PYTHONPATH + * In csh: + > setenv PYTHONPATH $HD_UTILITIES_HOME/PythonTools/gluupy:/$ROOTSYS/lib:$PYTHONPATH + * In bash: + > export PYTHONPATH="$HD_UTILITIES_HOME/PythonTools/gluupy:/$ROOTSYS/lib:$PYTHONPATH" + +# Testfile location on ifarm work disk + +See input file located at /w/halld-sciwork18/home/jzarling/gluupy_testfile/eta_3pi0_SP18_LZ4.root diff --git a/PythonTools/gluupy/gluupy_histmaker.py b/PythonTools/gluupy/gluupy_histmaker.py new file mode 100644 index 00000000..ab584f26 --- /dev/null +++ b/PythonTools/gluupy/gluupy_histmaker.py @@ -0,0 +1,319 @@ +#!/usr/bin/env python + +# Standard packages +import argparse, os.path, sys, time +# These may require installation (e.g. with "pip install [package] --user" command) +import uproot +import numpy as np +# PyROOT: you may need to add to PYTHONPATH (e.g. setenv/export PYTHONPATH $ROOTSYS/lib:$PYTHONPATH) +from ROOT import TFile, TH1F, gDirectory + + +# CUTS +# # Each cut on single line is a list: [branch name, cut string, value] +# # Cut is applied by parsing a cut string. It expects the format: "cut stringname [operator] ". Cut value is given by float in next list entry +# # # 1: cut quantity (this portion for human readability only, has no impact on code) +# # # 2: arithmetic operator (examples: ">", "<", "==", "!=" for greater than, less than, equals, and not equals) +# # # If a cut is supplied that doesn't follow this form, we exit early +ACCEPED_CUT_OPERS = ["<","<=",">",">=","==","!=","gt_abs","lt_abs"] # List of operators implemented for applying cuts. + +VERBOSE = True +GLOBAL_T0=0 +GLOBAL_NUM_ENTRIES=0 + +np.set_printoptions(precision=4,linewidth=250) + +def gluupy_setup(argv,cuts_list): + + # Setup parser + parser_usage = "thisScript.py [OUTFILE.root] [INFILE1.root] [INFILE2.root ...]" + parser = argparse.ArgumentParser(usage = parser_usage) + # Define parser options. If no "-" or "--" flags in the add_argument below, then these are ''positional'' arguments + parser.add_argument("-V", "-v", "--verbose",action='count',default=0, help="Print verbose output (default=off)") + parser.add_argument("-f", dest="force_overwrite",action='count',default=0, help="Overwrite existing output file (if any)") + parser.add_argument("-M", "--max-entries", dest="max_entries",type=float,default=-1, help="Maximum entries to process before stopping (e.g. 1000 or 1e3). If multiple input files, counting is NOT reset between them") + parser.add_argument("--max-entries-perfile", dest="max_perfile",type=float,default=-1, help="Maximum entries to process before stopping (e.g. 1000 or 1e3). If multiple input files, counting is NOT reset between them") + parser.add_argument("outfile",nargs='?',default="",help="output ROOT file") # The '?' consumes one command line input if possible, otherwise uses default. 
+ parser.add_argument("infile_list",default=[],nargs="*",help="input ROOT file(s)") + # Now get arguments + args = parser.parse_args() # Contains all argumetns + + # Setup bools from input flags + global VERBOSE + allow_overwrite= True if args.force_overwrite>=1 else False + VERBOSE = True if args.verbose>=1 else False + + # For tracking how long things take to run + global GLOBAL_T0 + GLOBAL_T0 = time.clock() + + # Input checks + CheckInputs(args.outfile,args.infile_list,allow_overwrite,parser_usage) # Check all intput files are .root files that exist + CheckCutList(cuts_list) # Check that each cut in list of cuts appears valid + max_perfile=-1 # Default value: parse ALL events + max_perfile=int(args.max_perfile) # Convert float to int (float was desirable in options parsing so that scientific notation like 1e6 for 1,000,000 was accepted) + if(max_perfile!=-1): + print "max_perfile not implemented yet, exiting..." + sys.exit() + + return args + + + +def CheckInputs(outfile,infile_list,allow_overwrite=False,parser_usage=""): + + if(len(infile_list)==0): + print "ERROR: no input files provided! Exiting..." + print "Usage: \n\t" + parser_usage + sys.exit() + if(len(infile_list)>=2): + print "ERROR: No support for multiple input files yet, exiting..." + sys.exit() + if(os.path.exists(outfile) and not allow_overwrite): + print "ERROR: output file already exists! Exiting..." + print "Usage: \n\t" + parser_usage + sys.exit() + if(".root" not in outfile): + print "ERROR: output file does not contain .root extension!\nFilename provided: "+outfile+" \nExiting..." + print "Usage: \n\t" + parser_usage + sys.exit() + + for fname in infile_list: + if(not os.path.exists(fname)): + print "ERROR: input file does not exist! Exiting..." + print "Filename: " + fname + sys.exit() + if(".root" not in fname): + print "ERROR: input file does not appear to be a ROOT file (check that filename ends in .root)" + print "Filename: " + fname + sys.exit() + +def PrintAllBranchesUproot(fname): + ufile = uproot.open(fname) + + # Get keynames corresponding to TTree. There should only be one top-level unique key (if not, exit) + all_keys=set() #Unique names only + for key in ufile.keys(): all_keys.add(key.split(";")[0]) # Keynames are stores in format [key];[cycle_no]. Use split to get only the keyname + if(len(all_keys)>=2): + print "ERROR: more than two unique keys found in base directory of ROOT file. Check input file..." + print "keynames found: " + str(all_keys) + sys.exit() + treename="" + for key in all_keys: treename=key # Annoyingly, can't get items inside set by indexing, have to retrieve by a loop like this instead + + print "ALL branches: " + print str(ufile[treename].keys()) + + + +def GetBranchesUproot(fname,max_entries=-1,branchname_list=[]): + + ufile = uproot.open(fname) + + if(VERBOSE and max_entries!=-1): print "Retrieving only the first " + str(max_entries) + " entries in tree..." + + # Get keynames corresponding to TTree. There should only be one top-level unique key (if not, exit) + all_keys=set() #Unique names only + for key in ufile.keys(): all_keys.add(key.split(";")[0]) # Keynames are stores in format [key];[cycle_no]. Use split to get only the keyname + if(len(all_keys)>=2): + print "ERROR: more than two unique keys found in base directory of ROOT file. Check input file..." 
+ print "keynames found: " + str(all_keys) + sys.exit() + treename="" + for key in all_keys: treename=key # Annoyingly, can't get items inside set by indexing, have to retrieve by a loop like this instead + + tot_file_entries=ufile[treename].num_entries + num_entries_to_parse=max_entries if tot_file_entries>max_entries else tot_file_entries + + print "Total number of entries in file: " + str(tot_file_entries) + + branches_to_retrieve=[] + if(len(branchname_list)==0): branches_to_retrieve=ufile[treename].keys() # Get ALL branches + else: branches_to_retrieve=branchname_list # Get only specified branches + + # Always need these three arrays to calculate FS weights, add if not already there + if("event" not in branches_to_retrieve): branches_to_retrieve.append("event") + if("beam_p4_meas__E" not in branches_to_retrieve): branches_to_retrieve.append("beam_p4_meas__E") + if("beam_x4_meas_t" not in branches_to_retrieve): branches_to_retrieve.append("beam_x4_meas_t") + + if(VERBOSE and len(branchname_list)==0): print "Retriving ALL branches from tree...\n All branches: " + str(branches_to_retrieve) + if(VERBOSE and len(branchname_list)!=0): print "Retriving only the following branches from tree " + str(branches_to_retrieve) + + br_arr_dict = {} + # nentries = ufile[treename].num_entries + # if(max_entries!=-1 and ufile[treename].num_entries>max_entries): nentries=max_entries + for branch in branches_to_retrieve: + if(VERBOSE): print "Retrieving branch: " + branch + np_arr = np.empty(1) + if(max_entries==-1): np_arr = ufile[treename+"/"+branch].array(library="np") + else: np_arr = ufile[treename+"/"+branch].array(library="np",entry_stop=max_entries) + br_arr_dict[branch] = np_arr + + global GLOBAL_NUM_ENTRIES + GLOBAL_NUM_ENTRIES+=len(br_arr_dict[br_arr_dict.keys()[0]]) + + return br_arr_dict + +def GetEntriesPassingCuts(array_dict,cut_list): + + print "Applying cuts..." + nentries=len( array_dict[ array_dict.keys()[0] ] ) + if(VERBOSE): print "Num. entries before passing cuts " + str(nentries) + "\n" + passes_cuts = np.ones(nentries,dtype=bool) #Initialize (all passing cuts) + NumPassingCutsList = [] + NumSurvivedLastCut = nentries + for i in range(0, len(cut_list)): + cut_branchname = cut_list[i][0] + cut_expression = cut_list[i][1] + cut_value = cut_list[i][2] + if(cut_branchname not in array_dict.keys() ): + print "ERROR! Could not find branch " + cut_branchname + " from those available\n All branches read in or made on-the-fly : " + str(array_dict.keys())+"\n If not selecting all branches, check to see that necessary branches are included" + sys.exit() + if(VERBOSE): print "Applying cut: " + cut_expression + " " + str(cut_value) + cut_op = GetWhichCutOperator(cut_expression) + if(cut_op=="<"): passes_cuts &= np.where(array_dict[cut_branchname]< cut_value,True,False) + if(cut_op=="<="): passes_cuts &= np.where(array_dict[cut_branchname]<=cut_value,True,False) + if(cut_op==">"): passes_cuts &= np.where(array_dict[cut_branchname]> cut_value,True,False) + if(cut_op==">="): passes_cuts &= np.where(array_dict[cut_branchname]>=cut_value,True,False) + if(cut_op=="=="): passes_cuts &= np.where(array_dict[cut_branchname]==cut_value,True,False) + if(cut_op=="!="): passes_cuts &= np.where(array_dict[cut_branchname]!=cut_value,True,False) + if(cut_op=="gt_abs"): passes_cuts &= np.where( abs(array_dict[cut_branchname]-cut_value)>cut_list[i][3],True,False) + if(cut_op=="lt_abs"): passes_cuts &= np.where( abs(array_dict[cut_branchname]-cut_value)0.000001,1.,0.) 
#Make either 1 or 0 + # FSIsNew_NonZeroIndices contains all the indexes of combos that are the first encounter of that particular event+beam photon + FSIsNew_NonZeroIndices = np.nonzero(FSIsNew)[0] # Why the [0] at the end? np.nonzero returns tuple of np_arrays. FSIsNew has 1 dimension. [1] would be second dimension, etc. + FSIsNew_NonZeroIndices_diff = np.ediff1d(FSIsNew_NonZeroIndices,to_end=int(nentries-FSIsNew_NonZeroIndices[-1]) ) #Have to tack on the last unique event+beam photon by hand + + FS_weights=np.ones(nentries) + counter = 0 + for val in FSIsNew_NonZeroIndices_diff: + if(val==1): + counter+=1 + continue + else: + for i in range(val): + FS_weights[counter]=1./val + counter+=1 + if(counter!=nentries): + print "ERROR, something went wrong!!!" + print "Counter reached: " + str(counter) + print "nentries: " + str(nentries) + print "What I had so far: " + str(FS_weights) + sys.exit() + time_to_run = time.clock()-t0 + print "Time to get final FS weight factors: " + str(time_to_run) + + return FS_weights + + +def ApplyCutsReduceArrays(branches_dict,cuts_list,calc_FSweights=True): + entries_passing_cuts = GetEntriesPassingCuts(branches_dict,cuts_list) #Returns 1D numpy array of booleans (True=event passes cuts, False=event is cut) + branches_dict = ApplyCuts(branches_dict,entries_passing_cuts) # Apply cuts in a second, separate step. Removes cut events from np arrays. + if(calc_FSweights): branches_dict["FS_weight"] = GetFSWeights(branches_dict) #MUST be done after applying cuts, or else the factors here will be incorrect + return branches_dict + +def CheckCutList(cut_list): + + print "Checking cut list" + for cut in cut_list: + # Every 'cut' in loop should be a list of 3 objects, where + # # Check length of list and type of all three values stored inside + if(len(cut)!=3 and len(cut)!=4): + print "ERROR: improper cut supplied in cut list. Three arguments are expected, but " + str(len(cut)) + " were found. Exiting..." + sys.exit() + if(str(type(cut[0]))!=""): + print "ERROR: first entry for this cut is not expected type 'str'. Type found instead: "+str(type(cut[0]))+". Check your list of cuts. Exiting..." + sys.exit() + if(str(type(cut[1]))!=""): + print "ERROR: second entry for this cut is not expected type 'str'. Type found instead: "+str(type(cut[1]))+". Check your list of cuts. Exiting..." + sys.exit() + cut_type_str = str(type(cut[2])) + if("int" not in cut_type_str and "float" not in cut_type_str): + print "ERROR: third entry for this cut does not match accepted types: 'int' and 'float'. Type found instead: "+cut_type_str+". Check your list of cuts. Exiting..." + sys.exit() + + # Check that every cut string is well formed (has math expression with something on each side of a math operator --- math operator must be implemented) + for cut in cut_list: + cut_op = GetWhichCutOperator(cut[1]) + if(cut_op=="ERROR"): + print "ERROR: cut string does not have any acceptable cut operator defined in list ACCEPED_CUT_OPERS.\n Cut string: " + cut[1] + "\n accepted operators " + str(ACCEPED_CUT_OPERS) + "\n Exiting... " + sys.exit() + print "Done checking cuts" + + return + +def FillHistFromBranchDict(h,branches_dict,branchname,DoAccidentalSub=True,DoFSWeighting=True): + n = len(branches_dict[branchname]) + if(DoAccidentalSub and "accidweight" not in branches_dict.keys()): + print "ERROR: could not find accidental weight branch in branch_dict! Exiting..." 
+ sys.exit() + if(DoFSWeighting and "FS_weight" not in branches_dict.keys()): + print "ERROR: could not find FS weights branch in branch_dict! \nBe sure ApplyCutsReduceArrays is called before calling this function. Exiting..." + sys.exit() + if(not DoAccidentalSub and not DoFSWeighting): h.FillN(n,branches_dict[branchname],np.ones(n)) + if(DoAccidentalSub and not DoFSWeighting): h.FillN(n,branches_dict[branchname],branches_dict["accidweight"]) + if(DoAccidentalSub and DoFSWeighting): h.FillN(n,branches_dict[branchname],branches_dict["accidweight"]*branches_dict["FS_weight"]) + if(not DoAccidentalSub and DoFSWeighting): h.FillN(n,branches_dict[branchname],branches_dict["FS_weight"]) + + return + +def GetWhichCutOperator(cut_string): + for op in ACCEPED_CUT_OPERS: + # the < and > symbols are substrings of <= and >= respectively, consider separately + if(op=="<" and op in cut_string and "<=" not in cut_string): return "<" + elif(op==">" and op in cut_string and ">=" not in cut_string): return ">" + elif(op in cut_string): return op + # print "NOTHING FOUND FOR OP: " + op + return "ERROR" + +def SaveAllHists(fname): + all_objects_list = gDirectory.GetList() + f = TFile.Open(fname,"RECREATE") + f.cd() + + for obj in all_objects_list: + if(str(type(obj))==""): + obj.Write() + f.Close() + + time_taken = time.clock()-GLOBAL_T0 + rate = GLOBAL_NUM_ENTRIES/time_taken/1000. # kHz + + print "Processed " + str(GLOBAL_NUM_ENTRIES) + " entries in " + str(time_taken) + " seconds" + print "\tor, total processing rate of " + str(rate) + " kHz" + + return + +if __name__ == "__main__": + main(sys.argv[1:]) + diff --git a/README.md b/README.md index b28a1039..12fd32b6 100644 --- a/README.md +++ b/README.md @@ -3,4 +3,4 @@ various stand-alone scripts and programs for various purposes * CDC_scripts - Scripts to calculate magnetic field correction to Garfield tables, and extract thresholds from configuration files * CDC_new_dedx - DSelector to apply space-charge correction to CDC dE/dx - +* get_file_time - python script to extract the start time from an evio file diff --git a/b1pi_test/b1pi_cron_one.sh b/b1pi_test/b1pi_cron_one.sh index 85813fd8..fc232be0 100755 --- a/b1pi_test/b1pi_cron_one.sh +++ b/b1pi_test/b1pi_cron_one.sh @@ -25,11 +25,11 @@ mkdir -pv $RUN_DIR cd $RUN_DIR $B1PI_TEST_DIR/b1pi_test.sh -4 -n $nevents -r $RUN echo \#count events -echo \#count b1_pi.hddm `hddm_counter.pl b1_pi.hddm physicsEvent` -echo \#count hdgeant.hddm `hddm_counter.pl hdgeant.hddm physicsEvent` -echo \#count hdgeant_smeared.hddm `hddm_counter.pl hdgeant_smeared.hddm physicsEvent` -echo \#count dana_rest.hddm `hddm_counter.pl dana_rest.hddm reconstructedPhysicsEvent` -echo \#count dana_rest_b1pi.hddm `hddm_counter.pl dana_rest_b1pi.hddm reconstructedPhysicsEvent` +echo \#count b1_pi.hddm `./hddm_counter.pl b1_pi.hddm physicsEvent` +echo \#count hdgeant.hddm `./hddm_counter.pl hdgeant.hddm physicsEvent` +echo \#count hdgeant_smeared.hddm `./hddm_counter.pl hdgeant_smeared.hddm physicsEvent` +echo \#count dana_rest.hddm `./hddm_counter.pl dana_rest.hddm reconstructedPhysicsEvent` +echo \#count dana_rest_b1pi.hddm `./hddm_counter.pl dana_rest_b1pi.hddm reconstructedPhysicsEvent` export PLOTDIR=/group/halld/www/halldweb/html/b1pi/$TODAYS_DATE/$BMS_OSNAME/Run$RUN mkdir -pv $PLOTDIR cp -v *.pdf *.gif *.html $PLOTDIR @@ -41,11 +41,11 @@ mkdir -pv $RUN_DIR cd $RUN_DIR $B1PI_TEST_DIR/b1pi_test.sh -n $nevents -r $RUN -4 echo \#count events -echo \#count b1_pi.hddm `hddm_counter.pl b1_pi.hddm physicsEvent` -echo \#count hdgeant.hddm 
`hddm_counter.pl hdgeant.hddm physicsEvent` -echo \#count hdgeant_smeared.hddm `hddm_counter.pl hdgeant_smeared.hddm physicsEvent` -echo \#count dana_rest.hddm `hddm_counter.pl dana_rest.hddm reconstructedPhysicsEvent` -echo \#count dana_rest_b1pi.hddm `hddm_counter.pl dana_rest_b1pi.hddm reconstructedPhysicsEvent` +echo \#count b1_pi.hddm `./hddm_counter.pl b1_pi.hddm physicsEvent` +echo \#count hdgeant.hddm `./hddm_counter.pl hdgeant.hddm physicsEvent` +echo \#count hdgeant_smeared.hddm `./hddm_counter.pl hdgeant_smeared.hddm physicsEvent` +echo \#count dana_rest.hddm `./hddm_counter.pl dana_rest.hddm reconstructedPhysicsEvent` +echo \#count dana_rest_b1pi.hddm `./hddm_counter.pl dana_rest_b1pi.hddm reconstructedPhysicsEvent` export PLOTDIR=/group/halld/www/halldweb/html/b1pi/$TODAYS_DATE/$BMS_OSNAME/Run$RUN mkdir -pv $PLOTDIR cp -v *.pdf *.gif *.html $PLOTDIR diff --git a/cache_small/button_blue.png b/cache_small/button_blue.png new file mode 100644 index 00000000..5e14f8a1 Binary files /dev/null and b/cache_small/button_blue.png differ diff --git a/cache_small/button_red.png b/cache_small/button_red.png new file mode 100644 index 00000000..2f6d5120 Binary files /dev/null and b/cache_small/button_red.png differ diff --git a/cache_small/cache_small_file_report.html b/cache_small/cache_small_file_report.html new file mode 100644 index 00000000..b2d61002 --- /dev/null +++ b/cache_small/cache_small_file_report.html @@ -0,0 +1 @@ +
count_millions  user
6.6876  tbritton
3.9519  gxproj3
1.4424  ijaegle
0.6272  gxproj2
0.5766  gxproj1
0.1777  jzarling
0.1764  gxproj5
0.1738  gxproj4
0.0786  nwickjlb
0.0779  scole
0.0396  acernst
0.0151  billlee
0.0119  davidl
0.0082  gxproj6
0.0060  mkamel
0.0052  sdobbs
0.0038  acschick
0.0034  halldata
0.0012  gxproj9
0.0003  gluex
\ No newline at end of file diff --git a/cache_small/find_command.txt b/cache_small/find_command.txt new file mode 100644 index 00000000..623860df --- /dev/null +++ b/cache_small/find_command.txt @@ -0,0 +1 @@ +[marki@ifarm1901 ~]$ find /cache/halld -type f -size -1048576c -exec stat -c "%n %U %X %Y %s" {} \; > /u/scratch/marki/cache_small_files.txt diff --git a/cache_small/load_small_file_info.sql b/cache_small/load_small_file_info.sql new file mode 100644 index 00000000..5843cd3d --- /dev/null +++ b/cache_small/load_small_file_info.sql @@ -0,0 +1,30 @@ +/********************************************************************** + +Grants: + +grant all on cacheInfo.* to marki@localhost identified by '*****'; +grant file on *.* to marki@localhost; + +Invocation: + +sed 's/\(.*\)\//\1 /' cache_small_files.txt > csf.tmp +mysql -umarki -p cacheInfo < load_small_file_info.sql + +Example query: + +select count(*) as c, user from smallFile group by user order by c desc; + +**********************************************************************/ + +drop table if exists smallFile; +create table smallFile ( + dir varchar(1024), + name varchar(1024), + user varchar(32), + atime int, + mtime int, + size int +); +load data local infile + './csf.tmp' into table smallFile + fields terminated by ' '; diff --git a/cache_small/make_top.sh b/cache_small/make_top.sh new file mode 100755 index 00000000..eb4d6588 --- /dev/null +++ b/cache_small/make_top.sh @@ -0,0 +1,25 @@ +#!/bin/bash +rm -rf small_files +mkdir small_files +cp -v button_red.png button_blue.png small_files +cd small_files +rm -f top_level.tmp +mysql --silent -umarki -phybrid cacheInfo -e 'select count(*) as count, user from smallFile group by user order by count desc;' > top_level.tmp +rm -f index.html +echo \Small Files on the Cache Disk\ > index.html +echo `date` >> index.html +echo \ >> index.html +echo \\count\\user\\directories by name\\directories by count\\ >> index.html +awk '{print ""}' < top_level.tmp >> index.html +echo \ >> index.html +rm -f users.tmp +mysql --silent -umarki -phybrid cacheInfo -e 'select unique user from smallFile;' > users.tmp +while read u; do + echo "$u" + rm -f sql.tmp + echo select dir, count\(*\) as count, name as example_file from smallFile where user = \'$u\' group by dir order by dir\; > sql.tmp + mysql --html -umarki -phybrid cacheInfo < sql.tmp > ${u}_by_dir.html + rm -f sql.tmp + echo select dir, count\(*\) as count, name as example_file from smallFile where user = \'$u\' group by dir order by count desc, dir\; > sql.tmp + mysql --html -umarki -phybrid cacheInfo < sql.tmp > ${u}_by_count.html +done < users.tmp diff --git a/comp_mod/RunPeriod-2017-01-reprocess.xml b/comp_mod/RunPeriod-2017-01-reprocess.xml new file mode 100644 index 00000000..890d1bc5 --- /dev/null +++ b/comp_mod/RunPeriod-2017-01-reprocess.xml @@ -0,0 +1,316 @@ + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/dirc/dselector/DSelector_kpkm.C b/dirc/dselector/DSelector_kpkm.C new file mode 100644 index 00000000..a4ba8584 --- /dev/null +++ b/dirc/dselector/DSelector_kpkm.C @@ -0,0 +1,234 @@ +#include "DSelector_kpkm.h" + +void DSelector_kpkm::Init(TTree *locTree) +{ + // USERS: IN THIS FUNCTION, ONLY MODIFY SECTIONS WITH A "USER" OR "EXAMPLE" LABEL. LEAVE THE REST ALONE. + + // The Init() function is called when the selector needs to initialize a new tree or chain. + // Typically here the branch addresses and branch pointers of the tree will be set. 
+ // Init() will be called many times when running on PROOF (once per file to be processed). + + //USERS: SET OUTPUT FILE NAME //can be overriden by user in PROOF + dOutputFileName = "kpkm.root"; //"" for none + dOutputTreeFileName = ""; //"" for none + dFlatTreeFileName = ""; //output flat tree (one combo per tree entry), "" for none + dFlatTreeName = ""; //if blank, default name will be chosen + + //Because this function gets called for each TTree in the TChain, we must be careful: + //We need to re-initialize the tree interface & branch wrappers, but don't want to recreate histograms + bool locInitializedPriorFlag = dInitializedFlag; //save whether have been initialized previously + DSelector::Init(locTree); //This must be called to initialize wrappers for each new TTree + //gDirectory now points to the output file with name dOutputFileName (if any) + if(locInitializedPriorFlag) + return; //have already created histograms, etc. below: exit + + Get_ComboWrappers(); + dPreviousRunNumber = 0; + + /*********************************** EXAMPLE USER INITIALIZATION: ANALYSIS ACTIONS **********************************/ + + // EXAMPLE: Create deque for histogramming particle masses: + // // For histogramming the phi mass in phi -> K+ K- + // // Be sure to change this and dAnalyzeCutActions to match reaction + std::deque MyPhi; + MyPhi.push_back(KPlus); MyPhi.push_back(KMinus); + + //ANALYSIS ACTIONS: //Executed in order if added to dAnalysisActions + //false/true below: use measured/kinfit data + + //PID + dAnalysisActions.push_back(new DHistogramAction_ParticleID(dComboWrapper, false)); + //below: value: +/- N ns, Unknown: All PIDs, SYS_NULL: all timing systems + dAnalysisActions.push_back(new DCutAction_PIDDeltaT(dComboWrapper, false, 0.2, KPlus, SYS_TOF)); + dAnalysisActions.push_back(new DCutAction_PIDDeltaT(dComboWrapper, false, 0.2, KMinus, SYS_TOF)); + + //MASSES + dAnalysisActions.push_back(new DHistogramAction_MissingMassSquared(dComboWrapper, false, 1000, -0.1, 0.1)); + + //KINFIT RESULTS + dAnalysisActions.push_back(new DHistogramAction_KinFitResults(dComboWrapper)); + dAnalysisActions.push_back(new DCutAction_KinFitFOM(dComboWrapper, 0.001)); + + //CUT MISSING MASS + dAnalysisActions.push_back(new DCutAction_MissingMassSquared(dComboWrapper, false, -0.02, 0.02)); + + dAnalysisActions.push_back(new DHistogramAction_InvariantMass(dComboWrapper, false, 0, MyPhi, 500, 0.3, 1.5, "Phi")); + + //BEAM ENERGY + dAnalysisActions.push_back(new DHistogramAction_BeamEnergy(dComboWrapper, false)); + + //KINEMATICS + dAnalysisActions.push_back(new DHistogramAction_ParticleComboKinematics(dComboWrapper, false)); + + //INITIALIZE ACTIONS + //If you create any actions that you want to run manually (i.e. 
don't add to dAnalysisActions), be sure to initialize them here as well + Initialize_Actions(); + + /******************************** EXAMPLE USER INITIALIZATION: STAND-ALONE HISTOGRAMS *******************************/ + + //EXAMPLE MANUAL HISTOGRAMS: + dHist_MissingMassSquared = new TH1I("MissingMassSquared", ";Missing Mass Squared (GeV/c^{2})^{2}", 600, -0.06, 0.06); + dHist_BeamEnergy = new TH1I("BeamEnergy", ";Beam Energy (GeV)", 600, 0.0, 12.0); + + //DIRC HISTOGRAMS + dHist_KPlusDIRCXY = new TH2F("KPlusDIRCXY", "; X (cm); Y (cm)", 300, -150, 150, 300, -150, 150); + dHist_KMinusDIRCXY = new TH2F("KMinusDIRCXY", "; X (cm); Y (cm)", 300, -150, 150, 300, -150, 150); + dHist_KPlusDIRCThetaCVsP = new TH2F("KPlusDIRCThetaCVsP", "; P (GeV); #theta_{C}", 300, 0., 10., 300, 0., 60.); + dHist_KMinusDIRCThetaCVsP = new TH2F("KMinusDIRCThetaCVsP", "; P (GeV); #theta_{C}", 300, 0., 10., 300, 0., 60.); + dHist_Ldiff = new TH2F("Ldiff", "; Minus L_{#pi} - L_{K} ; Plus L_{#pi} - L_{K}", 200, -200, 200, 200, -200, 200); + +} + +Bool_t DSelector_kpkm::Process(Long64_t locEntry) +{ + // The Process() function is called for each entry in the tree. The entry argument + // specifies which entry in the currently loaded tree is to be processed. + // + // This function should contain the "body" of the analysis. It can contain + // simple or elaborate selection criteria, run algorithms on the data + // of the event and typically fill histograms. + // + // The processing can be stopped by calling Abort(). + // Use fStatus to set the return value of TTree::Process(). + // The return value is currently not used. + + //CALL THIS FIRST + DSelector::Process(locEntry); //Gets the data from the tree for the entry + //cout << "RUN " << Get_RunNumber() << ", EVENT " << Get_EventNumber() << endl; + //TLorentzVector locProductionX4 = Get_X4_Production(); + + /******************************************** GET POLARIZATION ORIENTATION ******************************************/ + + //Only if the run number changes + //RCDB environment must be setup in order for this to work! 
(Will return false otherwise) + UInt_t locRunNumber = Get_RunNumber(); + if(locRunNumber != dPreviousRunNumber) + { + dIsPolarizedFlag = dAnalysisUtilities.Get_IsPolarizedBeam(locRunNumber, dIsPARAFlag); + dPreviousRunNumber = locRunNumber; + } + + /********************************************* SETUP UNIQUENESS TRACKING ********************************************/ + + //ANALYSIS ACTIONS: Reset uniqueness tracking for each action + //For any actions that you are executing manually, be sure to call Reset_NewEvent() on them here + Reset_Actions_NewEvent(); + + /************************************************* LOOP OVER COMBOS *************************************************/ + + //Loop over combos + for(UInt_t loc_i = 0; loc_i < Get_NumCombos(); ++loc_i) + { + //Set branch array indices for combo and all combo particles + dComboWrapper->Set_ComboIndex(loc_i); + + // Is used to indicate when combos have been cut + if(dComboWrapper->Get_IsComboCut()) // Is false when tree originally created + continue; // Combo has been cut previously + + /********************************************** GET PARTICLE INDICES *********************************************/ + + //Used for tracking uniqueness when filling histograms, and for determining unused particles + + //Step 0 + Int_t locBeamID = dComboBeamWrapper->Get_BeamID(); + Int_t locKPlusTrackID = dKPlusWrapper->Get_TrackID(); + Int_t locKMinusTrackID = dKMinusWrapper->Get_TrackID(); + Int_t locProtonTrackID = dProtonWrapper->Get_TrackID(); + + /*********************************************** GET FOUR-MOMENTUM **********************************************/ + + // Get P4's: //is kinfit if kinfit performed, else is measured + //dTargetP4 is target p4 + //Step 0 + TLorentzVector locBeamP4 = dComboBeamWrapper->Get_P4(); + TLorentzVector locKPlusP4 = dKPlusWrapper->Get_P4(); + TLorentzVector locKMinusP4 = dKMinusWrapper->Get_P4(); + TLorentzVector locProtonP4 = dProtonWrapper->Get_P4(); + + // Get Measured P4's: + //Step 0 + TLorentzVector locBeamP4_Measured = dComboBeamWrapper->Get_P4_Measured(); + TLorentzVector locKPlusP4_Measured = dKPlusWrapper->Get_P4_Measured(); + TLorentzVector locKMinusP4_Measured = dKMinusWrapper->Get_P4_Measured(); + TLorentzVector locProtonP4_Measured = dProtonWrapper->Get_P4_Measured(); + + /********************************************* COMBINE FOUR-MOMENTUM ********************************************/ + + // DO YOUR STUFF HERE + + // Combine 4-vectors + TLorentzVector locMissingP4_Measured = locBeamP4_Measured + dTargetP4; + locMissingP4_Measured -= locKPlusP4_Measured + locKMinusP4_Measured + locProtonP4_Measured; + + /******************************************** EXECUTE ANALYSIS ACTIONS *******************************************/ + + // Loop through the analysis actions, executing them in order for the active particle combo + if(!Execute_Actions()) //if the active combo fails a cut, IsComboCutFlag automatically set + continue; + + //if you manually execute any actions, and it fails a cut, be sure to call: + //dComboWrapper->Set_IsComboCut(true); + + /**************************************** EXAMPLE: HISTOGRAM BEAM ENERGY *****************************************/ + + dHist_BeamEnergy->Fill(locBeamP4.E()); + + /************************************ EXAMPLE: HISTOGRAM MISSING MASS SQUARED ************************************/ + + //Missing Mass Squared + double locMissingMassSquared = locMissingP4_Measured.M2(); + dHist_MissingMassSquared->Fill(locMissingMassSquared); + + /************************************ EXAMPLE: DIRC 
HISTOGRAMS ************************************/ + // location on DIRC XY plane + double locKPlusDIRCX = dKPlusWrapper->Get_Track_ExtrapolatedX_DIRC(); + double locKPlusDIRCY = dKPlusWrapper->Get_Track_ExtrapolatedY_DIRC(); + double locKMinusDIRCX = dKMinusWrapper->Get_Track_ExtrapolatedX_DIRC(); + double locKMinusDIRCY = dKMinusWrapper->Get_Track_ExtrapolatedY_DIRC(); + dHist_KPlusDIRCXY->Fill(locKPlusDIRCX, locKPlusDIRCY); + dHist_KMinusDIRCXY->Fill(locKMinusDIRCX, locKMinusDIRCY); + + // DIRC performance variables (likelihood, theta_C), see code for more + // gluex_root_analysis/libraries/DSelector/DChargedTrackHypothesis.h + int locKPlusNumPhotons_DIRC = dKPlusWrapper->Get_Track_NumPhotons_DIRC(); + int locKMinusNumPhotons_DIRC = dKMinusWrapper->Get_Track_NumPhotons_DIRC(); + + // require a minimum number of detected photons + if(locKPlusNumPhotons_DIRC < 5 || locKMinusNumPhotons_DIRC < 5) + continue; + + double locKPlusThetaC_DIRC = dKPlusWrapper->Get_Track_ThetaC_DIRC()*TMath::RadToDeg(); + double locKMinusThetaC_DIRC = dKMinusWrapper->Get_Track_ThetaC_DIRC()*TMath::RadToDeg(); + double locKPlusP = locKPlusP4.Vect().Mag(); + double locKMinusP = locKMinusP4.Vect().Mag(); + dHist_KPlusDIRCThetaCVsP->Fill(locKPlusP, locKPlusThetaC_DIRC); + dHist_KMinusDIRCThetaCVsP->Fill(locKMinusP, locKMinusThetaC_DIRC); + + double locKPlusLdiff_DIRC = dKPlusWrapper->Get_Track_Lpi_DIRC() - dKPlusWrapper->Get_Track_Lk_DIRC(); + double locKMinusLdiff_DIRC = dKMinusWrapper->Get_Track_Lpi_DIRC() - dKMinusWrapper->Get_Track_Lk_DIRC(); + + dHist_Ldiff->Fill(locKPlusLdiff_DIRC, locKMinusLdiff_DIRC); + + } // end of combo loop + + //FILL HISTOGRAMS: Num combos / events surviving actions + Fill_NumCombosSurvivedHists(); + + return kTRUE; +} + +void DSelector_kpkm::Finalize(void) +{ + //Save anything to output here that you do not want to be in the default DSelector output ROOT file. + + //Otherwise, don't do anything else (especially if you are using PROOF). + //If you are using PROOF, this function is called on each thread, + //so anything you do will not have the combined information from the various threads. + //Besides, it is best-practice to do post-processing (e.g. fitting) separately, in case there is a problem. 
+ + //DO YOUR STUFF HERE + + //CALL THIS LAST + DSelector::Finalize(); //Saves results to the output file +} diff --git a/dirc/dselector/DSelector_kpkm.h b/dirc/dselector/DSelector_kpkm.h new file mode 100644 index 00000000..0b5b5bb6 --- /dev/null +++ b/dirc/dselector/DSelector_kpkm.h @@ -0,0 +1,63 @@ +#ifndef DSelector_kpkm_h +#define DSelector_kpkm_h + +#include + +#include "DSelector/DSelector.h" +#include "DSelector/DHistogramActions.h" +#include "DSelector/DCutActions.h" + +#include "TH1I.h" +#include "TH2I.h" + +class DSelector_kpkm : public DSelector +{ + public: + + DSelector_kpkm(TTree* locTree = NULL) : DSelector(locTree){} + virtual ~DSelector_kpkm(){} + + void Init(TTree *tree); + Bool_t Process(Long64_t entry); + + private: + + void Get_ComboWrappers(void); + void Finalize(void); + + // BEAM POLARIZATION INFORMATION + UInt_t dPreviousRunNumber; + bool dIsPolarizedFlag; //else is AMO + bool dIsPARAFlag; //else is PERP or AMO + + //CREATE REACTION-SPECIFIC PARTICLE ARRAYS + + //Step 0 + DParticleComboStep* dStep0Wrapper; + DBeamParticle* dComboBeamWrapper; + DChargedTrackHypothesis* dKPlusWrapper; + DChargedTrackHypothesis* dKMinusWrapper; + DChargedTrackHypothesis* dProtonWrapper; + + // DEFINE YOUR HISTOGRAMS HERE + // EXAMPLES: + TH1I* dHist_MissingMassSquared; + TH1I* dHist_BeamEnergy; + TH2F* dHist_KPlusDIRCXY, *dHist_KMinusDIRCXY; + TH2F* dHist_KPlusDIRCThetaCVsP, *dHist_KMinusDIRCThetaCVsP; + TH2F *dHist_Ldiff; + + ClassDef(DSelector_kpkm, 0); +}; + +void DSelector_kpkm::Get_ComboWrappers(void) +{ + //Step 0 + dStep0Wrapper = dComboWrapper->Get_ParticleComboStep(0); + dComboBeamWrapper = static_cast(dStep0Wrapper->Get_InitialParticle()); + dKPlusWrapper = static_cast(dStep0Wrapper->Get_FinalParticle(0)); + dKMinusWrapper = static_cast(dStep0Wrapper->Get_FinalParticle(1)); + dProtonWrapper = static_cast(dStep0Wrapper->Get_FinalParticle(2)); +} + +#endif // DSelector_kpkm_h diff --git a/dirc/dselector/DSelector_pippim.C b/dirc/dselector/DSelector_pippim.C index 8e5a9679..55378c8a 100644 --- a/dirc/dselector/DSelector_pippim.C +++ b/dirc/dselector/DSelector_pippim.C @@ -30,8 +30,8 @@ void DSelector_pippim::Init(TTree *locTree) // EXAMPLE: Create deque for histogramming particle masses: // // For histogramming the phi mass in phi -> K+ K- // // Be sure to change this and dAnalyzeCutActions to match reaction - std::deque MyPhi; - MyPhi.push_back(KPlus); MyPhi.push_back(KMinus); + std::deque MyRho; + MyRho.push_back(PiPlus); MyRho.push_back(PiMinus); //ANALYSIS ACTIONS: //Executed in order if added to dAnalysisActions //false/true below: use measured/kinfit data @@ -39,33 +39,30 @@ void DSelector_pippim::Init(TTree *locTree) //PID dAnalysisActions.push_back(new DHistogramAction_ParticleID(dComboWrapper, false)); //below: value: +/- N ns, Unknown: All PIDs, SYS_NULL: all timing systems - //dAnalysisActions.push_back(new DCutAction_PIDDeltaT(dComboWrapper, false, 0.5, KPlus, SYS_BCAL)); + dAnalysisActions.push_back(new DCutAction_PIDDeltaT(dComboWrapper, false, 0.2, PiPlus, SYS_TOF)); + dAnalysisActions.push_back(new DCutAction_PIDDeltaT(dComboWrapper, false, 0.2, PiMinus, SYS_TOF)); //MASSES - //dAnalysisActions.push_back(new DHistogramAction_InvariantMass(dComboWrapper, false, Lambda, 1000, 1.0, 1.2, "Lambda")); - //dAnalysisActions.push_back(new DHistogramAction_MissingMassSquared(dComboWrapper, false, 1000, -0.1, 0.1)); + dAnalysisActions.push_back(new DHistogramAction_MissingMassSquared(dComboWrapper, false, 1000, -0.1, 0.1)); //KINFIT RESULTS dAnalysisActions.push_back(new 
DHistogramAction_KinFitResults(dComboWrapper)); + dAnalysisActions.push_back(new DCutAction_KinFitFOM(dComboWrapper, 0.001)); //CUT MISSING MASS - //dAnalysisActions.push_back(new DCutAction_MissingMassSquared(dComboWrapper, false, -0.03, 0.02)); + dAnalysisActions.push_back(new DCutAction_MissingMassSquared(dComboWrapper, false, -0.02, 0.02)); + + dAnalysisActions.push_back(new DHistogramAction_InvariantMass(dComboWrapper, false, 0, MyRho, 500, 0.3, 1.5, "Rho")); //BEAM ENERGY dAnalysisActions.push_back(new DHistogramAction_BeamEnergy(dComboWrapper, false)); - //dAnalysisActions.push_back(new DCutAction_BeamEnergy(dComboWrapper, false, 8.4, 9.05)); //KINEMATICS dAnalysisActions.push_back(new DHistogramAction_ParticleComboKinematics(dComboWrapper, false)); - // ANALYZE CUT ACTIONS - // // Change MyPhi to match reaction - dAnalyzeCutActions = new DHistogramAction_AnalyzeCutActions( dAnalysisActions, dComboWrapper, false, 0, MyPhi, 1000, 0.9, 2.4, "CutActionEffect" ); - //INITIALIZE ACTIONS //If you create any actions that you want to run manually (i.e. don't add to dAnalysisActions), be sure to initialize them here as well Initialize_Actions(); - dAnalyzeCutActions->Initialize(); // manual action, must call Initialize() /******************************** EXAMPLE USER INITIALIZATION: STAND-ALONE HISTOGRAMS *******************************/ @@ -73,43 +70,13 @@ void DSelector_pippim::Init(TTree *locTree) dHist_MissingMassSquared = new TH1I("MissingMassSquared", ";Missing Mass Squared (GeV/c^{2})^{2}", 600, -0.06, 0.06); dHist_BeamEnergy = new TH1I("BeamEnergy", ";Beam Energy (GeV)", 600, 0.0, 12.0); - /************************** EXAMPLE USER INITIALIZATION: CUSTOM OUTPUT BRANCHES - MAIN TREE *************************/ - - //EXAMPLE MAIN TREE CUSTOM BRANCHES (OUTPUT ROOT FILE NAME MUST FIRST BE GIVEN!!!! (ABOVE: TOP)): - //The type for the branch must be included in the brackets - //1st function argument is the name of the branch - //2nd function argument is the name of the branch that contains the size of the array (for fundamentals only) - /* - dTreeInterface->Create_Branch_Fundamental("my_int"); //fundamental = char, int, float, double, etc. - dTreeInterface->Create_Branch_FundamentalArray("my_int_array", "my_int"); - dTreeInterface->Create_Branch_FundamentalArray("my_combo_array", "NumCombos"); - dTreeInterface->Create_Branch_NoSplitTObject("my_p4"); - dTreeInterface->Create_Branch_ClonesArray("my_p4_array"); - */ - - /************************** EXAMPLE USER INITIALIZATION: CUSTOM OUTPUT BRANCHES - FLAT TREE *************************/ - - //EXAMPLE FLAT TREE CUSTOM BRANCHES (OUTPUT ROOT FILE NAME MUST FIRST BE GIVEN!!!! (ABOVE: TOP)): - //The type for the branch must be included in the brackets - //1st function argument is the name of the branch - //2nd function argument is the name of the branch that contains the size of the array (for fundamentals only) - /* - dFlatTreeInterface->Create_Branch_Fundamental("flat_my_int"); //fundamental = char, int, float, double, etc. 
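	//EXAMPLE (optional sketch): to select the coherent peak, the beam-energy cut action from the
	//original template (same constructor arguments as the removed line above) can be re-enabled
	//alongside the other actions, before Initialize_Actions(); the 8.2-8.8 GeV window is illustrative.
	/*
	dAnalysisActions.push_back(new DCutAction_BeamEnergy(dComboWrapper, false, 8.2, 8.8));
	*/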
- dFlatTreeInterface->Create_Branch_FundamentalArray("flat_my_int_array", "flat_my_int"); - dFlatTreeInterface->Create_Branch_NoSplitTObject("flat_my_p4"); - dFlatTreeInterface->Create_Branch_ClonesArray("flat_my_p4_array"); - */ - - /************************************* ADVANCED EXAMPLE: CHOOSE BRANCHES TO READ ************************************/ - - //TO SAVE PROCESSING TIME - //If you know you don't need all of the branches/data, but just a subset of it, you can speed things up - //By default, for each event, the data is retrieved for all branches - //If you know you only need data for some branches, you can skip grabbing data from the branches you don't need - //Do this by doing something similar to the commented code below - - //dTreeInterface->Clear_GetEntryBranches(); //now get none - //dTreeInterface->Register_GetEntryBranch("Proton__P4"); //manually set the branches you want + //DIRC HISTOGRAMS + dHist_PiPlusDIRCXY = new TH2F("PiPlusDIRCXY", "; X (cm); Y (cm)", 300, -150, 150, 300, -150, 150); + dHist_PiMinusDIRCXY = new TH2F("PiMinusDIRCXY", "; X (cm); Y (cm)", 300, -150, 150, 300, -150, 150); + dHist_PiPlusDIRCThetaCVsP = new TH2F("PiPlusDIRCThetaCVsP", "; P (GeV); #theta_{C}", 300, 0., 10., 300, 0., 60.); + dHist_PiMinusDIRCThetaCVsP = new TH2F("PiMinusDIRCThetaCVsP", "; P (GeV); #theta_{C}", 300, 0., 10., 300, 0., 60.); + dHist_Ldiff = new TH2F("Ldiff", "; Minus L_{#pi} - L_{K} ; Plus L_{#pi} - L_{K}", 200, -200, 200, 200, -200, 200); + } Bool_t DSelector_pippim::Process(Long64_t locEntry) @@ -146,38 +113,6 @@ Bool_t DSelector_pippim::Process(Long64_t locEntry) //ANALYSIS ACTIONS: Reset uniqueness tracking for each action //For any actions that you are executing manually, be sure to call Reset_NewEvent() on them here Reset_Actions_NewEvent(); - dAnalyzeCutActions->Reset_NewEvent(); // manual action, must call Reset_NewEvent() - - //PREVENT-DOUBLE COUNTING WHEN HISTOGRAMMING - //Sometimes, some content is the exact same between one combo and the next - //e.g. maybe two combos have different beam particles, but the same data for the final-state - //When histogramming, you don't want to double-count when this happens: artificially inflates your signal (or background) - //So, for each quantity you histogram, keep track of what particles you used (for a given combo) - //Then for each combo, just compare to what you used before, and make sure it's unique - - //EXAMPLE 1: Particle-specific info: - set locUsedSoFar_BeamEnergy; //Int_t: Unique ID for beam particles. 
set: easy to use, fast to search - - //EXAMPLE 2: Combo-specific info: - //In general: Could have multiple particles with the same PID: Use a set of Int_t's - //In general: Multiple PIDs, so multiple sets: Contain within a map - //Multiple combos: Contain maps within a set (easier, faster to search) - set > > locUsedSoFar_MissingMass; - - //INSERT USER ANALYSIS UNIQUENESS TRACKING HERE - - /**************************************** EXAMPLE: FILL CUSTOM OUTPUT BRANCHES **************************************/ - - /* - Int_t locMyInt = 7; - dTreeInterface->Fill_Fundamental("my_int", locMyInt); - - TLorentzVector locMyP4(4.0, 3.0, 2.0, 1.0); - dTreeInterface->Fill_TObject("my_p4", locMyP4); - - for(int loc_i = 0; loc_i < locMyInt; ++loc_i) - dTreeInterface->Fill_Fundamental("my_int_array", 3*loc_i, loc_i); //2nd argument = value, 3rd = array index - */ /************************************************* LOOP OVER COMBOS *************************************************/ @@ -229,147 +164,57 @@ Bool_t DSelector_pippim::Process(Long64_t locEntry) /******************************************** EXECUTE ANALYSIS ACTIONS *******************************************/ // Loop through the analysis actions, executing them in order for the active particle combo - dAnalyzeCutActions->Perform_Action(); // Must be executed before Execute_Actions() if(!Execute_Actions()) //if the active combo fails a cut, IsComboCutFlag automatically set continue; //if you manually execute any actions, and it fails a cut, be sure to call: //dComboWrapper->Set_IsComboCut(true); - /**************************************** EXAMPLE: FILL CUSTOM OUTPUT BRANCHES **************************************/ - - /* - TLorentzVector locMyComboP4(8.0, 7.0, 6.0, 5.0); - //for arrays below: 2nd argument is value, 3rd is array index - //NOTE: By filling here, AFTER the cuts above, some indices won't be updated (and will be whatever they were from the last event) - //So, when you draw the branch, be sure to cut on "IsComboCut" to avoid these. 
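	//NOTE (sketch only): with the uniqueness-tracking example removed, dHist_BeamEnergy is filled
	//once per surviving combo, so a beam photon shared by several combos is counted repeatedly.
	//The template's original guard can be restored as follows:
	/*
	//before the combo loop:
	set<Int_t> locUsedSoFar_BeamEnergy; //Int_t: unique ID for beam particles

	//inside the combo loop, in place of the unconditional Fill():
	if(locUsedSoFar_BeamEnergy.find(locBeamID) == locUsedSoFar_BeamEnergy.end())
	{
		dHist_BeamEnergy->Fill(locBeamP4.E());
		locUsedSoFar_BeamEnergy.insert(locBeamID);
	}
	*/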
- dTreeInterface->Fill_Fundamental("my_combo_array", -2*loc_i, loc_i); - dTreeInterface->Fill_TObject("my_p4_array", locMyComboP4, loc_i); - */ - /**************************************** EXAMPLE: HISTOGRAM BEAM ENERGY *****************************************/ - - //Histogram beam energy (if haven't already) - if(locUsedSoFar_BeamEnergy.find(locBeamID) == locUsedSoFar_BeamEnergy.end()) - { - dHist_BeamEnergy->Fill(locBeamP4.E()); - locUsedSoFar_BeamEnergy.insert(locBeamID); - } + + dHist_BeamEnergy->Fill(locBeamP4.E()); /************************************ EXAMPLE: HISTOGRAM MISSING MASS SQUARED ************************************/ //Missing Mass Squared double locMissingMassSquared = locMissingP4_Measured.M2(); + dHist_MissingMassSquared->Fill(locMissingMassSquared); + + /************************************ EXAMPLE: DIRC HISTOGRAMS ************************************/ + // location on DIRC XY plane + double locPiPlusDIRCX = dPiPlusWrapper->Get_Track_ExtrapolatedX_DIRC(); + double locPiPlusDIRCY = dPiPlusWrapper->Get_Track_ExtrapolatedY_DIRC(); + double locPiMinusDIRCX = dPiMinusWrapper->Get_Track_ExtrapolatedX_DIRC(); + double locPiMinusDIRCY = dPiMinusWrapper->Get_Track_ExtrapolatedY_DIRC(); + dHist_PiPlusDIRCXY->Fill(locPiPlusDIRCX, locPiPlusDIRCY); + dHist_PiMinusDIRCXY->Fill(locPiMinusDIRCX, locPiMinusDIRCY); + + // DIRC performance variables (likelihood, theta_C), see code for more + // gluex_root_analysis/libraries/DSelector/DChargedTrackHypothesis.h + int locPiPlusNumPhotons_DIRC = dPiPlusWrapper->Get_Track_NumPhotons_DIRC(); + int locPiMinusNumPhotons_DIRC = dPiMinusWrapper->Get_Track_NumPhotons_DIRC(); + + // require a minimum number of detected photons + if(locPiPlusNumPhotons_DIRC < 5 || locPiMinusNumPhotons_DIRC < 5) + continue; + + double locPiPlusThetaC_DIRC = dPiPlusWrapper->Get_Track_ThetaC_DIRC()*TMath::RadToDeg(); + double locPiMinusThetaC_DIRC = dPiMinusWrapper->Get_Track_ThetaC_DIRC()*TMath::RadToDeg(); + double locPiPlusP = locPiPlusP4.Vect().Mag(); + double locPiMinusP = locPiMinusP4.Vect().Mag(); + dHist_PiPlusDIRCThetaCVsP->Fill(locPiPlusP, locPiPlusThetaC_DIRC); + dHist_PiMinusDIRCThetaCVsP->Fill(locPiMinusP, locPiMinusThetaC_DIRC); + + double locPiPlusLdiff_DIRC = dPiPlusWrapper->Get_Track_Lpi_DIRC() - dPiPlusWrapper->Get_Track_Lk_DIRC(); + double locPiMinusLdiff_DIRC = dPiMinusWrapper->Get_Track_Lpi_DIRC() - dPiMinusWrapper->Get_Track_Lk_DIRC(); + + dHist_Ldiff->Fill(locPiPlusLdiff_DIRC, locPiMinusLdiff_DIRC); - //Uniqueness tracking: Build the map of particles used for the missing mass - //For beam: Don't want to group with final-state photons. Instead use "Unknown" PID (not ideal, but it's easy). - map > locUsedThisCombo_MissingMass; - locUsedThisCombo_MissingMass[Unknown].insert(locBeamID); //beam - locUsedThisCombo_MissingMass[PiPlus].insert(locPiPlusTrackID); - locUsedThisCombo_MissingMass[PiMinus].insert(locPiMinusTrackID); - locUsedThisCombo_MissingMass[Proton].insert(locProtonTrackID); - - //compare to what's been used so far - if(locUsedSoFar_MissingMass.find(locUsedThisCombo_MissingMass) == locUsedSoFar_MissingMass.end()) - { - //unique missing mass combo: histogram it, and register this combo of particles - dHist_MissingMassSquared->Fill(locMissingMassSquared); - locUsedSoFar_MissingMass.insert(locUsedThisCombo_MissingMass); - } - - //E.g. 
Cut - //if((locMissingMassSquared < -0.04) || (locMissingMassSquared > 0.04)) - //{ - // dComboWrapper->Set_IsComboCut(true); - // continue; - //} - - /****************************************** FILL FLAT TREE (IF DESIRED) ******************************************/ - - /* - //FILL ANY CUSTOM BRANCHES FIRST!! - Int_t locMyInt_Flat = 7; - dFlatTreeInterface->Fill_Fundamental("flat_my_int", locMyInt_Flat); - - TLorentzVector locMyP4_Flat(4.0, 3.0, 2.0, 1.0); - dFlatTreeInterface->Fill_TObject("flat_my_p4", locMyP4_Flat); - - for(int loc_j = 0; loc_j < locMyInt_Flat; ++loc_j) - { - dFlatTreeInterface->Fill_Fundamental("flat_my_int_array", 3*loc_j, loc_j); //2nd argument = value, 3rd = array index - TLorentzVector locMyComboP4_Flat(8.0, 7.0, 6.0, 5.0); - dFlatTreeInterface->Fill_TObject("flat_my_p4_array", locMyComboP4_Flat, loc_j); - } - */ - - //FILL FLAT TREE - //Fill_FlatTree(); //for the active combo } // end of combo loop //FILL HISTOGRAMS: Num combos / events surviving actions Fill_NumCombosSurvivedHists(); - /******************************************* LOOP OVER THROWN DATA (OPTIONAL) ***************************************/ -/* - //Thrown beam: just use directly - if(dThrownBeam != NULL) - double locEnergy = dThrownBeam->Get_P4().E(); - - //Loop over throwns - for(UInt_t loc_i = 0; loc_i < Get_NumThrown(); ++loc_i) - { - //Set branch array indices corresponding to this particle - dThrownWrapper->Set_ArrayIndex(loc_i); - - //Do stuff with the wrapper here ... - } -*/ - /****************************************** LOOP OVER OTHER ARRAYS (OPTIONAL) ***************************************/ -/* - //Loop over beam particles (note, only those appearing in combos are present) - for(UInt_t loc_i = 0; loc_i < Get_NumBeam(); ++loc_i) - { - //Set branch array indices corresponding to this particle - dBeamWrapper->Set_ArrayIndex(loc_i); - - //Do stuff with the wrapper here ... - } - - //Loop over charged track hypotheses - for(UInt_t loc_i = 0; loc_i < Get_NumChargedHypos(); ++loc_i) - { - //Set branch array indices corresponding to this particle - dChargedHypoWrapper->Set_ArrayIndex(loc_i); - - //Do stuff with the wrapper here ... - } - - //Loop over neutral particle hypotheses - for(UInt_t loc_i = 0; loc_i < Get_NumNeutralHypos(); ++loc_i) - { - //Set branch array indices corresponding to this particle - dNeutralHypoWrapper->Set_ArrayIndex(loc_i); - - //Do stuff with the wrapper here ... 
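	//EXAMPLE (sketch only): a possible body for the charged-hypothesis loop above, assuming the
	//hypothesis wrapper exposes Get_P4_Measured() like the combo particle wrappers do; the
	//histogram is hypothetical and would need to be booked in Init().
	/*
	for(UInt_t loc_i = 0; loc_i < Get_NumChargedHypos(); ++loc_i)
	{
		dChargedHypoWrapper->Set_ArrayIndex(loc_i);
		dHist_HypoMomentum->Fill(dChargedHypoWrapper->Get_P4_Measured().P()); //hypothetical histogram
	}
	*/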
- } -*/ - - /************************************ EXAMPLE: FILL CLONE OF TTREE HERE WITH CUTS APPLIED ************************************/ -/* - Bool_t locIsEventCut = true; - for(UInt_t loc_i = 0; loc_i < Get_NumCombos(); ++loc_i) { - //Set branch array indices for combo and all combo particles - dComboWrapper->Set_ComboIndex(loc_i); - // Is used to indicate when combos have been cut - if(dComboWrapper->Get_IsComboCut()) - continue; - locIsEventCut = false; // At least one combo succeeded - break; - } - if(!locIsEventCut && dOutputTreeFileName != "") - Fill_OutputTree(); -*/ - return kTRUE; } diff --git a/dirc/dselector/DSelector_pippim.h b/dirc/dselector/DSelector_pippim.h index 8045cce6..0da73429 100644 --- a/dirc/dselector/DSelector_pippim.h +++ b/dirc/dselector/DSelector_pippim.h @@ -30,10 +30,6 @@ class DSelector_pippim : public DSelector bool dIsPolarizedFlag; //else is AMO bool dIsPARAFlag; //else is PERP or AMO - // ANALYZE CUT ACTIONS - // // Automatically makes mass histograms where one cut is missing - DHistogramAction_AnalyzeCutActions* dAnalyzeCutActions; - //CREATE REACTION-SPECIFIC PARTICLE ARRAYS //Step 0 @@ -47,6 +43,9 @@ class DSelector_pippim : public DSelector // EXAMPLES: TH1I* dHist_MissingMassSquared; TH1I* dHist_BeamEnergy; + TH2F* dHist_PiPlusDIRCXY, *dHist_PiMinusDIRCXY; + TH2F* dHist_PiPlusDIRCThetaCVsP, *dHist_PiMinusDIRCThetaCVsP; + TH2F *dHist_Ldiff; ClassDef(DSelector_pippim, 0); }; diff --git a/dirc/dselector/runSelector.C b/dirc/dselector/runSelector.C new file mode 100644 index 00000000..74bf75d5 --- /dev/null +++ b/dirc/dselector/runSelector.C @@ -0,0 +1,56 @@ +// macro to process analysis TTree with TSelector +#include + +#include "TFile.h" +#include "TTree.h" +#include "TString.h" +#include "TSystem.h" + +void runSelector(TString runNumber = "72422", TString myPath = "/cache/halld/RunPeriod-2019-11/analysis/ver01/tree_kpkm__B4/merged/") +{ + // Load DSelector library + gROOT->ProcessLine(".x $(ROOT_ANALYSIS_HOME)/scripts/Load_DSelector.C"); + int Proof_Nthreads = 8; + + // process signal + TString sampleDir = myPath; + cout<<"running selector on files in: "<GetName(); + if(fileName.Contains(runNumber)) { + cout< skipping"< skipping"<Add(sampleDir+fileName); + ifile++; + } + } + + cout<<"total entries in TChain = "<GetEntries()<<" from "< +#include +#include "TPaletteAxis.h" +#include +#include +#include "TEventList.h" +#include "TCut.h" +#include "TEntryList.h" + +#include +#include +#include + +using namespace std; + +#ifdef glx__sim +class DrcEvent; +class DrcHit; +DrcEvent* glx_event(0); +#endif + +Int_t glx_geometry=0; +TRandom glx_random; + +const Int_t glx_nrow(6),glx_ncol(18); +const Int_t glx_npmt(glx_nrow*glx_ncol); +const Int_t glx_npix(64); +const Int_t glx_maxch(glx_npmt*glx_npix); +const Int_t glx_nch(glx_npmt*glx_npix); +const Int_t glx_npixtot(glx_npmt*glx_npix*2); // 2 optical box +TEntryList *glx_elist; + +TChain* glx_ch(0); +//TTree* glx_ch(0); +Int_t glx_entries(0), glx_momentum(0),glx_pdg(0),glx_test1(0),glx_test2(0),glx_last_maxz,glx_last_minz; +Double_t glx_theta(0),glx_phi(0); +TString glx_savepath(""), glx_info(""); +TH2F* glx_hdigi[glx_npmt]; +TClonesArray* glx_events; +int glx_apdg[]={11,13,211,321,2212}; +double glx_mass[] = {0.000511,0.1056584,0.139570,0.49368,0.9382723}; +TString glx_names[] = {"electron","muon","pion","kaon","proton"}; + +Int_t map_mpc[glx_maxch/64][glx_npix]; +Int_t map_pmt[glx_maxch]; +Int_t map_pix[glx_maxch]; +Int_t map_row[glx_maxch]; +Int_t map_col[glx_maxch]; +Int_t map_ssp_slot[glx_maxch]; +Int_t 
map_ssp_fiber[glx_maxch]; +Double_t glx_particleArray[3000]; + +void glx_writeString(TString filename, TString str){ + ofstream myfile; + myfile.open (filename); + myfile << str+"\n"; + myfile.close(); +} + +void glx_createMap(){ + for(Int_t ch=0; ch53) br+=18; + + if((br>8 && br<18) || br>21) slot=5; + + if(br<4) br=12+br%4; + else if(br<8) br=20+br%4; + else if(br<12) br=4+br%4; + else if(br<16) br=12+br%4; + else if(br<18) br=20+br%4; + else if(br<22) br=16+(br-2)%4; + else if(br<26) br=(br-2)%4; + else if(br<30) br=8+(br-2)%4; + else if(br<34) br=16+(br-2)%4; + else if(br<36) br=22+(br-2)%4; + + map_mpc[pmt][pix]=ch; + map_pmt[ch] = pmt; + map_pix[ch] = pix; + map_row[ch] = row; + map_col[ch] = col; + map_ssp_slot[ch] = slot; + map_ssp_fiber[ch] = br; + } + + for(Int_t i=0; i<5; i++){ + glx_particleArray[glx_apdg[i]]=i; + } +} + +Int_t glx_getChNum(Int_t npmt, Int_t npix){ + Int_t ch = -1; + ch = 64*npmt+npix; + return ch; +} + +TString glx_randstr(Int_t len = 10){ + TString str = ""; + static const char alphanum[] = + "0123456789" + "ABCDEFGHIJKLMNOPQRSTUVWXYZ" + "abcdefghijklmnopqrstuvwxyz"; + + for (int i = 0; i < len; ++i) { + str += alphanum[rand() % (sizeof(alphanum) - 1)]; + } + return str; +} + +void glx_drawMap(TH2F *map[], double max){ + + TH2F *hmap = new TH2F("hmap",";x [cm];y [cm]",200,-105,105,200,-105,105); + double tt =200; + hmap->Fill(tt,tt,tt); + hmap->GetZaxis()->SetRangeUser(0,max); + hmap->Draw("colz"); + + double pos[] = {0.382,0.185,0.618,0.815}; + double w = 0.085; + TPad * pbox[4]; + for(int i=0; i<4; i++){ + pbox[i] = new TPad(Form("P%d",i),"T", 0.1,pos[i]-w,0.9,pos[i]+w, 21); + pbox[i]->SetFillStyle(0); + pbox[i]->SetMargin(0.0,0.0,0.0,0.0); + pbox[i]->SetFillColor(0); + pbox[i]->Draw(); + + map[i]->GetXaxis()->SetNdivisions(0); + map[i]->GetYaxis()->SetNdivisions(1); + map[i]->GetZaxis()->SetNdivisions(0); + map[i]->GetXaxis()->SetLabelOffset(0); + map[i]->GetYaxis()->SetLabelOffset(0); + map[i]->GetZaxis()->SetLabelOffset(0); + map[i]->GetXaxis()->SetTickLength(0); + map[i]->GetYaxis()->SetTickLength(1); + // map[i]->GetXaxis()->SetAxisColor(15); + // map[i]->GetYaxis()->SetAxisColor(15); + } + + + for(int i=0; i<4; i++){ + pbox[i]->cd(); + map[i]->GetZaxis()->SetRangeUser(0,max); + map[i]->Draw("col"); + gPad->Update(); + + pbox[i]->GetFrame()->SetLineWidth(0); + pbox[i]->GetFrame()->SetLineColor(0); + pbox[i]->GetFrame()->SetBorderMode(0); + pbox[i]->GetFrame()->SetFillStyle(0); + + } +} + +TCanvas *glx_drawDigi(Double_t maxz = 0, Double_t minz = 0){ + + glx_last_maxz = maxz; + glx_last_minz = minz; + TString sid = glx_randstr(3); + auto cdigi = new TCanvas("hp="+sid,"hp_"+sid,800,350); + + TPad* glx_hpads[glx_npmt]; + auto glx_hpglobal = new TPad("P","T",0.005,0.1,0.95,0.9); + glx_hpglobal->SetFillStyle(0); + glx_hpglobal->Draw(); + glx_hpglobal->cd(); + + int nrow(glx_nrow), ncol(glx_ncol); + float bw = 0.001, bh = 0.005; + + float margin = 0.1; + float shift = 0; + float shifth = 0, shiftw = 0.01; + float tbw = 0.001; + float tbh = 0.005; + + int padi = 0; + for(int i=0; iSetFillColor(kCyan-10); + + // glx_hpads[padi] = new TPad( + // Form("P%d", padi), "T", i / (ncol + 2 * margin) + tbw + shift + shiftw, + // j / (double)nrow + tbh + shifth, (i + 1) / (ncol + 2 * margin) - tbw + shift + shiftw, + // (1 + j) / (double)nrow - tbh + shifth, 21); + + glx_hpads[padi]->SetMargin(0.04,0.04,0.04,0.04); + // if((j+1)%6 != 0) + glx_hpads[padi]->Draw(); + padi++; + } + } + + Double_t tmax; + Double_t max=0; + if(maxz==0){ + for(Int_t p=0; pGetMaximum(); + 
if(maxGetMaximum(); + if(maxGetBinContent(i); + if(val!=0) h->Fill(val); + } + } + Double_t integral; + for(Int_t i=0; iIntegral(0,i); + if(integral>5) { + minz = h->GetBinCenter(i); + break; + } + } + + for(Int_t i=tbins; i>0; i--){ + integral = h->Integral(i,tbins); + if(integral>5) { + max = h->GetBinCenter(i); + break; + } + } + } + + Int_t nnmax(0); + glx_hdigi[nnmax]->GetZaxis()->SetLabelSize(0.06); + + TString digidata = "m,p,v\n"; + for(Int_t m=0; mcd(); + glx_hpads[m]->SetName(Form("p_%d",nm)); + glx_hdigi[nm]->Draw("col"); + if(maxz==-1) max = glx_hdigi[nm]->GetBinContent(glx_hdigi[nm]->GetMaximumBin()); + if(nnmaxGetEntries()) nnmax=nm; + glx_hdigi[nm]->SetMaximum(max); + glx_hdigi[nm]->SetMinimum(minz); + + nm = m % glx_ncol * 6 + m / glx_ncol; + for (Int_t i = 1; i <= 8; i++) { + for (Int_t j = 1; j <= 8; j++) { + Double_t weight = (double)(glx_hdigi[nm]->GetBinContent(j, i)) / (double)max * 255; + if (weight > 0) digidata += Form("%d,%d,%d\n", nm, (i - 1) * 8 + j - 1, (Int_t)weight); + } + } + } + glx_writeString("digi_pix.csv",digidata); + + // nnmax++; //! + cdigi->cd(); + glx_hdigi[nnmax]->GetZaxis()->SetLabelSize(0.04); + glx_hdigi[nnmax]->GetZaxis()->SetTickLength(0.01); + auto glx_palette = new TPaletteAxis(0.952,0.1,0.962,0.90,(TH1 *)glx_hdigi[nnmax]); + glx_palette->Draw(); + + cdigi->Modified(); + cdigi->Update(); + + return cdigi; +} + +void glx_initDigi(Int_t type=0){ + TGaxis::SetMaxDigits(3); + if(type == 0){ + for(Int_t m=0; mSetStats(0); + glx_hdigi[m]->SetTitle(0); + glx_hdigi[m]->GetXaxis()->SetNdivisions(10); + glx_hdigi[m]->GetYaxis()->SetNdivisions(10); + glx_hdigi[m]->GetXaxis()->SetLabelOffset(100); + glx_hdigi[m]->GetYaxis()->SetLabelOffset(100); + glx_hdigi[m]->GetXaxis()->SetTickLength(1); + glx_hdigi[m]->GetYaxis()->SetTickLength(1); + glx_hdigi[m]->GetXaxis()->SetAxisColor(15); + glx_hdigi[m]->GetYaxis()->SetAxisColor(15); + } + } +} + +void glx_resetDigi(){ + for(Int_t m=0; mReset("M"); + } +} + +void glx_setPrettyStyle(){ + // Canvas printing details: white bg, no borders. + gStyle->SetCanvasColor(0); + gStyle->SetCanvasBorderMode(0); + gStyle->SetCanvasBorderSize(0); + + // Canvas frame printing details: white bg, no borders. + gStyle->SetFrameFillColor(0); + gStyle->SetFrameBorderMode(0); + gStyle->SetFrameBorderSize(0); + + // Plot title details: centered, no bg, no border, nice font. + gStyle->SetTitleX(0.1); + gStyle->SetTitleW(0.8); + gStyle->SetTitleBorderSize(0); + gStyle->SetTitleFillColor(0); + + // Font details for titles and labels. + gStyle->SetTitleFont(42, "xyz"); + gStyle->SetTitleFont(42, "pad"); + gStyle->SetLabelFont(42, "xyz"); + gStyle->SetLabelFont(42, "pad"); + + // Details for stat box. 
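// Typical usage of the digi helpers defined above (sketch; the pixel-to-bin convention of the
// 8x8 per-PMT histograms is assumed from the GetBinContent(j,i) loop in glx_drawDigi):
/*
  glx_createMap();                  // fill the channel <-> pmt/pixel maps
  glx_initDigi();                   // set up / style the per-PMT pixel histograms
  glx_hdigi[map_pmt[ch]]->Fill(map_pix[ch]%8, map_pix[ch]/8); // fill one hit (hypothetical pixel layout)
  TCanvas *cdigi = glx_drawDigi();  // auto z-range; also writes digi_pix.csv
  cdigi->Print("occupancy.png");
  glx_resetDigi();                  // clear the histograms for the next run
*/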
+ gStyle->SetStatColor(0); + gStyle->SetStatFont(42); + gStyle->SetStatBorderSize(1); + gStyle->SetStatX(0.975); + gStyle->SetStatY(0.9); + + // gStyle->SetOptStat(0); +} + +void glx_setRootPalette(Int_t pal = 0){ + + // pal = 1: rainbow\n" + // pal = 2: reverse-rainbow\n" + // pal = 3: amber\n" + // pal = 4: reverse-amber\n" + // pal = 5: blue/white\n" + // pal = 6: white/blue\n" + // pal = 7: red temperature\n" + // pal = 8: reverse-red temperature\n" + // pal = 9: green/white\n" + // pal = 10: white/green\n" + // pal = 11: orange/blue\n" + // pal = 12: blue/orange\n" + // pal = 13: white/black\n" + // pal = 14: black/white\n" + + const Int_t NRGBs = 5; + const Int_t NCont = 255; + gStyle->SetNumberContours(NCont); + + if (pal < 1 && pal> 14) return; + else pal--; + + Double_t stops[NRGBs] = { 0.00, 0.34, 0.61, 0.84, 1.00 }; + Double_t red[14][NRGBs] = {{ 0.00, 0.00, 0.87, 1.00, 0.51 }, + { 0.51, 1.00, 0.87, 0.00, 0.00 }, + { 0.17, 0.39, 0.62, 0.79, 1.00 }, + { 1.00, 0.79, 0.62, 0.39, 0.17 }, + { 0.00, 0.00, 0.00, 0.38, 1.00 }, + { 1.00, 0.38, 0.00, 0.00, 0.00 }, + { 0.00, 0.50, 0.89, 0.95, 1.00 }, + { 1.00, 0.95, 0.89, 0.50, 0.00 }, + { 0.00, 0.00, 0.38, 0.75, 1.00 }, + { 0.00, 0.34, 0.61, 0.84, 1.00 }, + { 0.75, 1.00, 0.24, 0.00, 0.00 }, + { 0.00, 0.00, 0.24, 1.00, 0.75 }, + { 0.00, 0.34, 0.61, 0.84, 1.00 }, + { 1.00, 0.84, 0.61, 0.34, 0.00 } + }; + Double_t green[14][NRGBs] = {{ 0.00, 0.81, 1.00, 0.20, 0.00 }, + { 0.00, 0.20, 1.00, 0.81, 0.00 }, + { 0.01, 0.02, 0.39, 0.68, 1.00 }, + { 1.00, 0.68, 0.39, 0.02, 0.01 }, + { 0.00, 0.00, 0.38, 0.76, 1.00 }, + { 1.00, 0.76, 0.38, 0.00, 0.00 }, + { 0.00, 0.00, 0.27, 0.71, 1.00 }, + { 1.00, 0.71, 0.27, 0.00, 0.00 }, + { 0.00, 0.35, 0.62, 0.85, 1.00 }, + { 1.00, 0.75, 0.38, 0.00, 0.00 }, + { 0.24, 1.00, 0.75, 0.18, 0.00 }, + { 0.00, 0.18, 0.75, 1.00, 0.24 }, + { 0.00, 0.34, 0.61, 0.84, 1.00 }, + { 1.00, 0.84, 0.61, 0.34, 0.00 } + }; + Double_t blue[14][NRGBs] = {{ 0.51, 1.00, 0.12, 0.00, 0.00 }, + { 0.00, 0.00, 0.12, 1.00, 0.51 }, + { 0.00, 0.09, 0.18, 0.09, 0.00 }, + { 0.00, 0.09, 0.18, 0.09, 0.00 }, + { 0.00, 0.47, 0.83, 1.00, 1.00 }, + { 1.00, 1.00, 0.83, 0.47, 0.00 }, + { 0.00, 0.00, 0.00, 0.40, 1.00 }, + { 1.00, 0.40, 0.00, 0.00, 0.00 }, + { 0.00, 0.00, 0.00, 0.47, 1.00 }, + { 1.00, 0.47, 0.00, 0.00, 0.00 }, + { 0.00, 0.62, 1.00, 0.68, 0.12 }, + { 0.12, 0.68, 1.00, 0.62, 0.00 }, + { 0.00, 0.34, 0.61, 0.84, 1.00 }, + { 1.00, 0.84, 0.61, 0.34, 0.00 } + }; + + + TColor::CreateGradientColorTable(NRGBs, stops, red[pal], green[pal], blue[pal], NCont); + +} + +#ifdef glx__sim +bool glx_init(TString inFile="../build/hits.root", Int_t bdigi=0, TString savepath=""){ + // if(inFile==""){ + // std::cout<<"glxtools: no input file "<Add(inFile); + // glx_ch->SetBranchAddress("DrcEvent", &glx_event); + // glx_entries = glx_ch->GetEntries(); + // std::cout<<"Entries in chain: "<Add(inFile); + glx_events = new TClonesArray("DrcEvent"); + glx_ch->SetBranchAddress("DrcEvent", &glx_events); + + glx_entries = glx_ch->GetEntries(); + std::cout<<"Entries in chain: "<Get("dirc"); + + glx_ch = new TChain("dirc"); + glx_ch->Add(inFile); + glx_events = new TClonesArray("DrcEvent"); + glx_ch->SetBranchAddress("DrcEvent", &glx_events); + // glx_ch->SetMaxVirtualSize(20e+9); + // int res = glx_ch->LoadBaskets(5E+9); + // std::cout<<"res "<Draw(">>glx_cutlist",TCut(cut),"entrylist"); + glx_elist = (TEntryList*)gDirectory->Get("glx_cutlist"); + + glx_entries = glx_ch->GetEntries(); + std::cout<<"Entries in chain: "<GetN()<GetEntry(ievent); + if(ievent%printstep==0 && 
ievent!=0) cout<<"Event # "<GetHitSize()<GetApplication()){ + TIter next(gROOT->GetApplication()->InputFiles()); + TObjString *os=0; + while((os = (TObjString*)next())){ + glx_info += os->GetString()+" "; + } + glx_info += "\n"; + } + glx_momentum = glx_event->GetMomentum().Mag(); + glx_pdg = glx_event->GetPdg(); + glx_test1 = glx_event->GetTest1(); + glx_test2 = glx_event->GetTest2(); + glx_theta=glx_event->GetMomentum().Theta()*180/TMath::Pi(); + glx_phi=glx_event->GetMomentum().Phi()*180/TMath::Pi(); + } +} + +void glx_nextEventc(Int_t ievent,Int_t itrack, Int_t printstep){ + glx_event= (DrcEvent*) glx_events->At(itrack); + if(ievent%printstep==0 && ievent!=0 && itrack==0) cout<<"Event # "<GetHitSize()<GetApplication()){ + TIter next(gROOT->GetApplication()->InputFiles()); + TObjString *os=0; + while((os = (TObjString*)next())){ + glx_info += os->GetString()+" "; + } + glx_info += "\n"; + } + // glx_momentum = glx_event->GetMomentum().Mag(); + // glx_pdg = glx_event->GetPdg(); + // glx_test1 = glx_event->GetTest1(); + // glx_test2 = glx_event->GetTest2(); + // glx_theta=glx_event->GetMomentum().Theta()*180/TMath::Pi(); + // glx_phi=glx_event->GetMomentum().Phi()*180/TMath::Pi(); +} + +#endif + +TSpectrum *glx_spect = new TSpectrum(2); +TF1 *glx_gaust; +TVector3 glx_fit(TH1F *h, Double_t range = 3, Double_t threshold=20, Double_t limit=2, Int_t peakSearch=1,TString opt=""){ + Int_t binmax = h->GetMaximumBin(); + Double_t xmax = h->GetXaxis()->GetBinCenter(binmax); + glx_gaust = new TF1("glx_gaust","[0]*exp(-0.5*((x-[1])/[2])^2)",xmax-range,xmax+range); + glx_gaust->SetNpx(500); + glx_gaust->SetParNames("const","mean","sigma"); + glx_gaust->SetLineColor(2); + Double_t integral = h->Integral(h->GetXaxis()->FindBin(xmax-range),h->GetXaxis()->FindBin(xmax+range)); + Double_t xxmin, xxmax, sigma1(0), mean1(0), sigma2(0), mean2(0); + xxmax = xmax; + xxmin = xxmax; + Int_t nfound(1); + if(integral>threshold){ + + if(peakSearch == 1){ + glx_gaust->SetParameter(1,xmax); + glx_gaust->SetParameter(2,0.6*limit); + glx_gaust->SetParLimits(2,0.1*limit,limit); + h->Fit("glx_gaust",opt,"",xxmin-range, xxmax+range); + h->Fit("glx_gaust",opt,"",xxmin-range, xxmax+range); + } + + if(peakSearch == 2){ + nfound = glx_spect->Search(h,4,"",0.1); + std::cout<<"nfound "<SetNpx(500); + glx_gaust->SetParameter(1,glx_spect->GetPositionX()[0]); + }else if(nfound==2) { + Double_t p1 = glx_spect->GetPositionX()[0]; + Double_t p2 = glx_spect->GetPositionX()[1]; + if(p1>p2) { + xxmax = p1; + xxmin = p2; + }else { + xxmax = p1; + xxmin = p2; + } + glx_gaust =new TF1("glx_gaust","gaus(0)+gaus(3)",xmax-range,xmax+range); + glx_gaust->SetNpx(500); + glx_gaust->SetParameter(0,1000); + glx_gaust->SetParameter(3,1000); + + glx_gaust->FixParameter(1,xxmin); + glx_gaust->FixParameter(4,xxmax); + glx_gaust->SetParameter(2,0.1); + glx_gaust->SetParameter(5,0.1); + h->Fit("glx_gaust","","MQN",xxmin-range, xxmax+range); + glx_gaust->ReleaseParameter(1); + glx_gaust->ReleaseParameter(4); + } + + glx_gaust->SetParameter(2,0.2); + glx_gaust->SetParameter(5,0.2); + } + + //h->Fit("glx_gaust",opt,"MQN",xxmin-range, xxmax+range); + mean1 = glx_gaust->GetParameter(1); + sigma1 = glx_gaust->GetParameter(2); + if(sigma1>10) sigma1=10; + + if(peakSearch == 2){ + mean2 = (nfound==1) ? glx_gaust->GetParameter(1) : glx_gaust->GetParameter(4); + sigma2 = (nfound==1) ? 
glx_gaust->GetParameter(2) : glx_gaust->GetParameter(5); + } + } + delete glx_gaust; + return TVector3(mean1,sigma1,mean2); +} + +TString glx_randStr(Int_t len = 10){ + gSystem->Sleep(1500); + srand (time(NULL)); + TString str = ""; + static const char alphanum[] = + "0123456789" + "ABCDEFGHIJKLMNOPQRSTUVWXYZ" + "abcdefghijklmnopqrstuvwxyz"; + + for (int i = 0; i < len; ++i) { + str += alphanum[rand() % (sizeof(alphanum) - 1)]; + } + return str; +} + +Int_t glx_getColorId(Int_t ind, Int_t style =0){ + Int_t cid = 1; + if(style==0) { + cid=ind+1; + if(cid==5) cid =8; + if(cid==3) cid =15; + } + if(style==1) cid=ind+300; + return cid; +} + +Int_t glx_shiftHist(TH1F *hist, Double_t double_shift){ + Int_t bins=hist->GetXaxis()->GetNbins(); + Double_t xmin=hist->GetXaxis()->GetBinLowEdge(1); + Double_t xmax=hist->GetXaxis()->GetBinUpEdge(bins); + double_shift=double_shift*(bins/(xmax-xmin)); + Int_t shift=0; + if(double_shift<0) shift=TMath::FloorNint(double_shift); + if(double_shift>0) shift=TMath::CeilNint(double_shift); + if(shift==0) return 0; + if(shift>0){ + for(Int_t i=1; i<=bins; i++){ + if(i+shift<=bins) hist->SetBinContent(i,hist->GetBinContent(i+shift)); + if(i+shift>bins) hist->SetBinContent(i,0); + } + return 0; + } + if(shift<0){ + for(Int_t i=bins; i>0; i--){ + if(i+shift>0) hist->SetBinContent(i,hist->GetBinContent(i+shift)); + if(i+shift<=0) hist->SetBinContent(i,0); + } + return 0; + } + return 1; +} + +void glx_writeInfo(TString filename){ + ofstream myfile; + myfile.open (filename); + myfile << glx_info+"\n"; + myfile.close(); +} + + +TString glx_createDir(){ + TString finalpath = glx_savepath; + + if(finalpath =="") return ""; + + if(glx_savepath == "auto") { + TString dir = "data"; + gSystem->mkdir(dir); + TDatime *time = new TDatime(); + TString path(""), stime = Form("%d.%d.%d", time->GetDay(),time->GetMonth(),time->GetYear()); + gSystem->mkdir(dir+"/"+stime); + for(Int_t i=0; i<1000; i++){ + path = stime+"/"+Form("arid-%d",i); + if(gSystem->mkdir(dir+"/"+path)==0) break; + } + gSystem->Unlink(dir+"/last"); + gSystem->Symlink(path, dir+"/last"); + finalpath = dir+"/"+path; + }else{ + gSystem->mkdir(glx_savepath,kTRUE); + } + glx_writeInfo(finalpath+"/readme"); + return finalpath; +} + +void glx_addInfo(TString str){ + glx_info += str+"\n"; +} + +void glx_save(TPad *c= NULL,TString path="", TString name="", Int_t what=0, Int_t style=0){ + if(c && path != "") { + bool bstate = gROOT->IsBatch(); + gROOT->SetBatch(1); + Int_t w = 800, h = 400; + if(style != -1){ + if(style == 1) {w = 800; h = 500;} + if(style == 2) {w = 800; h = 600;} + if(style == 3) {w = 800; h = 400;} + if(style == 5) {w = 800; h = 900;} + if(style == 0){ + w = ((TCanvas*)c)->GetWindowWidth(); + h = ((TCanvas*)c)->GetWindowHeight(); + } + + TCanvas *cc; + if(TString(c->GetName()).Contains("cdigi") || TString(c->GetName()).Contains("hp_")){ + cc = glx_drawDigi(glx_last_maxz,glx_last_minz); + cc->SetCanvasSize(w,h); + if(name.Contains("=")) name = name.Tokenize('=')->First()->GetName(); + }else{ + cc = new TCanvas(TString(c->GetName())+"exp","cExport",0,0,w,h); + cc = (TCanvas*) c->DrawClone(); + cc->SetCanvasSize(w,h); + if(fabs(cc->GetBottomMargin()-0.1)<0.001) cc->SetBottomMargin(0.12); + } + + if(style == 0) { + if(fabs(cc->GetBottomMargin()-0.1)<0.001) cc->SetBottomMargin(0.12); + TIter next(cc->GetListOfPrimitives()); + TObject *obj; + + while((obj = next())){ + if(obj->InheritsFrom("TH1")){ + TH1F *hh = (TH1F*)obj; + hh->GetXaxis()->SetTitleSize(0.06); + hh->GetYaxis()->SetTitleSize(0.06); + + 
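// Usage sketch for the glx_fit helper defined above: it returns TVector3(mean, sigma, second mean),
// so e.g. a Cherenkov-angle peak position and resolution can be read back like this
// (the histogram hThetaC and the range/limit values are hypothetical):
/*
  TVector3 res = glx_fit(hThetaC, 0.01, 20, 0.01, 1, "MQ");
  std::cout << "mean " << res.X() << "  sigma " << res.Y() << std::endl;
*/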
hh->GetXaxis()->SetLabelSize(0.05); + hh->GetYaxis()->SetLabelSize(0.05); + + hh->GetXaxis()->SetTitleOffset(0.85); + hh->GetYaxis()->SetTitleOffset(0.76); + + if(h>500){ + cc->SetBottomMargin(0.10); + hh->GetXaxis()->SetTitleSize(0.04); + hh->GetYaxis()->SetTitleSize(0.04); + hh->GetZaxis()->SetTitleSize(0.04); + + hh->GetXaxis()->SetLabelSize(0.03); + hh->GetYaxis()->SetLabelSize(0.03); + + hh->GetXaxis()->SetTitleOffset(0.85); + hh->GetYaxis()->SetTitleOffset(1.1); + } + + if(fabs(cc->GetBottomMargin()-0.12)<0.001){ + TPaletteAxis *palette = (TPaletteAxis*)hh->GetListOfFunctions()->FindObject("palette"); + if(palette) { + palette->SetY1NDC(0.12); + cc->Modified(); + } + } + } + if(obj->InheritsFrom("TGraph")){ + TGraph *gg = (TGraph*)obj; + gg->GetXaxis()->SetLabelSize(0.05); + gg->GetXaxis()->SetTitleSize(0.06); + gg->GetXaxis()->SetTitleOffset(0.84); + + gg->GetYaxis()->SetLabelSize(0.05); + gg->GetYaxis()->SetTitleSize(0.06); + gg->GetYaxis()->SetTitleOffset(0.7); + } + if(obj->InheritsFrom("TF1")){ + TF1 *f = (TF1*)obj; + f->SetNpx(500); + } + } + } + + cc->Modified(); + cc->Update(); + + cc->Print(path+"/"+name+".png"); + if(what>0) cc->Print(path+"/"+name+".C"); + if(what>1) cc->Print(path+"/"+name+".pdf"); + if(what>2) cc->Print(path+"/"+name+".eps"); + }else{ + c->Print(path+"/"+name+".png"); + if(what>0) c->Print(path+"/"+name+".C"); + if(what>1) c->Print(path+"/"+name+".pdf"); + if(what>2) c->Print(path+"/"+name+".eps"); + } + gROOT->SetBatch(bstate); + } +} + +TString glx_createSubDir(TString dir="dir"){ + gSystem->mkdir(dir); + return dir; +} + +TList *glx_canvasList; +void glx_canvasAdd(TString name="c",Int_t w=800, Int_t h=600){ + if(!glx_canvasList) glx_canvasList = new TList(); + + bool found = false; + TIter next(glx_canvasList); + TCanvas *c = 0; + while((c = (TCanvas*) next())){ + if(c->GetName()==name || name=="*") { + found = true; + c->cd(); + break; + } + } + if (!found) glx_canvasList->Add(new TCanvas(name, name, 0, 0, w, h)); +} + +TCanvas *glx_canvasGet(TString name="c"){ + TIter next(glx_canvasList); + TCanvas *c=0; + while((c = (TCanvas*) next())){ + if(c->GetName()==name || name=="*") break; + } + return c; +} + +TCanvas *glx_canvasAddOrGet(TString name = "c", Int_t w = 800, Int_t h = 600) { + if (!glx_canvasList) glx_canvasList = new TList(); + + bool found = false; + TIter next(glx_canvasList); + TCanvas *c = 0; + while((c = (TCanvas*) next())){ + if(c->GetName()==name || name=="*") { + c->cd(); + return c; + } + } + auto cr = new TCanvas(name, name, 0, 0, w, h); + glx_canvasList->Add(cr); + return cr; +} + +void glx_canvasAdd(TCanvas *c){ + if(!glx_canvasList) glx_canvasList = new TList(); + glx_canvasList->Add(c); +} + +void glx_canvasCd(TString name="c"){ + +} + +void glx_canvasDel(TString name="c"){ + TIter next(glx_canvasList); + TCanvas *c=0; + while(((c = (TCanvas*) next()))){ + if(c->GetName()==name) glx_canvasList->Remove(c); + } +} + +void glx_waitPrimitive(TCanvas *c){ + c->Modified(); + c->Update(); + c->WaitPrimitive(); +} + +void glx_waitPrimitive(TString name, TString prim=""){ + TIter next(glx_canvasList); + TCanvas *c=0; + while((c = (TCanvas*) next())){ + if(TString(c->GetName())==name){ + c->Modified(); + c->Update(); + c->WaitPrimitive(prim); + } + } +} + +// style = 0 - for web blog +// style = 1 - for talk +// what = 0 - save in png, pdf, root formats +// what = 1 - save in png format +void glx_canvasSave(TString spath="data/temp", Int_t what=0, Int_t style=0){ + glx_savepath = spath; + TIter next(glx_canvasList); + TCanvas *c=0; + 
TString path = glx_createDir(); + while((c = (TCanvas*) next())){ + glx_save(c, path, c->GetName(), what,style); + glx_canvasList->Remove(c); + } +} + +void glx_normalize(TH1F* hists[],Int_t size){ + Double_t max = 0; + Double_t min = 0; + for(Int_t i=0; iGetBinContent(hists[i]->GetMaximumBin()); + Double_t tmin = hists[i]->GetMinimum(); + if(tmax>max) max = tmax; + if(tminGetYaxis()->SetRangeUser(min,max); + } +} + +void glx_normalize_to(TH1F *hists[], int size, double max) { + + for (int i = 0; i < size; i++) { + double tmax = hists[i]->GetBinContent(hists[i]->GetMaximumBin()); + if (tmax > 0) hists[i]->Scale(max / tmax); + } +} + +int glx_findPdgId(int pdg){ + int pdgId=0; // electron by default + if(fabs(pdg) == 13) pdgId=1; + if(fabs(pdg) == 211) pdgId=2; + if(fabs(pdg) == 321) pdgId=3; + if(fabs(pdg) == 2212) pdgId=4; + return pdgId; +} + +void glx_normalize(TH1F* h1,TH1F* h2){ + Double_t max = (h1->GetMaximum()>h2->GetMaximum())? h1->GetMaximum() : h2->GetMaximum(); + max += max*0.1; + h1->GetYaxis()->SetRangeUser(0,max); + h2->GetYaxis()->SetRangeUser(0,max); +} + +double glx_readcorrection(TString in, TString key){ + ifstream ifs; + ifs.open(in); + + TString s; + double corr=0,c=0; + + while (1) { + ifs >> s >> c; + if(s==key) return c; + if (!ifs.good()) break; + } + ifs.close(); + return corr; +} + +void glx_writecorrection(TString out, TString key, double corr){ + ofstream ofs; + ofs.open(out,std::ios_base::app); + + ofs<GetXaxis(); + Int_t bmin = axis->FindBin(xmin); + Int_t bmax = axis->FindBin(xmax); + Double_t integral = h->Integral(bmin,bmax); + integral -= h->GetBinContent(bmin)*(xmin-axis->GetBinLowEdge(bmin))/axis->GetBinWidth(bmin); + integral -= h->GetBinContent(bmax)*(axis->GetBinUpEdge(bmax)-xmax)/axis->GetBinWidth(bmax); + return integral; +} + +double glx_separation(TH1F *h1, TH1F *h2){ + // int nbins = h1->GetNbinsX(); + // double x0 = h1->GetXaxis()->GetBinCenter(0); + // double x1 = h1->GetXaxis()->GetBinCenter(nbins); + + // double t1 = glx_integral(h1,x0,x1); + // double t2 = glx_integral(h2,x0,x1); + + // int i; + // for(i=0; iGetXaxis()->GetBinCenter(i)); + // double m2 = glx_integral(h1,x0,h1->GetXaxis()->GetBinCenter(i)); + // std::cout< +#include +#include +#include + +using namespace std; + +TGraph gg_gr; +void circleFcn(int &, double *, double &f, double *par, int) { + f = 0; + double *x = gg_gr.GetX(); + double *y = gg_gr.GetY(); + int np = gg_gr.GetN(); + for (int i = 0; i < np; i++) { + double u = x[i] - par[0]; + double v = y[i] - par[1]; + double d = fabs(par[2] - TMath::Sqrt(u * u + v * v)); + double e = sqrt(d); + f += d; + } + // f *= f; +} + +double mangle(int pid, double m) { + return acos(sqrt(m * m + glx_mass[pid] * glx_mass[pid]) / m / 1.4738); // 1.4738 +} + +void FitRing(double &x0, double &y0, double &theta, TGraph gr) { + + double *x = gr.GetX(); + double *y = gr.GetY(); + gg_gr = TGraph(); + for (int i = 0; i < gr.GetN(); i++) { + if (fabs(theta - TMath::Sqrt(x[i] * x[i] + y[i] * y[i])) < 0.007) { + gg_gr.SetPoint(gg_gr.GetN(), x[i], y[i]); + } + } + + // Fit a circle to the graph points + TVirtualFitter::SetDefaultFitter("Minuit"); // default is Minuit + TVirtualFitter *fitter = TVirtualFitter::Fitter(0, 3); + fitter->SetPrecision(0.00000001); + fitter->SetMaxIterations(1000); + + fitter->SetFCN(circleFcn); + fitter->SetParameter(0, "x0", 0, 0.001, -0.15, 0.15); + fitter->SetParameter(1, "y0", 0, 0.001, -0.15, 0.15); + fitter->SetParameter(2, "R", theta, 0.001, theta - 0.04, theta + 0.04); + + fitter->FixParameter(2); + double 
arglist[1] = {0}; + fitter->ExecuteCommand("MINIMIZE", arglist, 0); + + x0 = fitter->GetParameter(0); + y0 = fitter->GetParameter(1); + theta = fitter->GetParameter(2); +} + +void reco_lut_02(TString infile = "hd_root_gen_pik.root", TString inlut = "lut_05_avr.root", + int ibar = 32, int ibin = 3, double moms = 3.5, double scan = 0, double dx = 0.0, + double dy = 0, int vcorr = 2) { + + int indd = -1; + const int nodes = glx_maxch; + const int luts = 24; + + bool sim = false; + if (infile.Contains("hd_root_gen")) sim = true; + + TFile *fLut = new TFile(inlut); + TTree *tLut = (TTree *)fLut->Get("lut_dirc"); + TClonesArray *cLut[luts]; + for (int l = 0; l < luts; l++) { + cLut[l] = new TClonesArray("DrcLutNode"); + tLut->SetBranchAddress(Form("LUT_%d", l), &cLut[l]); + } + tLut->GetEntry(0); + + DrcLutNode *lutNode[luts][nodes]; + for (int l = 0; l < luts; l++) { + for (int i = 0; i < nodes; i++) lutNode[l][i] = (DrcLutNode *)cLut[l]->At(i); + } + TGaxis::SetMaxDigits(4); + + TVector3 fnX1 = TVector3(1, 0, 0); + TVector3 fnY1 = TVector3(0, 1, 0); + TVector3 fnZ1 = TVector3(0, 0, 1); + TVector3 cz; + const int nbins = 10; + + vector>> hCorrAD(48, + vector>(nbins, vector(glx_npmt))); + vector>> hCorrAR(48, + vector>(nbins, vector(glx_npmt))); + vector>> hCorrTD(48, + vector>(nbins, vector(glx_npmt))); + vector>> hCorrTR(48, + vector>(nbins, vector(glx_npmt))); + + TString stdiff = ";t_{measured}-t_{calculated} [ns];entries [#]"; + TString scdiff = ";#theta_{C reco} - #theta_{C expected} [mrad];entries [#]"; + + for (int b = 0; b < 48; b++) { + for (int x = 0; x < nbins; x++) { + for (int p = 0; p < glx_npmt; p++) { + hCorrAD[b][x][p] = new TH1F(Form("hCorrAD_%d_%d_%d", b, x, p), scdiff, 60, -50, 50); + hCorrAR[b][x][p] = new TH1F(Form("hCorrAR_%d_%d_%d", b, x, p), scdiff, 60, -50, 50); + hCorrTD[b][x][p] = new TH1F(Form("hCorrTD_%d_%d_%d", b, x, p), stdiff, 100, -5, 5); + hCorrTR[b][x][p] = new TH1F(Form("hCorrTR_%d_%d_%d", b, x, p), stdiff, 100, -5, 5); + } + } + } + + const int nphi = 80, ntheta = 40; + vector> hCorrLut(nphi, vector(ntheta)); + for (int b = 0; b < nphi; b++) { + for (int p = 0; p < ntheta; p++) { + hCorrLut[b][p] = new TH1F(Form("hCorrLut_p%d_t%d", b, p), stdiff, 100, -20, 20); + } + } + + double radiatorL = 489.712; // 4*122.5; + double barend = -294.022 + dx; // 4*1225-1960; -294.022 + + double minChangle = 0.6; + double maxChangle = 0.9; + double sum1, sum2, noise = 0.2; // 0.4 + // cuts + int anglecorr = (vcorr); // 2-apply + bool bfitcorr = (0); + double cut_cangle = 3.5 * 0.008; // 3.5 + double cut_tdiff = 0.5; + + double bar_corr_x[] = { + 0.000, -0.002, -0.004, -0.004, -0.004, -0.003, -0.003, -0.003, -0.004, -0.004, -0.003, -0.004, + -0.004, -0.005, -0.005, -0.004, -0.003, -0.004, -0.003, -0.004, -0.005, -0.005, -0.005, -0.005, + + -0.009, -0.002, -0.003, -0.002, -0.003, -0.002, -0.001, -0.001, 0.000, 0.000, 0.000, 0.000, + 0.000, -0.001, -0.001, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000}; + double bar_corr_y[] = { + 0.002, 0.002, 0.002, 0.002, 0.002, 0.002, 0.002, 0.002, 0.002, 0.002, 0.002, 0.002, + 0.003, 0.003, 0.003, 0.003, 0.003, 0.003, 0.003, 0.003, 0.003, 0.003, 0.003, 0.003, + + 0.002, 0.002, 0.002, 0.002, 0.002, 0.002, 0.002, 0.002, 0.002, 0.002, 0.002, 0.002, + 0.002, 0.002, 0.002, 0.002, 0.002, 0.002, 0.002, 0.002, 0.002, 0.002, 0.002, 0.002}; + double sigma[] = {0.01, 0.01, 0.0073, 0.0073, 0.01}; + + double acorrAD[48][nbins][glx_npmt] = {{{0}}}; + double acorrAR[48][nbins][glx_npmt] = {{{0}}}; + double acorrTD[48][nbins][glx_npmt] = 
{{{0}}}; + double acorrTR[48][nbins][glx_npmt] = {{{0}}}; + double acorr3AD[48][nphi][ntheta] = {{{0}}}; + double acorr3AR[48][nphi][ntheta] = {{{0}}}; + double corrAD, corrAR, sigmaAD, corrTD, corrTR; + int cor_level = 0, tb, tp, tt, tbin, level; + + TString corrfile = infile + ".corr.root"; + if (ibar > -1 && ibin > -1) corrfile = infile + Form(".corr_%d_%d.root", ibar, ibin); + + if (anglecorr == 2) { + // read per pmt corrections + if (!gSystem->AccessPathName(corrfile)) { + std::cout << "--- reading corrections from " << corrfile << std::endl; + TChain ch; + ch.SetName("corr"); + ch.Add(corrfile); + ch.SetBranchAddress("bar", &tb); + ch.SetBranchAddress("pmt", &tp); + ch.SetBranchAddress("bin", &tbin); + ch.SetBranchAddress("level", &level); + ch.SetBranchAddress("zcorrAD", &corrAD); + ch.SetBranchAddress("zcorrAR", &corrAR); + ch.SetBranchAddress("zcorrTD", &corrTD); + ch.SetBranchAddress("zcorrTR", &corrTR); + ch.SetBranchAddress("zsigmaAD", &sigmaAD); + + for (int i = 0; i < ch.GetEntries(); i++) { + ch.GetEvent(i); + cor_level = level; + if (fabs(corrAD) < 10) { + acorrAD[tb][tbin][tp] = 0.001 * corrAD; + acorrAR[tb][tbin][tp] = 0.001 * corrAR; + acorrTD[tb][tbin][tp] = corrTD; + acorrTR[tb][tbin][tp] = corrTR; + } + + std::cout << "L " << cor_level << " bar = " << tb << " bin = " << tbin << " pmt = " << tp + << Form(" ad %-8.5f ar %-8.5f", acorrAD[tb][tbin][tp], acorrAR[tb][tbin][tp]) + << Form(" td %-8.5f tr %-8.5f", acorrTD[tb][tbin][tp], acorrTR[tb][tbin][tp]) + << std::endl; + } + } else { + cor_level = 0; + std::cout << "--- corr file not found " << corrfile << std::endl; + } + + if (cor_level == 0) cut_tdiff = 2.0; + if (cor_level == 1) cut_tdiff = 0.5; + } + + if (anglecorr == 3) { + corrfile = infile + ".corr3.root"; + std::cout << "======= reading corrections from " << corrfile << std::endl; + TChain ch; + ch.SetName("corr"); + ch.Add(corrfile); + ch.SetBranchAddress("bar", &tb); + ch.SetBranchAddress("itheta", &tt); + ch.SetBranchAddress("iphi", &tp); + ch.SetBranchAddress("zcorrAD", &corrAD); + ch.SetBranchAddress("zcorrAR", &corrAR); + ch.SetBranchAddress("zsigmaAD", &sigmaAD); + + for (int i = 0; i < ch.GetEntries(); i++) { + ch.GetEvent(i); + if (fabs(corrAD) < 6) { + acorr3AD[tb][tp][tt] = 0.001 * corrAD; + acorr3AR[tb][tp][tt] = 0.001 * corrAR; + } + std::cout << "i " << i << " " << tb << "-" << tp << " " << tt << " " << acorr3AD[tb][tp][tt] + << std::endl; + } + } + + double criticalAngle = asin(1.00028 / 1.47125); // n_quarzt = 1.47125; //(1.47125 <==> 390nm) + double evtime, luttheta, tangle, lenx; + int64_t pathid; + TVector3 posInBar, posInBar_true, momInBar, dir, dird, ldir; + double cherenkovreco[5], cherenkovreco_err[5], spr[5]; + + TF1 *fit = + new TF1("fgaus", "[0]*exp(-0.5*((x-[1])/[2])*(x-[1])/[2]) +x*[3]+[4]", minChangle, maxChangle); + TSpectrum *spect = new TSpectrum(10); + TH1F *hAngle[5], *hLnDiff[5], *hNph[5]; + TH1F *hAngleU[5]; + TF1 *fAngle[5]; + double mAngle[5]; + TH1F *hDiff = new TH1F("hDiff", stdiff, 400, -10, 10); + TH1F *hDiffT = new TH1F("hDiffT", stdiff, 400, -10, 10); + TH1F *hDiffD = new TH1F("hDiffD", stdiff, 400, -10, 10); + TH1F *hDiffR = new TH1F("hDiffR", stdiff, 400, -10, 10); + TH1F *hTime = new TH1F("hTime", ";propagation time [ns];entries [#]", 1000, 0, 200); + TH1F *hCalc = new TH1F("hCalc", ";calculated time [ns];entries [#]", 1000, 0, 200); + TH1F *hNphC = new TH1F("hNphC", ";detected photons [#];entries [#]", 150, 0, 150); + TH2F *hCMom[5]; + TH2F *hRing = new TH2F("hRing", 
";#theta_{c}sin(#varphi_{c});#theta_{c}cos(#varphi_{c})", 500, -1, + 1, 500, -1, 1); + TH2F *h2Time = new TH2F("h2Time", ";propagation time [ns];t_{measured}-t_{calculated} [ns];", 500, + 0, 100, 200, -5, 5); + TH2F *hWall = new TH2F("hWall", ";x [cm];y [cm]", 400, -100, 100, 400, -100, 100); + TH2F *hWallPos = new TH2F("hWalPos", ";x [cm];y [cm]", 50, -100, 100, 50, -100, 100); + int wallb[5000] = {0}; + TH2F *hSpr = new TH2F("hSpr", ";x [cm];y [cm]", 400, -100, 100, 400, -100, 100); + TH2F *hChrom = new TH2F("hChrom", ";t_{measured}-t_{calculated} [ns];#theta_{C} [mrad]", 100, -2, + 2, 60, -30, 30); + TH1F *hMult = new TH1F("hmult", "; track multiplicity [#];entries [#]", 8, 0, 8); + TH2F *hLutCorr = + new TH2F("hLutCorr", "hLutCorr", nphi, 0, TMath::TwoPi(), ntheta, 0, TMath::PiOver2()); + TH2F *hLutCorr1 = + new TH2F("hLutCorr1", "hLutCorr", nphi, -TMath::Pi(), TMath::Pi(), ntheta, 0, TMath::PiOver2()); + + hDiff->SetMinimum(0); + TGaxis::SetMaxDigits(3); + + for (int i = 0; i < 5; i++) { + double momentum = 4; + hAngle[i] = + new TH1F(Form("hAngle_%d", i), ";#theta_{C} [rad];entries/N_{max} [#]", 150, 0.6, 0.9); + hAngleU[i] = + new TH1F(Form("hAngleu_%d", i), ";#theta_{C} [rad];entries/N_{max} [#]", 150, 0.6, 0.9); + hCMom[i] = new TH2F(Form("cmom_%d", i), "hcmom", 1000, 0, 10, 500, 0.6, 0.9); + hNph[i] = new TH1F(Form("hNph_%d", i), ";detected photons [#];entries [#]", 80, 0, 80); + mAngle[i] = + acos(sqrt(momentum * momentum + glx_mass[i] * glx_mass[i]) / momentum / 1.473); // 1.4738 + fAngle[i] = new TF1(Form("fAngle_%d", i), "[0]*exp(-0.5*((x-[1])/[2])*(x-[1])/[2])", 0.7, 0.9); + fAngle[i]->SetParameter(0, 1); // const + fAngle[i]->SetParameter(1, mAngle[i]); // mean + fAngle[i]->SetParameter(2, sigma[i]); // sigma + hAngle[i]->SetMarkerStyle(20); + hAngle[i]->SetMarkerSize(0.8); + if (moms < 4) + hLnDiff[i] = + new TH1F(Form("hLnDiff_%d", i), ";ln L(#pi) - ln L(K);entries [#]", 80, -160, 160); + else + hLnDiff[i] = + new TH1F(Form("hLnDiff_%d", i), ";ln L(#pi) - ln L(K);entries [#]", 80, -100, 100); + } + + hAngle[2]->SetLineColor(4); + hAngle[3]->SetLineColor(2); + hAngle[2]->SetMarkerColor(kBlue + 1); + hAngle[3]->SetMarkerColor(kRed + 1); + fAngle[2]->SetLineColor(4); + fAngle[3]->SetLineColor(2); + + hLnDiff[2]->SetLineColor(4); + hLnDiff[3]->SetLineColor(2); + int evtcount = 0, count[5] = {0}; + bool debug = false; + TCanvas *cc; + if (debug) cc = new TCanvas("cc", "cc", 800, 800); + TLine *gLine = new TLine(); + TH1F *hphi = new TH1F("hphi", "hphi;[GeV/c];events [#]", 5000, 0, 1.5); + TH1F *hrho = new TH1F("hrho", "hphi;[GeV/c];events [#]", 5000, 0, 1.5); + + TGraph cagr; + double bartime, luttime, diftime, adiff, len, leny, lenz; + DrcHit hit; + double dibin = -100 + ibin * 20 + 10; + + // TCut cut = "(DrcEvent.fId>=4 && DrcEvent.fId<9)"; + TCut cut = ""; + // if (ibar > -1) cut += Form("(DrcEvent.fId == %d)", ibar); + if (ibar > -1) cut += Form("(fabs(DrcEvent.fId -%d) < 2)", ibar); + // cut += "fabs(DrcEvent.fPosition.fX-0)<5"; // 10 + if (ibin > -1) cut += Form("fabs(DrcEvent.fPosition.fX-%f)<10", dibin); // 10 + if (ibin > -1) cut += "DrcEvent.fPosition.fX < 0"; // 10 + // cut += "fabs(DrcEvent.fPdg) == 321"; + // if (cor_level < 2 && anglecorr > 0) cut += "fabs(DrcEvent.fMomentum.Mag())>2.5"; + if (cor_level < 2 && anglecorr > 0) cut += Form("fabs(DrcEvent.fMomentum.Mag()-%2.4f)<1", moms); + else cut += Form("fabs(DrcEvent.fMomentum.Mag()-%2.4f)<0.1", moms); + // cut += "fabs(DrcEvent.fMomentum.Mag())>3.5"; + // cut=""; + + cut += "DrcEvent.fPdg > 0"; + + // up + // 
cut += "(DrcEvent.fId >= 31)"; + // cut += "(DrcEvent.fId <= 35)"; + + // down + // cut += "(DrcEvent.fId >= 7)"; + // cut += "(DrcEvent.fId <= 11)"; + + cut += "fabs(DrcEvent.fPosition.fX-0)<20"; + + if (!glx_initc(infile, cut)) return; + + for (int e = 0; e < glx_elist->GetN() && e < glx_ch->GetEntries(); e++) { + glx_ch->GetEntry(glx_elist->GetEntry(e)); + + // if (cor_level == 2 && count[3] > 1000) break; + if (glx_events->GetEntriesFast() > 1) continue; + hMult->Fill(glx_events->GetEntriesFast()); + + for (int t = 0; t < glx_events->GetEntriesFast(); t++) { + glx_nextEventc(e, t, 1000); + + // if (glx_event->GetPdg() > 0) continue; + if (glx_event->GetType() > 0 && !sim) { // beam + // if(glx_event->GetType()!=2) continue; // 1-LED 2-beam 0-rest + // if(glx_event->GetDcHits()<25) continue; + // if (glx_event->GetTofTrackDist() > 1.5) continue; + if (fabs(glx_event->GetPdg()) == 211) { + if (glx_event->GetChiSq() > 10) continue; + if (fabs(glx_event->GetInvMass() - 0.77) > 0.01) continue; // 0.05 + if (fabs(glx_event->GetMissMass() + 0.0001) > 0.001) continue; // 0.001 + } else if (fabs(glx_event->GetPdg()) == 321) { + if (fabs(glx_event->GetInvMass() - 1.02) > 0.01) continue; // 0.02 + if (fabs(glx_event->GetMissMass() + 0.0001) > 0.003) continue; // 0.007 + } else continue; + } else { // geant + noise = 0.2; + } + + int pdgId = glx_findPdgId(glx_event->GetPdg()); + int bar = glx_event->GetId(); + int lid = bar; + int opbox = 0; + double time0 = glx_event->GetTime(); + + if (glx_event->GetPdg() == 321) hphi->Fill(glx_event->GetInvMass()); + if (glx_event->GetPdg() == 211) hrho->Fill(glx_event->GetInvMass()); + + posInBar = glx_event->GetPosition(); + posInBar_true = glx_event->GetPosition_Truth(); + + momInBar = glx_event->GetMomentum(); + double momentum = momInBar.Mag(); + int bin = (100 + posInBar.X()) / 200. 
* nbins; + + // selection + // if (bar != ibar && ibar > -1) continue; + if (fabs(bar - ibar) > 1 && ibar > -1) continue; + if (posInBar.X() > 0) continue; + if (fabs(posInBar.X() - dibin) > 10 && ibin > -1) continue; + if (cor_level < 2 && anglecorr > 0) { + // if (momentum < 2.5) continue; + if (fabs(momentum - moms) > 1) continue; + } else { + + // select tracks close to the center of the bar + if (bar == 25 && fabs(posInBar.Y() - 14.25) > 1.25) continue; + if (bar == 26 && fabs(posInBar.Y() - 17.75) > 1.25) continue; + if (bar == 27 && fabs(posInBar.Y() - 21.25) > 1.25) continue; + if (bar == 28 && fabs(posInBar.Y() - 24.75) > 1.25) continue; + if (bar == 29 && fabs(posInBar.Y() - 28.25) > 1.25) continue; + if (bar == 30 && fabs(posInBar.Y() - 31.75) > 1.25) continue; + + if (bar == 31 && fabs(posInBar.Y() - 35.25) > 1.25) continue; + if (bar == 32 && fabs(posInBar.Y() - 38.75) > 1.25) continue; + if (bar == 33 && fabs(posInBar.Y() - 42.25) > 1.25) continue; + if (bar == 34 && fabs(posInBar.Y() - 45.75) > 1.25) continue; + if (bar == 35 && fabs(posInBar.Y() - 49.25) > 1.25) continue; + + if (pdgId == 2 && count[2] > 1000) continue; + if (pdgId == 3 && count[3] > 1000) continue; + // if (fabs(momentum - moms) > 0.1) continue; + } + + if (bar > 23) { + lid = bar - 24; + opbox = 1; + barend = 294.022; + } + + // if(glx_event->GetParent()>0) continue; + // track correction + // momInBar.RotateX(dx*TMath::DegToRad()); + // momInBar.RotateY(dy*TMath::DegToRad()); + + hWall->Fill(posInBar.X(), posInBar.Y()); + // double wdx = posInBar.X()-posInBar_true.X(); + // if(fabs(wdx)>2) continue; + // hWallPos->Fill(posInBar.X(),posInBar.Y(),wdx); + // int wx=hWallPos->FindBin(posInBar.X(),posInBar.Y()); + // wallb[wx]++; + + if (glx_event->GetType() > 0 && !sim) { // data + momInBar.RotateX(bar_corr_x[bar]); + momInBar.RotateY(bar_corr_y[bar]); + } else { // sim + cz = momInBar; + momInBar.RotateX(gRandom->Gaus(0, 0.002)); + momInBar.RotateY(gRandom->Gaus(0, 0.002)); + // momInBar.RotateX(gRandom->Gaus(0, 0.0025)); + // momInBar.Rotate(gRandom->Uniform(0, TMath::PiOver2()), cz); + // time0 += gRandom->Gaus(0, 0.25); + } + + cz = momInBar.Unit(); + + for (int p = 0; p < 5; p++) { + mAngle[p] = mangle(p, momentum); + fAngle[p]->SetParameter(1, mAngle[p]); // mean + fAngle[p]->SetParameter(2, sigma[p]); + } + + if (bfitcorr) { + fAngle[2]->SetParameter(2, 0.0067); + fAngle[3]->SetParameter(2, 0.0067); + + TGraph cgr; + sum1 = 0; + sum2 = 0; + for (int h = 0; h < glx_event->GetHitSize(); h++) { + hit = glx_event->GetHit(h); + int ch = hit.GetChannel(); + int pmt = hit.GetPmtId(); + int pix = hit.GetPixelId(); + + if (opbox == 1) { + pmt -= 108; + ch -= 108 * 64; + } + + double hitTime = hit.GetLeadTime() - time0 + 0.1; + if (pmt <= 10 || (pmt >= 90 && pmt <= 96)) continue; // dummy pmts + if (ch > glx_nch) continue; + + bool reflected = hitTime > 44; + + if (reflected) hitTime += acorrTR[bar][bin][pmt]; + else hitTime += acorrTD[bar][bin][pmt]; + + lenx = fabs(barend - posInBar.X()); + double rlenx = 2 * radiatorL - lenx; + double dlenx = lenx; + if (reflected) lenx = 2 * radiatorL - lenx; + double p1, p2; + + for (int i = 0; i < lutNode[lid][ch]->Entries(); i++) { + dird = lutNode[lid][ch]->GetEntry(i); + evtime = lutNode[lid][ch]->GetTime(i); + pathid = lutNode[lid][ch]->GetPathId(i); + int nrefl = lutNode[lid][ch]->GetNRefl(i); + double weight = lutNode[lid][ch]->GetWeight(i); + + for (int r = 0; r < 2; r++) { + if (!reflected && r == 1) continue; + + if (r) lenx = rlenx; + else lenx = dlenx; + + for 
(int u = 0; u < 4; u++) { + if (u == 0) dir = dird; + if (u == 1) dir.SetXYZ(dird.X(), -dird.Y(), dird.Z()); + if (u == 2) dir.SetXYZ(dird.X(), dird.Y(), -dird.Z()); + if (u == 3) dir.SetXYZ(dird.X(), -dird.Y(), -dird.Z()); + if (r) dir.SetXYZ(-dir.X(), dir.Y(), dir.Z()); + if (dir.Angle(fnY1) < criticalAngle || dir.Angle(fnZ1) < criticalAngle) continue; + dir = dir.Unit(); + luttheta = dir.Angle(TVector3(-1, 0, 0)); + + if (opbox == 1) { + dir.RotateZ(TMath::Pi()); + luttheta = dir.Angle(TVector3(1, 0, 0)); + } + if (r) luttheta = TMath::Pi() - luttheta; + tangle = momInBar.Angle(dir); + + bartime = lenx / cos(luttheta) / 19.65; + luttime = bartime + evtime; + diftime = hitTime - luttime; + + if (r) tangle += acorrAD[bar][bin][pmt]; // per PMT corr + else tangle += acorrAD[bar][bin][pmt]; + + if (fabs(diftime) < 2.0 && cor_level > 0) tangle -= 0.0025 * diftime; // chrom corr + + if (fabs(diftime) > 0.5 + luttime * 0.03) continue; + if (fabs(tangle - mAngle[2]) > 0.015 && fabs(tangle - mAngle[3]) > 0.015) continue; + + sum1 += TMath::Log(fAngle[2]->Eval(tangle) + noise); + sum2 += TMath::Log(fAngle[3]->Eval(tangle) + noise); + + TVector3 rdir = TVector3(-dir.Y(), -dir.X(), dir.Z()); + cz = momInBar.Unit(); + rdir.RotateUz(cz); + double lphi = rdir.Phi(); + double tx = tangle * TMath::Sin(lphi); + double ty = tangle * TMath::Cos(lphi); + hRing->Fill(tx, ty); + cgr.SetPoint(cgr.GetN(), tx, ty); + } + } + } + } + double theta = mAngle[3]; + if (sum1 > sum2) theta = mAngle[2]; + // theta = 0.5 * (mAngle[2] + mAngle[3]); + + { + double tx0(0), ty0(0); + FitRing(tx0, ty0, theta, cgr); + TVector3 rcorr(tx0, ty0, 1 - TMath::Sqrt(tx0 * tx0 + ty0 * ty0)); + TVector3 oo = momInBar; + momInBar.RotateX(2.5 * rcorr.Theta()); + momInBar.Rotate(rcorr.Phi(), oo); + std::cout << "====== " << rcorr.Theta() << " " << rcorr.Phi() << std::endl; + + if (debug) { + cc->cd(); + hRing->Draw("colz"); + gStyle->SetOptStat(0); + TLegend *legr = new TLegend(0.25, 0.4, 0.65, 0.6); + legr->SetFillStyle(0); + legr->SetBorderSize(0); + legr->AddEntry((TObject *)0, Form("Entries %0.0f", hRing->GetEntries()), ""); + legr->AddEntry((TObject *)0, Form("#Delta#theta_{c} %f [mrad]", rcorr.Theta() * 1000), + ""); + legr->AddEntry((TObject *)0, Form("#Delta#varphi_{c} %f [mrad]", rcorr.Phi()), ""); + legr->Draw(); + TArc *arc = new TArc(tx0, ty0, theta); + arc->SetLineColor(kRed); + arc->SetLineWidth(2); + arc->SetFillStyle(0); + arc->Draw(); + TArc *arcI = new TArc(0, 0, theta); + arcI->SetLineColor(kGreen); + arcI->SetLineWidth(2); + arcI->SetFillStyle(0); + arcI->Draw(); + + cc->Update(); + cc->WaitPrimitive(); + // cc->SetName(Form("hRing_e%d",++indd)); + // glx_canvasAdd(cc); + // glx_canvasSave(1); + // glx_canvasDel(Form("hRing_e%d",indd)); + hRing->Reset(); + } + } + } + + sum1 = 0; + sum2 = 0; + int nph = 0, nphc = 0; + // hNphC->Fill(glx_event->GetHitSize()); + bool goodevt = 0; + + for (int h = 0; h < glx_event->GetHitSize(); h++) { + hit = glx_event->GetHit(h); + int ch = hit.GetChannel(); + int pmt = hit.GetPmtId(); + int pix = hit.GetPixelId(); + + if (opbox == 0) { + if (ch > glx_nch) continue; + } else { + if (ch < glx_nch) continue; + pmt -= glx_npmt; + ch -= glx_nch; + } + + if (pmt <= 10 || (pmt >= 90 && pmt <= 96)) continue; // dummy pmts + + double hitTime = hit.GetLeadTime() - time0 + 0.1; + if (sim) { + if (gRandom->Uniform(0, 1) < 0.36) continue; + } + + nphc++; + bool reflected = hitTime > 44; + + if (reflected) hitTime += acorrTR[bar][bin][pmt]; + else hitTime += acorrTD[bar][bin][pmt]; + + // 
if(!reflected) continue; + lenx = fabs(barend - posInBar.X()); + double rlenx = 2 * radiatorL - lenx; + double dlenx = lenx; + if (reflected) lenx = 2 * radiatorL - lenx; + + bool isGood(false); + double p1, p2; + + for (int i = 0; i < lutNode[lid][ch]->Entries(); i++) { + dird = lutNode[lid][ch]->GetEntry(i); + evtime = lutNode[lid][ch]->GetTime(i); + pathid = lutNode[lid][ch]->GetPathId(i); + int nrefl = lutNode[lid][ch]->GetNRefl(i); + double weight = 1.5; // 20* lutNode[lid][ch]->GetWeight(i); + // if(weight<0.02*20) continue; + + TString spath = Form("%ld", pathid); + // // if (!spath.BeginsWith("")) continue; + if (spath.Contains("8")) continue; + if (spath.Contains("7")) continue; + + // if (!spath.Contains("92")) continue; + // if(!spath.BeginsWith("42920")) continue; + // if(!spath.EqualTo("42920")) continue; + // if(!spath.EqualTo("32412930")) continue; + // if (nrefl > 3) continue; + + bool samepath(false); + if (fabs(pathid - hit.GetPathId()) < 0.0001) samepath = true; + p1 = hit.GetPathId(); + if (samepath) p2 = pathid; + // if(!samepath) continue; + + if (opbox == 1) { + dird.RotateZ(TMath::Pi()); + ldir = dird; + ldir.RotateY(-TMath::PiOver2()); + } else { + ldir = dird; + ldir.RotateY(TMath::PiOver2()); + ldir.RotateX(-TMath::Pi()); + } + + // int iphi = nphi*(ldir.Phi()+TMath::Pi())/TMath::TwoPi(); + // int itheta = ntheta*(ldir.Theta())/TMath::PiOver2(); + + double lphi = ldir.Phi(); + double ltheta = ldir.Theta(); + if (lphi < 0) lphi = TMath::TwoPi() + lphi; + if (ltheta > TMath::PiOver2()) ltheta = TMath::Pi() - ltheta; + + int iphi = nphi * (lphi) / TMath::TwoPi(); + int itheta = ntheta * (ltheta) / TMath::PiOver2(); + + for (int r = 0; r < 2; r++) { + if (!reflected && r == 1) continue; + if (r) lenx = rlenx; + else lenx = dlenx; + + for (int u = 0; u < 4; u++) { + if (u == 0) dir = dird; + if (u == 1) dir.SetXYZ(dird.X(), -dird.Y(), dird.Z()); + if (u == 2) dir.SetXYZ(dird.X(), dird.Y(), -dird.Z()); + if (u == 3) dir.SetXYZ(dird.X(), -dird.Y(), -dird.Z()); + if (r) dir.SetXYZ(-dir.X(), dir.Y(), dir.Z()); + if (dir.Angle(fnY1) < criticalAngle || dir.Angle(fnZ1) < criticalAngle) continue; + dir = dir.Unit(); + + if (opbox == 0) luttheta = dir.Angle(TVector3(-1, 0, 0)); + else luttheta = dir.Angle(TVector3(1, 0, 0)); + if (r) luttheta = TMath::Pi() - luttheta; + + len = fabs(lenx / cos(luttheta)); + lenz = fabs(len * dir.Z()); + leny = fabs(len * dir.Y()); + bartime = lenx / cos(luttheta) / 19.65; // 19.7 203.767 for 1.47125 + luttime = bartime + evtime; + diftime = hitTime - luttime; + + tangle = momInBar.Angle(dir); + // if (spath.BeginsWith("32")) tangle += -0.0015; + // if (spath.BeginsWith("42")) tangle += -0.002; + // if (spath.BeginsWith("31")) tangle += 0.0005; + // if (spath.BeginsWith("41")) tangle += 0.0005; + + if (r) tangle += acorrAD[bar][bin][pmt]; // per PMT corr + else tangle += acorrAD[bar][bin][pmt]; + + if (fabs(diftime) < 2.0 && cor_level > 0) tangle -= 0.0025 * diftime; // chrom corr + + hTime->Fill(hitTime); + hCalc->Fill(luttime); + // if(dir.Theta()>TMath::PiOver2()) continue; + // if(fabs(tangle-mAngle[2])>0.02) continue; + // std::cout<Fill(diftime); + hDiffT->Fill(diftime); + if (r) hDiffR->Fill(diftime); + else hDiffD->Fill(diftime); + } + + if (fabs(diftime) > cut_tdiff + luttime * 0.025) continue; + + adiff = (tangle - mAngle[pdgId]) * 1000; + if (pdgId == 2 && fabs(adiff) < 50) { + hLutCorr->Fill(lphi, ltheta); + hCorrLut[iphi][itheta]->Fill(adiff); + + if (r) { + if (fabs(diftime) < 2) hCorrAD[bar][bin][pmt]->Fill(adiff); + if (adiff < 
20) hCorrTR[bar][bin][pmt]->Fill(diftime); + } else { + hCorrAD[bar][bin][pmt]->Fill(adiff); + hCorrTD[bar][bin][pmt]->Fill(diftime); + } + } + + hChrom->Fill(diftime, adiff); + + if (pdgId == 2) hAngleU[u]->Fill(tangle, weight); + hAngle[pdgId]->Fill(tangle, weight); + + if (fabs(tangle - mAngle[2]) > 1 * cut_cangle && + fabs(tangle - mAngle[3]) > 1 * cut_cangle) + continue; // 8 + + if (1 && pdgId == 2) { + TVector3 rdir = TVector3(dir.Y(), dir.X(), dir.Z()); + cz = momInBar.Unit(); + rdir.RotateUz(cz); + + double rangle = tangle - mangle(2, momentum) + mangle(2, 3); + double lphi = rdir.Phi(); + double tx = rangle * TMath::Sin(lphi); // rdir.Theta(); + double ty = rangle * TMath::Cos(lphi); + double tp = asin(ty / rangle); + if (tx < 0) tp = -asin(ty / rangle) + TMath::Pi(); + tp *= TMath::RadToDeg(); + tp += 90; + + // if(fabs(tp-300)<5) continue; + // double tt = rdir.Theta(); + if (fabs(tangle - mAngle[2]) < 0.012) hRing->Fill(tx, ty); + + // for cherenckov circle fit + cagr.SetPoint(cagr.GetN(), tx, ty); + } + + isGood = true; + + // if(r && pdgId==2) + h2Time->Fill(hitTime, diftime); + // hTime->Fill(hitTime); + // hCalc->Fill(luttime); + + sum1 += weight * TMath::Log(fAngle[2]->Eval(tangle) + noise); + sum2 += weight * TMath::Log(fAngle[3]->Eval(tangle) + noise); + + if (0) { + TString x = (sum1 > sum2) ? " <====== PION" : ""; + std::cout << Form("%1.6f %1.6f | %1.6f %1.6f pid %d", + TMath::Log(fAngle[2]->Eval(tangle) + noise), + TMath::Log(fAngle[3]->Eval(tangle) + noise), sum1, sum2, pdgId) + << " " << std::endl; + + cc->cd(); + fAngle[2]->Draw(""); + fAngle[3]->Draw("same"); + + cc->Update(); + gLine->SetLineWidth(2); + gLine->SetX1(tangle); + gLine->SetX2(tangle); + gLine->SetY1(cc->GetUymin()); + gLine->SetY2(cc->GetUymax()); + gLine->Draw(); + cc->Update(); + cc->WaitPrimitive(); + } + } + } + } + + if (isGood) { + nph++; + if (pmt < 108) { + glx_hdigi[pmt]->Fill(pix % 8, pix / 8); + goodevt = 1; + } + } + } + + if (goodevt) evtcount++; + + if (nph < 10) continue; + hNph[pdgId]->Fill(nph); + hNphC->Fill(nphc); + + if (0 && pdgId == 2) { + // double xangle = glx_fit(hAngle[pdgId],0.02,10,0.008,1,"Q0").X(); + // if(xangle>0.84) continue; + // hCMom[pdgId]->Fill(momentum,xangle); + // hAngle[pdgId]->Reset(); + + double sigmat = glx_fit(hAngle[pdgId], 0.02, 10, 0.008, 1, "Q0").Y(); + // hSpr->Fill(x,y,sigmat); + hAngle[pdgId]->Reset(); + } + + // auto cdigi = glx_drawDigi(); + // cdigi->SetName(Form("hp_k_%d", evtcount)); + // glx_canvasAdd(cdigi); + // glx_canvasSave("data/reco_lut_02_scan_sel_02_single", 2); + // glx_resetDigi(); + + double sum = sum1 - sum2; + hLnDiff[pdgId]->Fill(sum); + + count[pdgId]++; + + if (0) { + // if(!cc) + TString x = (sum1 > sum2) ? 
" <====== Pion" : ""; + // std::cout<GetPdg() << " sum1 " << sum1 << " sum2 " << sum2 + << " sum " << sum << " " << x << std::endl; + + cc->cd(); + + // if(hAngle[pdgId]->GetMaximum()>0) hAngle[pdgId]->Scale(1/hAngle[pdgId]->GetMaximum()); + + hAngle[pdgId]->Draw(""); + fAngle[2]->Draw("same"); + fAngle[3]->Draw("same"); + + cc->Update(); + TLine *line = new TLine(0, 0, 0, 1000); + line->SetX1(mAngle[3]); + line->SetX2(mAngle[3]); + line->SetY1(cc->GetUymin()); + line->SetY2(cc->GetUymax()); + line->SetLineColor(kRed); + line->Draw(); + + TLine *line2 = new TLine(0, 0, 0, 1000); + line2->SetX1(mAngle[2]); + line2->SetX2(mAngle[2]); + line2->SetY1(cc->GetUymin()); + line2->SetY2(cc->GetUymax()); + line2->SetLineColor(kBlue); + line2->Draw(); + + cc->Update(); + cc->WaitPrimitive(); + } + } + } + + if (evtcount > 0) { + for (int i = 0; i < glx_nch; i++) { + int pmt = i / 64; + int pix = i % 64; + double rel = glx_hdigi[pmt]->GetBinContent(pix % 8 + 1, pix / 8 + 1) / (double)evtcount; + glx_hdigi[pmt]->SetBinContent(pix % 8 + 1, pix / 8 + 1, rel); + } + } + + // TString nid = Form("_%d_%d_%2.1f_%2.4f_%2.3f_%2.3f", ibar, ibin, moms, scan, dx, d); + TString nid = Form("_%d_%d_%2.1f_%2.1f_PL", ibar, ibin, moms, scan); + if (sim) nid = "_simM" + nid; + nid.ReplaceAll("-", "m"); + nid.ReplaceAll(".", "d"); + + double nph = 0, nphm = 0, maxTD, maxTR, maxTT, sep = 0, esep = 0; + + if (anglecorr == 2 && cor_level < 2) { // per pmt correction + + TCanvas *canv_angle, *canv_time; + double zcorrAD, zsigmaAD, zcorrAR, zsigmaAR, zcorrTD, zsigmaTD, zcorrTR, zsigmaTR; + int bar, bin, pmt, level; + TFile fc(corrfile, "recreate"); + TTree *tc = new TTree("corr", "corr"); + tc->Branch("zcorrAD", &zcorrAD, "zcorrAD/D"); + tc->Branch("zcorrAR", &zcorrAR, "zcorrAR/D"); + tc->Branch("zcorrTD", &zcorrTD, "zcorrTD/D"); + tc->Branch("zcorrTR", &zcorrTR, "zcorrTR/D"); + tc->Branch("zsigmaAD", &zsigmaAD, "zsigmaAD/D"); + tc->Branch("zsigmaAR", &zsigmaAR, "zsigmaAR/D"); + tc->Branch("zsigmaTD", &zsigmaTD, "zsigmaTD/D"); + tc->Branch("zsigmaTR", &zsigmaTR, "zsigmaTR/D"); + tc->Branch("bar", &bar, "bar/I"); + tc->Branch("bin", &bin, "bin/I"); + tc->Branch("pmt", &pmt, "pmt/I"); + tc->Branch("level", &level, "level/I"); + + for (bar = 0; bar < 48; bar++) { + for (bin = 0; bin < nbins; bin++) { + for (pmt = 0; pmt < glx_npmt; pmt++) { + zcorrAD = 0; + zcorrTD = 0; + zsigmaAD = 0; + zsigmaTD = 0; + zcorrAR = 0; + zcorrTR = 0; + zsigmaAR = 0; + zsigmaTR = 0; + double xmax = 0; + + if (hCorrAD[bar][bin][pmt]->GetEntries() < 200) continue; + + glx_normalize(hCorrAD[bar][bin][pmt], hCorrAR[bar][bin][pmt]); + glx_normalize(hCorrTD[bar][bin][pmt], hCorrTR[bar][bin][pmt]); + + if (cor_level == 1) { + fit->SetParLimits(0, 0, 1000000); + fit->SetParameter(1, xmax); + fit->SetParLimits(1, xmax - 10, xmax + 10); + fit->SetParLimits(2, 5, 8); + + if (hCorrAD[bar][bin][pmt]->GetEntries() > 200) { + hCorrAD[bar][bin][pmt]->Fit("fgaus", "NQ", "", xmax - 30, xmax + 30); + zcorrAD = -fit->GetParameter(1); + zsigmaAD = fit->GetParameter(2); + } + if (hCorrAR[bar][bin][pmt]->GetEntries() > 200) { + hCorrAR[bar][bin][pmt]->Fit("fgaus", "NQ", "", xmax - 30, xmax + 30); + zcorrAR = -fit->GetParameter(1); + zsigmaAR = fit->GetParameter(2); + } + level = 2; + } + + if (cor_level == 0) { + fit->SetParameter(1, xmax); + fit->SetParLimits(1, xmax - 4, xmax + 4); + fit->SetParameter(2, 0.5); + fit->SetParLimits(2, 0.2, 1.0); // width + fit->FixParameter(3, 0); + fit->FixParameter(4, 0); + if (hCorrTD[bar][bin][pmt]->GetEntries() > 200) { + auto ff = 
hCorrTD[bar][bin][pmt]->Fit("gaus", "SNQ", "", -1.5, 1.5); + zcorrTD = -ff->Parameter(1); + zsigmaTD = ff->Parameter(2); + } + if (hCorrTR[bar][bin][pmt]->GetEntries() > 200) { + auto ff = hCorrTR[bar][bin][pmt]->Fit("gaus", "SNQ", "", -2, 2); + zcorrTR = -ff->Parameter(1); + zsigmaTR = ff->Parameter(2); + } + level = 1; + } else { + zcorrTD = acorrTD[bar][bin][pmt]; + zcorrTR = acorrTR[bar][bin][pmt]; + } + + std::cout << "L " << cor_level << " bar = " << bar << " bin = " << bin << " pmt = " << pmt + << Form(" ad %-8.5f ar %-8.5f", zcorrAD, zcorrAR) + << Form(" td %-8.5f tr %-8.5f", zcorrTD, zcorrTR) << std::endl; + + if (1) { + canv_angle = glx_canvasAddOrGet("canv_angle"); + canv_angle->cd(); + hCorrAD[bar][bin][pmt]->Fit("fgaus", "Q", "", xmax - 30, xmax + 30); + hCorrAR[bar][bin][pmt]->Fit("fgaus", "Q", "", xmax - 30, xmax + 30); + gStyle->SetOptStat(1); + gStyle->SetOptFit(1); + gStyle->SetOptTitle(1); + hCorrAD[bar][bin][pmt]->Draw(); + hCorrAR[bar][bin][pmt]->SetLineColor(kRed); + hCorrAR[bar][bin][pmt]->Draw("same"); + gPad->Modified(); + gPad->Update(); + + canv_time = glx_canvasAddOrGet("canv_time"); + canv_time->cd(); + hCorrTD[bar][bin][pmt]->Fit("gaus", "Q", "", -1.5, 1.5); + hCorrTR[bar][bin][pmt]->Fit("gaus", "Q", "", -2, 2); + hCorrTD[bar][bin][pmt]->Draw(); + hCorrTR[bar][bin][pmt]->SetLineColor(kRed); + hCorrTR[bar][bin][pmt]->Draw("same"); + // cc->Print(Form("data/corr_pmt/corr_pmt_%d_%d.png",bar,pmt)); + gPad->Update(); + gPad->WaitPrimitive(); + } + tc->Fill(); + } + } + } + + std::cout << "--- writing " << corrfile << std::endl; + tc->Write(); + fc.Write(); + fc.Close(); + } + + { + // lut correction + // for(int b=0; bGetEntries()<100) continue; + + // //glx_canvasAdd(hCorrLut[b][p]->GetName(),800,800); + + // double xmax = 0; + // fit->SetParLimits(0,0,1000000); + // fit->SetParameter(1,xmax); + // fit->SetParLimits(1,xmax-10,xmax+10); + // fit->SetParLimits(2,5,8); // width + + // hCorrLut[b][p]->Fit("fgaus","M","",-30,30); + // hLutCorr1->SetBinContent(b+1, p+1, fit->GetParameter(1)); + // //hCorrLut[b][p]->Draw(); + + // // gPad->Update(); + // // gPad->WaitPrimitive(); + // } + // } + + // glx_canvasAdd("lutcorr"+nid,800,400); + // hLutCorr->Draw("colz"); + + // glx_canvasAdd("lutcorr1",800,400); + // hLutCorr1->Draw("colz"); + } + + { // ring + glx_canvasAdd("ring" + nid, 800, 800); + hRing->Draw("colz"); + + double x0(0), y0(0), theta(mangle(2, 3)); + FitRing(x0, y0, theta, cagr); + + TVector3 rcorr(x0, y0, 1 - TMath::Sqrt(x0 * x0 + y0 * y0)); + std::cout << "Tcorr " << rcorr.Theta() << " Pcorr " << rcorr.Phi() << std::endl; + TLegend *legr = new TLegend(0.25, 0.4, 0.65, 0.6); + legr->SetFillStyle(0); + legr->SetBorderSize(0); + legr->AddEntry((TObject *)0, Form("Entries %0.0f", hRing->GetEntries()), ""); + legr->AddEntry((TObject *)0, Form("#Delta#theta_{c} %f [mrad]", rcorr.Theta() * 1000), ""); + legr->AddEntry((TObject *)0, Form("#varphi_{c} %f [rad]", rcorr.Phi()), ""); + legr->Draw(); + + TArc *arc = new TArc(x0, y0, theta); + std::cout << "XXXXXXXXXXXXXXX x0 " << x0 << " " << y0 << std::endl; + + arc->SetLineColor(kBlack); + arc->SetLineWidth(2); + arc->SetFillStyle(0); + arc->Draw(); + TArc *arcI = new TArc(0, 0, theta); + arcI->SetLineColor(kGreen); + arcI->SetLineWidth(2); + arcI->SetFillStyle(0); + // arcI->Draw(); + } + + { // hp + auto cdigi = glx_drawDigi(); + cdigi->SetName("hp" + nid); + glx_canvasAdd(cdigi); + } + + { // angle + glx_canvasAdd("angle" + nid, 800, 400); + + if (hAngle[2]->GetMaximum() > 0) hAngle[2]->Scale(1 / 
hAngle[2]->GetMaximum()); + if (hAngle[3]->GetMaximum() > 0) hAngle[3]->Scale(1 / hAngle[3]->GetMaximum()); + + for (int i = 0; i < 5; i++) { + if (hAngle[i]->GetEntries() < 20) continue; + + int nfound = spect->Search(hAngle[i], 1, "goff", 0.9); + if (nfound > 0) cherenkovreco[i] = spect->GetPositionX()[0]; + else cherenkovreco[i] = hAngle[i]->GetXaxis()->GetBinCenter(hAngle[i]->GetMaximumBin()); + if (cherenkovreco[i] > 0.85) cherenkovreco[i] = 0.82; + + if (i == 2) fit->SetLineColor(kBlue); + if (i == 3) fit->SetLineColor(kRed); + fit->SetParameters(100, cherenkovreco[i], 0.010, 10); + fit->SetParNames("p0", "#theta_{c}", "#sigma_{c}", "p3", "p4"); + fit->SetParLimits(0, 0.1, 1E6); + double frange = 3.5 * 0.008; + fit->SetParLimits(1, cherenkovreco[i] - frange, cherenkovreco[i] + frange); + fit->SetParLimits(2, 0.004, 0.030); // width + hAngle[i]->Fit("fgaus", "I", "", cherenkovreco[i] - frange, cherenkovreco[i] + frange); + hAngle[i]->Fit("fgaus", "M", "", cherenkovreco[i] - frange, cherenkovreco[i] + frange); + + cherenkovreco[i] = fit->GetParameter(1); + cherenkovreco_err[i] = fit->GetParError(1); + spr[i] = fit->GetParameter(2); + } + + gStyle->SetOptTitle(0); + gStyle->SetOptStat(0); + gStyle->SetOptFit(0); + + hAngle[2]->GetXaxis()->SetRangeUser(0.7, 0.9); + // hAngle[2]->GetXaxis()->SetRangeUser(0.72,0.86); + hAngle[2]->GetYaxis()->SetRangeUser(0, 1.2); + hAngle[2]->Draw(""); + hAngle[3]->Draw("same"); + + hAngleU[0]->SetLineColor(kRed + 2); + hAngleU[1]->SetLineColor(kYellow + 1); + hAngleU[2]->SetLineColor(kGreen + 1); + hAngleU[3]->SetLineColor(kOrange + 1); + + // for(auto i=0; i<4; i++) { + // if(hAngleU[i]->GetMaximum()>0) hAngleU[i]->Scale(1/hAngleU[i]->GetMaximum()); + // hAngleU[i]->Draw("same"); + // } + + // fAngle[3]->Draw("same"); + // fAngle[2]->Draw("same"); + + double mm = moms; + int c[5] = {0, 0, kBlue, kRed, 0}; + for (int i : {2, 3}) { + mAngle[i] = mangle(i, mm); + auto l = new TLine(0, 0, 0, 1000); + l->SetX1(mAngle[i]); + l->SetX2(mAngle[i]); + l->SetY1(0); + l->SetY2(1.2); + l->SetLineColor(c[i]); + l->Draw(); + + // auto lr = new TLine(0, 0, 0, 1000); + // lr->SetX1(cherenkovreco[i]); + // lr->SetX2(cherenkovreco[i]); + // lr->SetY1(0); + // lr->SetY2(1.2); + // lr->SetLineStyle(2); + // lr->SetLineColor(c[i]); + // lr->Draw(); + } + + // TLine *line3 = new TLine(0,0,0,1000); + // line3->SetLineStyle(2); + // line3->SetX1(mAngle[2]+cut_cangle); + // line3->SetX2(mAngle[2]+cut_cangle); + // line3->SetY1(0); + // line3->SetY2(1.2); + // line3->SetLineColor(1); + // line3->Draw(); + + // TLine *line4 = new TLine(0,0,0,1000); + // line4->SetLineStyle(2); + // line4->SetX1(mAngle[3]-cut_cangle); + // line4->SetX2(mAngle[3]-cut_cangle); + // line4->SetY1(0); + // line4->SetY2(1.2); + // line4->SetLineColor(1); + // line4->Draw(); + + TLegend *leg = new TLegend(0.1, 0.5, 0.4, 0.85); + leg->SetFillColor(0); + leg->SetFillStyle(0); + leg->SetBorderSize(0); + leg->SetFillStyle(0); + leg->AddEntry(hAngle[2], Form("#theta_{c}^{#pi} = %2.4f rad", cherenkovreco[2]), ""); + leg->AddEntry(hAngle[3], Form("#theta_{c}^{K} = %2.4f rad", cherenkovreco[3]), ""); + leg->AddEntry(hAngle[2], Form("#sigma_{c}^{#pi} = %2.1f mrad", spr[2] * 1000), ""); + leg->AddEntry(hAngle[3], Form("#sigma_{c}^{K} = %2.1f mrad", spr[3] * 1000), ""); + leg->Draw(); + + TLegend *lnpa = new TLegend(0.7, 0.67, 0.9, 0.85); + lnpa->SetFillColor(0); + lnpa->SetFillStyle(0); + lnpa->SetBorderSize(0); + lnpa->SetFillStyle(0); + lnpa->AddEntry(hAngle[2], "pions", "lp"); + lnpa->AddEntry(hAngle[3], "kaons", 
"lp"); + lnpa->Draw(); + + // fAngle[2]->Draw("same"); + // fAngle[3]->Draw("same"); + } + + { // time + glx_canvasAdd("time_2d" + nid, 800, 400); + h2Time->Draw("colz"); + + glx_canvasAdd("time" + nid, 800, 400); + hTime->Draw(); + hCalc->SetLineColor(2); + hCalc->Draw("same"); + TLegend *leg1 = new TLegend(0.5, 0.6, 0.85, 0.80); + leg1->SetFillColor(0); + leg1->SetFillStyle(0); + leg1->SetBorderSize(0); + leg1->SetFillStyle(0); + leg1->AddEntry(hTime, "measured", "lp"); + leg1->AddEntry(hCalc, "calculated", "lp"); + leg1->Draw(); + + glx_canvasAdd("time_diff" + nid, 800, 400); + hDiff->SetLineColor(kBlack); + hDiff->Draw(); + + // hDiffT->SetLineColor(kRed+1); + // hDiffT->Draw("same"); + hDiffD->SetLineColor(kGreen + 2); + hDiffD->Draw("same"); + hDiffR->SetLineColor(kBlue + 1); + hDiffR->Draw("same"); + + maxTD = hDiffD->GetXaxis()->GetBinCenter(hDiffD->GetMaximumBin()); + maxTR = hDiffR->GetXaxis()->GetBinCenter(hDiffR->GetMaximumBin()); + maxTT = hTime->GetXaxis()->GetBinCenter(hTime->GetMaximumBin()); + + // maxTD = glx_fit(hDiffD, 2.2).X(); + // maxTR = glx_fit(hDiffR, 2.2).X(); + + TLine *line = new TLine(0, 0, 0, 1000); + line->SetLineStyle(2); + line->SetX1(-cut_tdiff); + line->SetX2(-cut_tdiff); + line->SetY1(0); + line->SetY2(hDiff->GetMaximum() + 0.05 * hDiff->GetMaximum()); + line->SetLineColor(1); + // line->Draw(); + + TLine *line2 = new TLine(0, 0, 0, 1000); + line2->SetLineStyle(2); + line2->SetX1(cut_tdiff); + line2->SetX2(cut_tdiff); + line2->SetY1(0); + line2->SetY2(hDiff->GetMaximum() + 0.05 * hDiff->GetMaximum()); + line2->SetLineColor(1); + // line2->Draw(); + + TLegend *leg2 = new TLegend(0.6, 0.57, 0.9, 0.85); + leg2->SetFillColor(0); + leg2->SetFillStyle(0); + leg2->SetBorderSize(0); + leg2->SetFillStyle(0); + leg2->AddEntry(hDiff, "all", "lp"); + // leg2->AddEntry(hDiffT,"MC path in EV","lp"); + // leg2->AddEntry(hDiffD,"MC path in EV for direct photons","lp"); + // leg2->AddEntry(hDiffR,"MC path in EV for reflected photons","lp"); + leg2->AddEntry(hDiffD, "direct photons", "lp"); + leg2->AddEntry(hDiffR, "reflected photons", "lp"); + + leg2->Draw(); + } + + { // yield + glx_canvasAdd("nph" + nid, 800, 400); + + if (hNph[2]->GetEntries() > 50) { + nph = glx_fit(hNph[2], 50, 30, 50).X(); + nphm = hNph[2]->GetMean(); + auto rfit = hNph[2]->GetFunction("glx_gaust"); + if (rfit) rfit->SetLineColor(kBlue + 1); + hNph[2]->SetLineColor(kBlue); + + glx_fit(hNph[3], 40, 100, 40).X(); + rfit = hNph[3]->GetFunction("glx_gaust"); + if (rfit) rfit->SetLineColor(kRed + 1); + hNph[3]->SetLineColor(kRed); + + hNph[2]->Draw(); + hNph[3]->Draw("same"); + } + // hNphC->SetLineColor(kBlack); + // hNphC->Draw("same"); + + TLegend *lnph = new TLegend(0.6, 0.65, 0.9, 0.85); + lnph->SetFillColor(0); + lnph->SetFillStyle(0); + lnph->SetBorderSize(0); + lnph->SetFillStyle(0); + // lnph->AddEntry(hNphC,"simulated","lp"); + lnph->AddEntry(hNph[2], "pions", "lp"); + lnph->AddEntry(hNph[3], "kaons", "lp"); + lnph->Draw(); + } + + { // LH + glx_canvasAdd("lndiff" + nid, 800, 400); + TF1 *ff; + double m1 = 0, m2 = 0, s1 = 0, s2 = 0; + if (hLnDiff[3]->GetEntries() > 20) { + hLnDiff[3]->Fit("gaus", "S"); + ff = hLnDiff[3]->GetFunction("gaus"); + ff->SetLineColor(1); + m1 = ff->GetParameter(1); + s1 = ff->GetParameter(2); + } + if (hLnDiff[2]->GetEntries() > 20) { + hLnDiff[2]->Fit("gaus", "S"); + ff = hLnDiff[2]->GetFunction("gaus"); + ff->SetLineColor(1); + m2 = ff->GetParameter(1); + s2 = ff->GetParameter(2); + } + if (s1 > 0 && s2 > 0) sep = (fabs(m2 - m1)) / (0.5 * (s1 + s2)); + + 
hLnDiff[2]->SetTitle(Form("sep = %2.2f s.d.", sep)); + hLnDiff[2]->Draw(); + hLnDiff[3]->Draw("same"); + + gStyle->SetOptTitle(1); + TLegend *lnpl = new TLegend(0.7, 0.67, 0.9, 0.85); + lnpl->SetFillColor(0); + lnpl->SetFillStyle(0); + lnpl->SetBorderSize(0); + lnpl->SetFillStyle(0); + lnpl->AddEntry(hLnDiff[2], "pions", "lp"); + lnpl->AddEntry(hLnDiff[3], "kaons", "lp"); + lnpl->Draw(); + + std::cout << "separation = " << sep << " nph = " << nph << std::endl; + std::cout << "maxTD " << maxTD << " maxTR " << maxTR << std::endl; + glx_separation(hLnDiff[2], hLnDiff[3]); + } + + { + // kinematics + + // glx_canvasAdd("hKin"+nid,800,400); + // hrho->Draw(); + // hphi->SetLineColor(kRed); + // hphi->Draw("same"); + + // glx_canvasAdd("hCMom"+nid,800,400); + // hCMom[2]->SetMarkerColor(kBlue); + // hCMom[2]->Draw(); + // hCMom[3]->SetMarkerColor(kRed); + // hCMom[3]->Draw("same"); + // hCMom[4]->Draw("colz same"); + } + + { // wall + glx_canvasAdd("wall" + nid, 800, 800); + hWall->Draw("colz"); + double pos[] = {0.382, 0.185, 0.618, 0.815}; + double w = 0.085; + TBox *pbox[4]; + for (int i = 0; i < 4; i++) { + pbox[i] = new TBox(0.1, pos[i] - w, 0.9, pos[i] + w); + pbox[i]->Draw(); + } + } + + { + // wall + // glx_canvasAdd("wall_pos",800,800); + // for(int i=0; i<=(hWallPos->GetNbinsX()+1)*(hWallPos->GetNbinsY()+1); i++){ + // double c = hWallPos->GetBinContent(i); + // if(c!=0 && wallb[i]!=0) hWallPos->SetBinContent(i,c/wallb[i]); + // else hWallPos->SetBinContent(i,-100); + // } + // hWallPos->SetMaximum(1); + // hWallPos->SetMinimum(-1); + // hWallPos->Draw("colz"); + } + + { // chromatic corrections + glx_canvasAdd("chrom" + nid, 800, 400); + hChrom->Draw("colz"); + } + + { // track multiplicity + glx_canvasAdd("mult" + nid, 800, 400); + hMult->Draw(); + } + + glx_canvasSave("data/reco_lut_02_scan_sel_05_barscan_lr", 2); + + { // tree + TString out = glx_savepath + "/res" + nid + ".root"; + TFile fc(out, "recreate"); + TTree *tc = new TTree("reco", "reco"); + // tc->Branch("theta",&theta,"theta/D"); + // tc->Branch("phi",&phi,"prt_phi/D"); + tc->Branch("sep", &sep, "sep/D"); + tc->Branch("esep", &esep, "esep/D"); + tc->Branch("moms", &moms, "prt_mom/D"); + tc->Branch("bar", &ibar, "ibar/I"); + tc->Branch("bin", &ibin, "ibin/I"); + tc->Branch("nph", &nph, "nph/D"); + tc->Branch("nphm", &nphm, "nphm/D"); + tc->Branch("spr", &spr[3], "spr/D"); + tc->Branch("maxTD", &maxTD, "maxTD/D"); + tc->Branch("maxTR", &maxTR, "maxTR/D"); + tc->Branch("maxTT", &maxTT, "maxTT/D"); + tc->Branch("scan", &scan, "scan/D"); + tc->Branch("dx", &dx, "dx/D"); + tc->Branch("dy", &dy, "dy/D"); + + tc->Branch("cangle2", &cherenkovreco[2], "cangle2/D"); + tc->Branch("cangle3", &cherenkovreco[3], "cangle3/D"); + tc->Branch("cangle4", &cherenkovreco[4], "cangle4/D"); + tc->Branch("cangle2_err", &cherenkovreco_err[2], "cangle2_err/D"); + tc->Branch("cangle3_err", &cherenkovreco_err[3], "cangle3_err/D"); + tc->Branch("cangle4_err", &cherenkovreco_err[4], "cangle4_err/D"); + + tc->Branch("spr2", &spr[2], "spr2/D"); + tc->Branch("spr3", &spr[3], "spr3/D"); + tc->Branch("spr4", &spr[4], "spr4/D"); + + tc->Fill(); + tc->Write(); + fc.Write(); + fc.Close(); + + std::cout << "tree saved in " << out << std::endl; + } +} + +int main() { + reco_lut_02(); +} diff --git a/disk_management/.gitignore b/disk_management/.gitignore new file mode 100644 index 00000000..5d093e60 --- /dev/null +++ b/disk_management/.gitignore @@ -0,0 +1,2 @@ +*_user_reports.txt +*.html diff --git a/disk_management/create_disk_report.sh 
b/disk_management/create_disk_report.sh index a8817486..c9e2f8a8 100755 --- a/disk_management/create_disk_report.sh +++ b/disk_management/create_disk_report.sh @@ -7,7 +7,7 @@ webdir=/group/halld/www/halldweb/html/disk_management dmdir=/group/halld/Software/hd_utilities/disk_management $dmdir/disk_database.pl $dir $token mkdir -pv $temp_output_dir -$dmdir/disk_report.pl $token > $temp_output_dir/${token}_report.html -$dmdir/disk_report_users.pl $token $temp_output_dir +cd $temp_output_dir +$dmdir/disk_report.pl -n 20 -m 50 $token cp -pv $temp_output_dir/*.html $webdir/ diff --git a/disk_management/disk_database.pl b/disk_management/disk_database.pl index a42a93b2..1345735d 100755 --- a/disk_management/disk_database.pl +++ b/disk_management/disk_database.pl @@ -52,6 +52,7 @@ filename varchar(256), dirId int, atime datetime, + mtime datetime, size bigint, uid smallint );"; @@ -102,7 +103,7 @@ $file_no_path = $token[$#token]; #print "file_no_path = $file_no_path\n"; $file_no_path =~ s/\\/\\\\/g; # escape backslash - $sql = "insert into $file_table set filename = \"$file_no_path\", dirId = $last_id, atime = from_unixtime($stat[8]), size = $stat[7], uid = $stat[4];"; + $sql = "insert into $file_table set filename = \"$file_no_path\", dirId = $last_id, atime = from_unixtime($stat[8]), mtime = from_unixtime($stat[9]), size = $stat[7], uid = $stat[4];"; make_query($dbh_db, \$sth); } else { print "cannot stat $filename in $dirname\n"; diff --git a/disk_management/disk_management_work_max2.sql.gz b/disk_management/disk_management_work_max2.sql.gz new file mode 100644 index 00000000..21045294 Binary files /dev/null and b/disk_management/disk_management_work_max2.sql.gz differ diff --git a/disk_management/disk_report.pl b/disk_management/disk_report.pl index 69baa4c9..cd409973 100755 --- a/disk_management/disk_report.pl +++ b/disk_management/disk_report.pl @@ -6,6 +6,8 @@ parse_options(); +$this_script = $0; + $directory_label = $ARGV[0]; $dir_table = $directory_label . "_dir"; $file_table = $directory_label . "_file"; @@ -20,9 +22,16 @@ $dbh = DBI->connect("DBI:mysql:$database:$hostname", $user, $password); +if ($username) { + $html_file_name = "${directory_label}_${username}_report.html"; +} else { + $html_file_name = "${directory_label}_report.html"; +} +open(HTML, ">$html_file_name"); + $q = new CGI; # create new CGI object $title = "Disk Usage Report: $directory_label"; -print +print HTML $q->start_html($title), # start the HTML $q->h1($title), # level 1 header "
File ages are from last access time.\n"; @@ -30,7 +39,7 @@ $sql = "select updateTime from $update_time_table;"; make_query($dbh, \$sth); @row = $sth->fetchrow_array; -print "
Update time: $row[0]\n"; +print HTML "
Update time: $row[0]\n"; if ($userid) { $user_file_clause = "and $file_table.uid = $userid"; @@ -38,174 +47,199 @@ } else { $user_file_clause = ""; $user_dir_clause = ""; + %users_to_report = (); } -print $q->h2("Largest Files"), "\n"; - ############# - -$sql = "select $file_table.size, filename, atime, $file_table.uid, dirname from $dir_table, $file_table where $dir_table.id = dirId $user_file_clause order by $file_table.size desc limit 10;"; -make_query($dbh, \$sth); -print "
"$1""$2"
\n"; -print "
RankSize (GB)FileLast Access TimeOwnerDirectory\n"; -$i = 1; -while (@row = $sth->fetchrow_array) { - $size = round($row[0]/1.e9); - $user = getpwuid($row[3]); - if (! $user) {$user = "uid=$row[3]";} - print "
$i$size$row[1]$row[2]$user$row[4]\n"; - $i++; -} -print "
\n"; +%largest_files_hash = (); +$largest_files_hash{title} = "Largest Files"; +$largest_files_hash{comment} = ""; +@lf_headings = ("Rank", "Size (GB)", "File", "Last Access Time", "User", "Directory"); +$largest_files_hash{headings} = \@lf_headings; +$largest_files_hash{query} = "select format($file_table.size/1.e9, 2), filename, atime, $file_table.uid, dirname + from $dir_table, $file_table + where $dir_table.id = dirId $user_file_clause + order by $file_table.size desc + limit $nlines;"; -print $q->h2("Oldest Files"), "\n"; - ############ +%oldest_files_hash = (); +$oldest_files_hash{title} = "Oldest Files"; +$oldest_files_hash{comment} = ""; +@of_headings = ("Rank", "Last Access Time", "File", "Size (GB)", "User", "Directory"); +$oldest_files_hash{headings} = \@of_headings; +$oldest_files_hash{query} = "select atime, filename, format($file_table.size/1.e9, 2), $file_table.uid, dirname + from $dir_table, $file_table + where $dir_table.id = dirId $user_file_clause + order by atime + limit $nlines;"; -$sql = "select atime, filename, $file_table.size, $file_table.uid, dirname from $dir_table, $file_table where $dir_table.id = dirId $user_file_clause order by atime limit 10;"; -make_query($dbh, \$sth); -print "\n"; -print "
RankLast Access TimeFileSize (GB)OwnerDirectory\n"; -$i = 1; -while (@row = $sth->fetchrow_array) { - $size = round($row[2]/1.e9); - $user = getpwuid($row[3]); - if (! $user) {$user = "uid=$row[3]";} - print "
$i$row[0]$row[1]$size$user$row[4]\n"; - $i++; -} -print "
\n"; +%sizagest_files_hash = (); +$sizagest_files_hash{title} = "Files with Greatest Size × Age"; +$sizagest_files_hash{comment} = ""; +@saf_headings = ("Rank", "Size×Age (GB-years)", "File", "Size (GB)", "Last Access Time", "User", "Directory"); +$sizagest_files_hash{headings} = \@saf_headings; +$sizagest_files_hash{query} = "select format(($file_table.size*1.e-9)*(unix_timestamp(now()) - unix_timestamp(atime))/$seconds_per_year, 2) as gby, filename, format($file_table.size/1.e9, 2), atime, $file_table.uid, dirname + from $dir_table, $file_table + where $dir_table.id = dirId $user_file_clause + order by cast($file_table.size as double)*cast((unix_timestamp(now()) - unix_timestamp(atime)) as double) desc + limit $nlines;"; -print $q->h2("Files with Greatest Size × Age"), "\n"; - #################################### +%largest_dirs_hash = (); +$largest_dirs_hash{title} = "Largest Directories"; +$largest_dirs_hash{comment} = "Excludes files in sub-directories"; +@ld_headings = ("Rank", "Size (GB)", "Directory", "User"); +$largest_dirs_hash{headings} = \@ld_headings; +$largest_dirs_hash{query} = "select format(sum($file_table.size)/1.e9, 2) as dirsize, dirname, $dir_table.uid + from $dir_table, $file_table + where dirId = $dir_table.id $user_dir_clause + group by dirId + order by sum(cast($file_table.size as double)) desc + limit $nlines;"; -$sql = "select ($file_table.size*1.e-9)*(unix_timestamp(now()) - unix_timestamp(atime)) as gby, filename, $file_table.size, atime, $dir_table.uid, dirname from $dir_table, $file_table where $dir_table.id = dirId $user_file_clause order by gby desc limit 10;"; -make_query($dbh, \$sth); -print "\n"; -print "
RankSize×Age (GB-years)FileSize (GB)Last Access TimeOwnerDirectory\n"; -$i = 1; -while (@row = $sth->fetchrow_array) { - $age_size = round($row[0]/$seconds_per_year); - $size = round($row[2]/1.e9); - $user = getpwuid($row[4]); - if (! $user) {$user = "uid=$row[4]";} - print "
$i$age_size$row[1]$size$row[3]$user$row[5]\n"; - $i++; -} -print "
\n"; +%oldest_dirs_hash = (); +$oldest_dirs_hash{title} = "Directories with Greatest Average Age"; +$oldest_dirs_hash{comment} = "Excludes files in sub-directories, size-weighted average age of all files in directory"; +@od_headings = ("Rank", "Age (years)", "Directory", "User"); +$oldest_dirs_hash{headings} = \@od_headings; +$oldest_dirs_hash{query} = "select format(sum(($file_table.size*1.e-9)*(unix_timestamp(now()) - unix_timestamp(atime)))/sum($file_table.size)*1.e9/$seconds_per_year, 2) as aveage, dirname, $dir_table.uid + from $dir_table, $file_table + where $dir_table.id = dirId $user_dir_clause + group by dirId + order by sum(cast($file_table.size as double)*cast(unix_timestamp(now()) - unix_timestamp(atime) as double))/sum(cast($file_table.size as double)) desc + limit $nlines;"; -print $q->h2("Largest Directories"), "\nExcludes files in sub-directories\n"; - ################### -$sql = "select sum($file_table.size) as dirsize, dirname, $dir_table.uid from $dir_table, $file_table where dirId = $dir_table.id $user_dir_clause group by dirId order by dirsize desc limit 10;"; -make_query($dbh, \$sth); -print "\n"; -print "
RankSize (GB)DirectoryOwner\n"; -$i = 1; -while (@row = $sth->fetchrow_array) { - $size = round($row[0]/1.e9); - $user = getpwuid($row[2]); - if (! $user) {$user = "uid=$row[2]";} - print "
$i$size$row[1]$user\n"; - $i++; -} -print "
\n"; +%sizagest_dirs_hash = (); +$sizagest_dirs_hash{title} = "Directories with Greatest Size × Age"; +$sizagest_dirs_hash{comment} = "Excludes files in sub-directories, age × size summed over all files in directory"; +@sad_headings = ("Rank", "Size×Age (GB-years)", "Directory", "User"); +$sizagest_dirs_hash{headings} = \@sad_headings; +$sizagest_dirs_hash{query} = "select format(sum(($file_table.size*1.e-9)*(unix_timestamp(now()) - unix_timestamp(atime)))/$seconds_per_year, 2) as sumgby, dirname, $dir_table.uid + from $dir_table, $file_table + where $dir_table.id = dirId $user_dir_clause + group by dirId + order by sum(cast($file_table.size as double)*cast(unix_timestamp(now()) - unix_timestamp(atime) as double)) desc + limit $nlines;"; -print $q->h2("Directories with Greatest Average Age"), - ########################################## - "\nExcludes files in sub-directories, size-weighted average age of all files in directory\n"; +%largest_users_hash = (); +$largest_users_hash{title} = "Largest Total File Size by User"; +$largest_users_hash{comment} = "Sum of all files owned by user"; +@lu_headings = ("Rank", "Total Size (GB)", "User"); +$largest_users_hash{headings} = \@lu_headings; +$largest_users_hash{query} = "select format(sum($file_table.size)/1.e9, 2) as sumsize, $file_table.uid + from $file_table + group by $file_table.uid + order by sum(cast($file_table.size as double)) desc + limit $nlines;"; -$sql = "select sum(($file_table.size*1.e-9)*(unix_timestamp(now()) - unix_timestamp(atime)))/sum($file_table.size)*1.e9 as aveage, dirname, $dir_table.uid from $dir_table, $file_table where $dir_table.id = dirId $user_dir_clause group by dirId order by aveage desc limit 10;"; -make_query($dbh, \$sth); -print "\n"; -print "
RankAge (years)DirectoryOwner\n"; -$i = 1; -while (@row = $sth->fetchrow_array) { - $age_size = round($row[0]/$seconds_per_year); - $user = getpwuid($row[2]); - if (! $user) {$user = "uid=$row[2]";} - print "
$i$age_size$row[1]$user\n"; - $i++; -} -print "
\n"; +%oldest_users_hash = (); +$oldest_users_hash{title} = "Greatest Average File Age by User"; +$oldest_users_hash{comment} = "Size-weighted average age of all files owned by user"; +@ou_headings = ("Rank", "Age (years)", "User"); +$oldest_users_hash{headings} = \@ou_headings; +$oldest_users_hash{query} = "select format(sum(($file_table.size*1.e-9)*(unix_timestamp(now())-unix_timestamp(atime)))/sum($file_table.size)*1.e9/$seconds_per_year, 2) as aveage, $file_table.uid + from $file_table + group by $file_table.uid + order by sum(cast($file_table.size as double)*cast(unix_timestamp(now())-unix_timestamp(atime) as double))/sum(cast($file_table.size as double)) desc + limit $nlines;"; -print $q->h2("Directories with Greatest Size × Age"), - ########################################## - "\nExcludes files in sub-directories, age × size summed over all files in directory\n"; +%sizagest_users_hash = (); +$sizagest_users_hash{title} = "Largest Total Size × Age by User"; +$sizagest_users_hash{comment} = "Age × size summed over all files owned by user"; +@sau_headings = ("Rank", "Size×Age (GB-years)", "User"); +$sizagest_users_hash{headings} = \@sau_headings; +$sizagest_users_hash{query} = "select format(sum(($file_table.size*1.e-9)*(unix_timestamp(now())-unix_timestamp(atime)))/$seconds_per_year, 2) as sumgby, $file_table.uid + from $file_table + group by $file_table.uid + order by sum(cast($file_table.size as double)*cast(unix_timestamp(now())-unix_timestamp(atime) as double)) desc + limit $nlines;"; -$sql = "select sum(($file_table.size*1.e-9)*(unix_timestamp(now()) - unix_timestamp(atime))) as sumgby, dirname, $dir_table.uid from $dir_table, $file_table where $dir_table.id = dirId $user_dir_clause group by dirId order by sumgby desc limit 10;"; -make_query($dbh, \$sth); -print "\n"; -print "
RankSize×Age (GB-years)DirectoryOwner\n"; -$i = 1; -while (@row = $sth->fetchrow_array) { - $age_size = round($row[0]/$seconds_per_year); - $user = getpwuid($row[2]); - if (! $user) {$user = "uid=$row[2]";} - print "
$i$age_size$row[1]$user\n"; - $i++; -} -print "
\n"; +%fileest_users_hash = (); +$fileest_users_hash{title} = "Largest Number of Files by User"; +$fileest_users_hash{comment} = ""; +@fu_headings = ("Rank", "Files", "User"); +$fileest_users_hash{headings} = \@fu_headings; +$fileest_users_hash{query} = "select format(count(*), 0), uid, count(*) as c + from $file_table + group by uid + order by c desc + limit $nlines;"; if (!$userid) { + do_one_section(\%sizagest_users_hash, "href"); + do_one_section(\%largest_users_hash, "href"); + do_one_section(\%fileest_users_hash, "href"); +} +do_one_section(\%sizagest_dirs_hash); +do_one_section(\%largest_dirs_hash); +do_one_section(\%sizagest_files_hash); +do_one_section(\%largest_files_hash); - print $q->h2("Largest Total File Size by User"), - ############################### - "\nSum of all files owned by user"; +print HTML $q->end_html; # end the HTML + +print HTML "\n"; - $sql = "select sum($file_table.size) as sumsize, $file_table.uid from $file_table group by $file_table.uid order by sumsize desc limit 10;"; - make_query($dbh, \$sth); - print "\n"; - print "
RankTotal Size (GB)User"; - $i = 1; - while (@row = $sth->fetchrow_array) { - $size = round($row[0]/1.e9); - $user = getpwuid($row[1]); - if (! $user) {$user = "uid=$row[1]";} - print "
$i$size$user\n"; - $i++; +if (!$user_report) { + foreach $user_key (keys %users_to_report) { + $command = "$this_script -u $user_key -n $nlines_users $directory_label"; + print "executing $command\n"; + system($command); } - print "
\n"; - - print $q->h2("Greatest Average File Age by User"), - ###################################### - "Size-weighted average age of all files owned by user\n"; +} - $sql = "select sum(($file_table.size*1.e-9)*(unix_timestamp(now())-unix_timestamp(atime)))/sum($file_table.size)*1.e9 as aveage, $file_table.uid from $file_table group by $file_table.uid order by aveage desc limit 10;"; - make_query($dbh, \$sth); - print "\n"; - print "
RankAge (years)User\n"; - $i = 1; - while (@row = $sth->fetchrow_array) { - $age_size = round($row[0]/$seconds_per_year); - $user = getpwuid($row[1]); - if (! $user) {$user = "uid=$row[1]";} - print "
$i$age_size$user\n"; - $i++; +exit; +# +# output html for one section +# +sub do_one_section { + %section_hash = %{$_[0]}; + if ($_[1] eq "href") { + $add_href = 1; + $href_prefix = ""; + $href_suffix = ""; + } else { + $add_href = 0; } - print "
\n"; - - print $q->h2("Largest Total Size × Age by User"), - ###################################### - "\nAge × size summed over all files owned by user\n"; - - $sql = "select sum(($file_table.size*1.e-9)*(unix_timestamp(now())-unix_timestamp(atime))) as sumgby, $file_table.uid from $file_table group by $file_table.uid order by sumgby desc limit 10;"; + print HTML $q->h2($section_hash{title}), "\n"; + my $comment = $section_hash{comment}; + if ($comment) {print HTML $comment, "\n";} + $sql = $section_hash{query}; make_query($dbh, \$sth); - print "\n"; - print "
RankSize×Age (GB-years)User\n"; + print HTML "\n"; + @hray = @{$section_hash{headings}}; + print HTML ""; + $iuser = -1; # no header is called "User" + for ($ih = 0; $ih <= $#hray; $ih++) { + $hthis = $hray[$ih]; + print HTML "
$hthis"; + if ($hthis eq "User") { # this column is a uid, mark it as such + $iuser = $ih; + } + } + print HTML "\n"; $i = 1; while (@row = $sth->fetchrow_array) { - $age_size = round($row[0]/$seconds_per_year); - $user = getpwuid($row[1]); - if (! $user) {$user = "uid=$row[1]";} - print "
$i$age_size$user\n"; + if ($iuser >= 0) { + $iu = $iuser - 1; + $user = getpwuid($row[$iu]); + if (! $user) { + $user = "uid=$row[$iu]"; + } + if ($add_href) { + $row[$iu] = "${href_prefix}${user}${href_middle}${user}${href_suffix}"; + $users_to_report{$user} = 1; + } else { + $row[$iu] = $user; + } + } + print HTML "
$i"; + for ($ih = 0; $ih < $#hray; $ih++) { + print HTML "$row[$ih]"; + } + print HTML "\n"; $i++; } - print "
\n"; + print HTML "
\n"; } - -print $q->end_html; # end the HTML - -print "\n"; - -exit; # # make a query # @@ -220,23 +254,50 @@ sub make_query { return 0; } -sub round { - my ($float) = @_; - $rounded_float = sprintf("%.2f", $float); - return $rounded_float; -} # # parse options # sub parse_options { - getopts('u:'); + getopts('hu:n:m:'); + if ($opt_h) { + print_usage(); + exit 0; + } if ($opt_u) { - $userid = `id -u $opt_u`; - chomp $userid; + $user_report = 1; + $username = $opt_u; + if ($username =~ m/^uid=/) { + #print ("username is likely of the form uid=1234\n"); + @tokens = split(/=/, $username); + $userid = $tokens[1]; + } else { + $userid = `id -u $username`; + chomp $userid; + } + if (!$userid) {$userid = 0;} + #print "$username, $userid\n" } else { - $userid = 0 + $user_report = 0; + $username = ""; + $userid = 0; } - #print "userid = $userid\n;" + #print "userid = $userid\n"; + if ($opt_n) { + $nlines = $opt_n; + } else { + $nlines = 10; # default number of lines + } + if ($opt_m) { + $nlines_users = $opt_m; + } else { + $nlines_users = 20; # default number of lines + } +} +sub print_usage { + print < [] + +```sh +python get_file_time.py /cache/halld/RunPeriod-2021-08/rawdata/Run081717/hd_rawdata_081717_010.evio time_81717_010.txt + +``` diff --git a/get_file_time/get_file_time.py b/get_file_time/get_file_time.py new file mode 100644 index 00000000..a648d702 --- /dev/null +++ b/get_file_time/get_file_time.py @@ -0,0 +1,241 @@ +# Usage: python get_file_time.py [] +# +# This script attempts to find the time when the first data in the supplied evio file were recorded, using information stored in the supplied file and the first file for the same run, if that is available, or RCDB if it is not. Using RCDB gives less accurate results. +# +# +# The prestart event starts a 250 MHz clock which is recorded for each event in DCODAROCInfo. Physics data recording starts after the go event. +# +# Both prestart and go events are usually in file 000. +# +# The run start time is recorded in RCDB a few seconds after the go event. +# +# The script uses hd_dump to look for prestart and go event times and the timestamp of the first event, all in file 000. +# +# If file 000 is not found, it uses the run start time from RCDB +# +# It then reads the timestamp for the first event in the supplied file, converts the difference in timestamps into difference in seconds, and adds that to the start time. +# +# If an output file is specified as the 2nd command line argument, the prestart time and file time are written to that file, followed by 'RCDB' if RCDB was used to estimate the prestart time. 
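The arithmetic described above is compact: the DCODAROCInfo timestamp counts ticks of the 250 MHz clock started at the prestart event, so dividing it by 2.5e8 gives seconds since prestart, and adding that to the prestart (or RCDB-estimated) unix time gives the wall-clock time of the first event in the file. A minimal sketch of that conversion; the pairing of the two example values (taken from the hd_dump samples quoted in the comments further down) is purely illustrative:

```python
from datetime import datetime

CLOCK_HZ = 2.5e8  # 250 MHz clock started by the prestart event

def file_start_time(prestart_unixtime, roc_timestamp):
    """Add the clock offset encoded in a DCODAROCInfo timestamp to the
    prestart (or RCDB-estimated) unix time of the run."""
    return datetime.fromtimestamp(prestart_unixtime + roc_timestamp / CLOCK_HZ)

# e.g. unix_time 1630144027 and timestamp 620160547174 (about 41 minutes after prestart)
print(file_start_time(1630144027, 620160547174))
```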
+# +# Naomi Jarvis 1 Dec 2021 + + +import sys +import os +import subprocess +import glob +import rcdb +import re +import time +from datetime import datetime + + +def parsefilename(eviofile): + # deconstruct the evio filename assuming it takes the form of + # base_dir/hd_rawdata_run_file.evio or base_dir/hd_rawdata_run_file.skim_name.evio + + loc = eviofile.find('hd_rawdata_') + 18 # start of the file number + base = eviofile[0:loc] + filenum = eviofile[loc:loc+3] + ext = eviofile[loc+3:] + + if not filenum.isdigit(): + print('Could not extract the file number from the file name, expected to find hd_rawdata__') + eviofile0 = '' + filenum = 0 + else: + eviofile0 = base + '000' + ext + + return eviofile0,filenum + + +def getrunnumber(filename): + + dumpf=open(filename,'r') + + line='start ' + while line[:4] != 'Run:' and len(line)>0: + line=dumpf.readline() + if not line: + break + + if len(line) == 0: # eof + print('Run number was not found in the file.') + return 0 + + return line[4:len(line)-1] # exclude the newline + + dumpf.close() + + +def getprestarttime(filename): + + dumpf=open(filename,'r') + + line='start ' + while line[:18] != 'DCODAControlEvent:' and len(line)>0: + line=dumpf.readline() + if not line: + break + + if len(line) == 0: # eof + print('DCODAControlEvent was not found in the file.') + return 0 + + dumpf.readline() # event_type: unix_time: Nwords: + dumpf.readline() #--------------------------------------------------------------- + line=dumpf.readline() # ffd2 1630144027 5 + + event_type=line.split()[0] + unixtime=line.split()[1] + +# if event_type != 'ffd2' : +# print event_type +# print 'Second control event was not GO!' +# unixtime=0 + + return unixtime + + dumpf.close() + + + +def geteventtime(filename): + + dumpf=open(filename,'r') + + line='start ' + while line[:13] != 'DCODAROCInfo:': + line=dumpf.readline() + if not line: + break + + if len(line) == 0: # eof + print('DCODAROCInfo was not found in the file.') + return 0 + + dumpf.readline() # rocid: timestamp: Nmisc: + dumpf.readline() #----------------------------- + line=dumpf.readline() # 34 620160547174 0 + + timestamp=line.split()[1] + + return timestamp + + dumpf.close() + + +#------------------------------- + +if not sys.argv[1]: + + exit('Usage: python getfiletime.py []') + +eviofile = sys.argv[1] + + +if len(sys.argv) > 2: + outputfile = sys.argv[2] + # Open output file now, so that it is empty if the time is not found + outf = open(outputfile,'w') +else: + outf = False + + +if not os.path.exists(eviofile): + exit('File not found: '+eviofile) + + +eviofile0,filenum = parsefilename(eviofile) + + +if os.path.exists(eviofile0): + print('The start time for the run will be determined from '+eviofile0) + usedrcdb = False +else: + print('The start time for a file can be obtained from the start time for the run plus the difference between the clock times at prestart and at the start of the file. The start time for this run will be estimated from RCDB, because file 000 was not found. The estimated start time for this file will be delayed by the (unknown) interval between the prestart and go events. For a more accurate time, provide file 000. '+eviofile0) + usedrcdb = True + + +# dump times from supplied file + +times_thisfile='_temp1.txt' + +outputfile=open(times_thisfile,"w") +subprocess.call(["hd_dump", eviofile, "-q3","-DCODAControlEvent", "-DCODAROCInfo"],stdout=outputfile,stderr=subprocess.STDOUT) +outputfile.close() + + +# extract run number and first event time and also prestart time if it's file 0. 
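The three helpers above (getrunnumber, getprestarttime, geteventtime) all follow the same parsing pattern: scan the hd_dump text for a marker line, skip the column-header and separator lines that follow it, and read one whitespace-separated field from the first data row. A condensed sketch of that pattern; field_after_marker is a hypothetical helper written for illustration, not part of the script:

```python
def field_after_marker(dump_path, marker, column):
    """Return field number `column` from the first data row that follows
    `marker` in an hd_dump text file, or None if the marker is absent."""
    with open(dump_path) as dump:
        lines = iter(dump)
        for line in lines:
            if line.startswith(marker):
                next(lines)  # column headers, e.g. "rocid: timestamp: Nmisc:"
                next(lines)  # separator row of dashes
                return next(lines).split()[column]
    return None

# e.g. the first-event timestamp, as geteventtime() extracts it:
# timestamp = field_after_marker('_temp1.txt', 'DCODAROCInfo:', 1)
```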
+ +run = getrunnumber(times_thisfile) + + +if (filenum == '000'): + start_unixtime = getprestarttime(times_thisfile) + #print 'prestart unix time is ',start_unixtime + if start_unixtime == 0 : + exit('Could not find the prestart event.') + + dt_runstart = datetime.fromtimestamp(float(start_unixtime)) + +else: + + if os.path.exists(eviofile0): + #print 'Looking for time of prestart event' + # get prestart time + times_file0='_temp0.txt' + + outputfile=open(times_file0,"w") + subprocess.call(["hd_dump", eviofile0, "-q3","-DCODAControlEvent", "-DCODAROCInfo"],stdout=outputfile,stderr=subprocess.STDOUT) + outputfile.close() + + start_unixtime = getprestarttime(times_file0) + # print 'prestart unix time is ',start_unixtime + + if start_unixtime == 0 : + exit('Could not find the prestart event.') + + + dt_runstart = datetime.fromtimestamp(float(start_unixtime)) + + else: + # get start time from rcdb + if run == 0 : + exit('RCDB cannot provide a start time for run 0.') + else : + db = rcdb.RCDBProvider("mysql://rcdb@hallddb/rcdb") + intrun = int(run) + runs = db.select_runs("", intrun, intrun) + + dt_runstart = runs[0].start_time + start_unixtime = time.mktime(dt_runstart.timetuple()) - 2 #subtract 2 seconds as rcdb is always a bit late + # no idea what to subtract to estimate for the prestart to go interval + + +# find first event time from current file + +clocktime = geteventtime(times_thisfile) + +if clocktime == 0 : + exit('Could not find any DCODAROCInfo from which to obtain a timestamp.') + + +timediff = float(clocktime)/2.5e8 + +thisfile_timestamp = float(start_unixtime) + timediff + +dt_thisfile = datetime.fromtimestamp(thisfile_timestamp) + +print('prestart '+dt_runstart.strftime("%Y-%m-%d %H:%M:%S")) +print('start of file '+dt_thisfile.strftime("%Y-%m-%d %H:%M:%S")) + +if outf: + outf.write(dt_thisfile.strftime("%Y-%m-%d %H:%M:%S")+'\n') + if usedrcdb: + outf.write('RCDB\n') + outf.close() + + + + diff --git a/hdi_conversion/convert_halld_recon.sh b/hdi_conversion/convert_halld_recon.sh new file mode 100755 index 00000000..6d3a5e42 --- /dev/null +++ b/hdi_conversion/convert_halld_recon.sh @@ -0,0 +1,51 @@ +#!/bin/bash +# +# Make a new version of halld_recon that has only source code that +# appears in hd_interface. All other source code is deleted. The +# resulting repo can be used as a basis for a new hd_interface with +# up-to-date versions of the code in halld_recon. +# +# specify the hd_interface clone +hd_interface=/home/marki/git/hd_interface +# specify the hdi_conversion directory of hd_utilities +hdi_conversion=/home/marki/git/hd_utilities/hdi_conversion +# specify where the halld_recon clone will go +repo_dir=hdi_interface_from_hdr +# clone halld_recon +rm -rf $repo_dir +git clone -b for_hdi_conversion git@github.com:markito3/halld_recon $repo_dir +# go to the halld_recon clone +cd $repo_dir +# +# Write ../command.sh which contains a "git filter-repo" command to +# drop all directories except those that contain code destined for +# hd_interface. See https://github.com/newren/git-filter-repo . +# +# add the git filter-repo command to command.sh +rm -f ../command.sh +echo git filter-repo \\ > ../command.sh +# add directories to be passed through filter to command.sh +awk '{print " --path "$1" \\"}' < $hdi_conversion/filter.txt \ + >> ../command.sh +# add a blank line to command.sh +echo >> ../command.sh +# execute command.sh, i.e., do the git filter-repo +. 
../command.sh +# delete all branches +git branch| awk '{print "git branch -D "$1}' | bash | grep -v "Deleted branch" +# move the "src" directory to "old_src", git-wise +git mv src old_src +git commit -m "src moved to old_src" | grep -v "rename" +# move *.cc files, present in hd_interface, from old_src to src, git-wise +find $hd_interface -type f -name \*.cc | $hdi_conversion/move_cc.sh +# move *.h files, present in hd_interface, from old_src to src, git-wise +find $hd_interface -type f -name \*.h | $hdi_conversion/move_h.sh +exit # for test, do not go further +# commit the changes +git commit -m "move done" | grep -v "rename" +# remove the old_src directory +git rm -r old_src | grep -v "rm " +# commit the removal +git commit -m "old_src deleted" | grep -v "delete mode" +# look at status +git status diff --git a/hdi_conversion/filter.txt b/hdi_conversion/filter.txt new file mode 100644 index 00000000..7ce8a617 --- /dev/null +++ b/hdi_conversion/filter.txt @@ -0,0 +1,27 @@ +src/libraries/include +src/libraries/BCAL +src/libraries/CCAL +src/libraries/CDC +src/libraries/CERE +src/libraries/DANA +src/libraries/DAQ +src/libraries/DIRC +src/libraries/EVENTSTORE +src/libraries/FCAL +src/libraries/FDC +src/libraries/FMWPC +src/libraries/HDDM +src/libraries/HDGEOMETRY +src/libraries/KINFITTER +src/libraries/PAIR_SPECTROMETER +src/libraries/PID +src/libraries/RF +src/libraries/START_COUNTER +src/libraries/TAC +src/libraries/TAGGER +src/libraries/TOF +src/libraries/TPOL +src/libraries/TRACKING +src/libraries/TRD +src/libraries/TRIGGER +src/libraries/TTAB diff --git a/hdi_conversion/move_cc.sh b/hdi_conversion/move_cc.sh new file mode 100755 index 00000000..a3292c36 --- /dev/null +++ b/hdi_conversion/move_cc.sh @@ -0,0 +1,10 @@ +#!/bin/bash +while read line +do + command=`echo $line | awk -F'/src/' '{print "full_path="$2}'` + eval $command + command=`echo $full_path | awk -F/ '{print "dir="$1}'` + eval $command + mkdir -p src/$dir + git mv old_src/libraries/$full_path src/$full_path +done diff --git a/hdi_conversion/move_h.sh b/hdi_conversion/move_h.sh new file mode 100755 index 00000000..bb410522 --- /dev/null +++ b/hdi_conversion/move_h.sh @@ -0,0 +1,16 @@ +#!/bin/bash +while read line +do + command=`echo $line | awk -F'/include/' '{print "full_path="$2}'` + eval $command + if [[ $full_path =~ / ]] + then + command=`echo $full_path | awk -F/ '{print "dir="$1}'` + eval $command + mkdir -p include/$dir + git mv old_src/libraries/$full_path include/$full_path + else + mkdir -p include + git mv old_src/libraries/include/$full_path include/$full_path + fi +done diff --git a/kinfitter/monitoring/GraphRunPeriod_pippimpi0_ver1.py b/kinfitter/monitoring/GraphRunPeriod_pippimpi0_ver1.py new file mode 100755 index 00000000..0fac6362 --- /dev/null +++ b/kinfitter/monitoring/GraphRunPeriod_pippimpi0_ver1.py @@ -0,0 +1,863 @@ +#!/usr/bin/env python + +# Standard imports +from optparse import OptionParser +import os.path +import os +import sys +import subprocess +import glob +from array import * +from math import sqrt, exp + +#Import ROOT modules +from ROOT import * + +TOP_LEVEL_DATA_LOC = "/w/halld-scshelf2101/home/jzarling/ForPeople/ForMadison/hd_root_files_FA2018_DATA/" +TOP_LEVEL_MC_LOC = "/w/halld-scshelf2101/home/jzarling/ForPeople/ForMadison/hd_root_files_FA2018_MC/" + +def IsTDir(tobject): + if("TDirectoryFile" in str(type(tobject))): return True + else: return False + +def main(argv): + #Usage controls from OptionParser + parser_usage = "" + parser = OptionParser(usage = parser_usage) + (options, args) = 
parser.parse_args(argv) + if(len(args) != 0): + parser.print_help() + return + + gROOT.SetBatch(True) + +#COLLECTING FILE NAMES + #DATA + loopcount_data = 0 # This variable counts which file number we are on + filename_list_data = glob.glob(TOP_LEVEL_DATA_LOC+"/hd_root_0*.root") #file name is now an array variable that holds all the files we want + #CHANGE ABOVE LINE TO THE DIRECTORY/FILES YOU NEED + NumberOfRuns_data = len(filename_list_data) # Number of Runs specific to this data sample + + #MC + loopcount_mc = 0 # This variable counts which file number we are on + filename_list_mc = glob.glob(TOP_LEVEL_MC_LOC+"/hd_root_0*.root") #file name is now an array variable that holds all the files we want + #CHANGE ABOVE LINE TO THE DIRECTORY/FILES YOU NEED + NumberOfRuns_mc = len(filename_list_mc) # Number of Runs specific to this data sample + + x_axis_err =array('f',[]) + for i in range(NumberOfRuns_data): x_axis_err.append(0.) + +#GETS THE STEP... INFO + #DATA + subsubkey_array_data = [] + pullname_array_data = [] + tempnum_data=0 + f_data = TFile.Open(TOP_LEVEL_DATA_LOC+'/hd_root_050685.root') #this is needed just to get this f bit to run; f will be redefined later + #CHANGE ABOVE LINE TO THE DIRECTORY/FILES YOU NEED; THE NUMBER AT THE END CAN BE ANY FILE THAT IS CONTAINED WITHIN THE DATASET + topdir_keys_data = f_data.GetListOfKeys() + for key_data in topdir_keys_data: + key_string_data = key_data.GetTitle() + print "Keyname: " + key_string_data #gets the keyname (ie. pi0pippim__B4_M7) + if key_string_data == 'pi0pippim__B4_F1_M7': #CHANGE TO THE REACTION YOU WANT TO STUDY + subdir_data = f_data.Get(key_string_data) + subdir_keys_data = subdir_data.GetListOfKeys() + for subkey_data in subdir_keys_data: + subkey_string_data = subkey_data.GetTitle() + print "Subkey: " + subkey_string_data #gets the subkey (ie. Hist_KinFitResults) + print "Trying to access: " + key_string_data+"/"+subkey_string_data + if subkey_string_data == 'Hist_KinFitResults': + subsubdir_data = f_data.Get(key_string_data+"/"+subkey_string_data) + print "subsubdir type: " +str(type(subsubdir_data)) + if IsTDir(subsubdir_data): + subsubdir_keys_data = subsubdir_data.GetListOfKeys() + for subsubkey_data in subsubdir_keys_data: + subsubkey_string_data = subsubkey_data.GetTitle() + print "Subsubkey title: " + subsubkey_string_data #gets the subsubkey (ie.Step0__Photon_Proton__Pi0_Pi+_Pi-_Proton) + if 'Step' in subsubkey_string_data: + tempnum_data=int(subsubkey_string_data[4]) + subsubkey_array_data.insert(tempnum_data, subkey_string_data+'/'+subsubkey_string_data) + + #MC + subsubkey_array_mc = [] + pullname_array_mc = [] + tempnum_mc=0 + f_mc = TFile.Open(TOP_LEVEL_MC_LOC+'/hd_root_050685.root') #this is needed just to get this f bit to run; f will be redefined later + #CHANGE ABOVE LINE TO THE DIRECTORY/FILES YOU NEED; THE NUMBER AT THE END CAN BE ANY FILE THAT IS CONTAINED WITHIN THE DATASET + topdir_keys_mc = f_mc.GetListOfKeys() + for key_mc in topdir_keys_mc: + key_string_mc = key_mc.GetTitle() + print "Keyname: " + key_string_mc #gets the keyname (ie. pi0pippim__B4_M7) + if key_string_mc == 'pi0pippim__B4_F1_M7': #CHANGE TO THE REACTION YOU WANT TO STUDY + subdir_mc = f_mc.Get(key_string_mc) + subdir_keys_mc = subdir_mc.GetListOfKeys() + for subkey_mc in subdir_keys_mc: + subkey_string_mc = subkey_mc.GetTitle() + print "Subkey: " + subkey_string_mc #gets the subkey (ie. 
Hist_KinFitResults) + print "Trying to access: " + key_string_mc+"/"+subkey_string_mc + if subkey_string_mc == 'Hist_KinFitResults': + subsubdir_mc = f_mc.Get(key_string_mc+"/"+subkey_string_mc) + print "subsubdir type: " +str(type(subsubdir_mc)) + if IsTDir(subsubdir_mc): + subsubdir_keys_mc = subsubdir_mc.GetListOfKeys() + for subsubkey_mc in subsubdir_keys_mc: + subsubkey_string_mc = subsubkey_mc.GetTitle() + print "Subsubkey title: " + subsubkey_string_mc #gets the subsubkey (ie.Step0__Photon_Proton__Pi0_Pi+_Pi-_Proton) + if 'Step' in subsubkey_string_mc: + tempnum_mc=int(subsubkey_string_mc[4]) + subsubkey_array_mc.insert(tempnum_mc, subkey_string_mc+'/'+subsubkey_string_mc) + + + print 'subsubkey_array_data = ' + print subsubkey_array_data + print 'subsubkey_array_mc = ' + print subsubkey_array_mc + + counter = 0 + +#DATA + for partfilename_data in subsubkey_array_data: + partfilename_mc = subsubkey_array_mc[counter] + for var2 in ["Proton", "Pi+", "Pi-", "Photon"]: + + +#DATA + run_vec_data = array('f') # Will store all run numbers in an array. we will use this as the x-axis in a graph later + pull_var_list_data = ["Pull_Px", "Pull_Py", "Pull_Pz", "Pull_E"] + RMS_array_data = {var1 : array('f') for var1 in pull_var_list_data} # Stores all RMSs in an array in a dictionary. y-axis in graph + mean_array_data = {var1 : array('f') for var1 in pull_var_list_data} # Stores all means in an array in a dictionary. y-axis in graph + skewness_array_data = {var1 : array('f') for var1 in pull_var_list_data} # Stores all skewness's in an array in a dictionary. y-axis in graph + kurtosis_array_data = {var1 : array('f') for var1 in pull_var_list_data} # Stores all kurtosis's in an array in a dictionary. y-axis in graph + RMS_array_data_err = {var1 : array('f') for var1 in pull_var_list_data} # Stores all RMS errors in an array + mean_array_data_err = {var1 : array('f') for var1 in pull_var_list_data} # Stores all mean errors in an array + skewness_array_data_err = {var1 : array('f') for var1 in pull_var_list_data} # Stores all skewness errors in an array + kurtosis_array_data_err = {var1 : array('f') for var1 in pull_var_list_data} # Stores all kurtosis errors in an array + for loopcount_data, filename_data in enumerate(filename_list_data): # opening loop that collects all (x, y) + + f_data = TFile.Open(filename_data) + print("Current file:", filename_data) + run_vec_data.append(int(filename_data[-10:-5])) + for var1 in pull_var_list_data: + tempfilename_data = "pi0pippim__B4_F1_M7/"+partfilename_data+"/"+var2+"/"+var1 #CHANGE TO THE REACTION YOU WANT TO STUDY + print tempfilename_data + if tempfilename_data: + pathname_data = "pi0pippim__B4_F1_M7/"+partfilename_data+"/"+var2+"/"+var1 #CHANGE TO THE REACTION YOU WANT TO STUDY + h_data = f_data.Get("pi0pippim__B4_F1_M7/"+partfilename_data+"/"+var2+"/"+var1) #CHANGE TO THE REACTION YOU WANT TO STUDY + if hasattr(h_data, 'GetEntries'): + #Data + num_entries_data = h_data.GetEntries() # Number of times a value was entered into the histogram + mean_data = h_data.GetMean() # Avarage value of this histogram (we want it to be 0) + RMS_data = h_data.GetRMS() #standard deviation + skewness_data = h_data.GetSkewness() + kurtosis_data = h_data.GetKurtosis() + mean_array_data[var1].append(float(mean_data)) #these will be used in the graphs + RMS_array_data[var1].append(float(RMS_data)) + skewness_array_data[var1].append(float(skewness_data)) + kurtosis_array_data[var1].append(float(kurtosis_data)) + #errors + mean_data_err = h_data.GetMean(11) # Avarage vlaue of 
this histogram (we wnat it to be 0) + RMS_data_err = h_data.GetRMS(11) + skewness_data_err = h_data.GetSkewness(11) + kurtosis_data_err = h_data.GetKurtosis(11) + mean_array_data_err[var1].append(float(mean_data_err)) #these will be used in the graphs + RMS_array_data_err[var1].append(float(RMS_data_err)) + skewness_array_data_err[var1].append(float(skewness_data_err)) + kurtosis_array_data_err[var1].append(float(kurtosis_data_err)) + print("Number of entries in histogram", num_entries_data) #Prints out the results for this data file + print("Histogram average:", mean_data) + print("Standard Deviation (RMS):", RMS_data) + print("Histogram skewness:", skewness_data) + print("Histogram kurtosis:", kurtosis_data) + print("Current file:", filename_data) + print("Current loop count:", loopcount_data) + print("The current variable is:", var1) + print("The current outter variable is:", var2) + print("The current path name is: ", pathname_data) + + +#MC + run_vec_mc = array('f') # Will store all run numbers in an array. we will use this as the x-axis in a graph later + pull_var_list_mc = ["Pull_Px", "Pull_Py", "Pull_Pz", "Pull_E"] + RMS_array_mc = {var1 : array('f') for var1 in pull_var_list_mc} # Stores all RMSs in an array in a dictionary. y-axis in graph + mean_array_mc = {var1 : array('f') for var1 in pull_var_list_mc}# Stores all means in an array in a dictionary. y-axis in graph + skewness_array_mc = {var1 : array('f') for var1 in pull_var_list_mc} + kurtosis_array_mc = {var1 : array('f') for var1 in pull_var_list_mc} + RMS_array_mc_err = {var1 : array('f') for var1 in pull_var_list_mc} # Stores all RMSs in an array in a dictionary. y-axis in graph + mean_array_mc_err = {var1 : array('f') for var1 in pull_var_list_mc}# Stores all means in an array in a dictionary. 
y-axis in graph + skewness_array_mc_err = {var1 : array('f') for var1 in pull_var_list_mc} + kurtosis_array_mc_err = {var1 : array('f') for var1 in pull_var_list_mc} + for loopcount_mc, filename_mc in enumerate(filename_list_mc): # opening loop that collects all (x, y) + + f_mc = TFile.Open(filename_mc) + print("Current file:", filename_mc) + run_vec_mc.append(int(filename_mc[-10:-5])) + for var1 in pull_var_list_mc: + tempfilename_mc = "pi0pippim__B4_F1_M7/"+partfilename_mc+"/"+var2+"/"+var1 #CHANGE TO THE REACTION YOU WANT TO STUDY + print tempfilename_mc + if tempfilename_mc: + pathname_mc = "pi0pippim__B4_F1_M7/"+partfilename_mc+"/"+var2+"/"+var1 #CHANGE TO THE REACTION YOU WANT TO STUDY + h_mc = f_mc.Get("pi0pippim__B4_F1_M7/"+partfilename_mc+"/"+var2+"/"+var1) #CHANGE TO THE REACTION YOU WANT TO STUDY + if hasattr(h_mc, 'GetEntries'): + + num_entries_mc = h_mc.GetEntries() # Number of times a value was entered into the histogram + #MC + mean_mc = h_mc.GetMean() # Avarage vlaue of this histogram (we want it to be 0) + RMS_mc = h_mc.GetRMS() #standard deviation + skewness_mc = h_mc.GetSkewness() + kurtosis_mc = h_mc.GetKurtosis() + mean_array_mc[var1].append(float(mean_mc)) #these will be used in the graphs + RMS_array_mc[var1].append(float(RMS_mc)) + skewness_array_mc[var1].append(float(skewness_mc)) + kurtosis_array_mc[var1].append(float(kurtosis_mc)) + #error + mean_mc_err = h_mc.GetMean(11) # Avarage vlaue of this histogram (we wnat it to be 0) + RMS_mc_err = h_mc.GetRMS(11) + skewness_mc_err = h_mc.GetSkewness(11) + kurtosis_mc_err = h_mc.GetKurtosis(11) + mean_array_mc_err[var1].append(float(mean_mc_err)) #these will be used in the graphs + RMS_array_mc_err[var1].append(float(RMS_mc_err)) + skewness_array_mc_err[var1].append(float(skewness_mc_err)) + kurtosis_array_mc_err[var1].append(float(kurtosis_mc_err)) + print("Number of entries in histogram", num_entries_mc) # Prints out the results for this data file + print("Histogram average:", mean_mc) + print("Standard Deviation (RMS):", RMS_mc) + print("Histogram skewness:", skewness_mc) + print("Histogram kurtosis:", kurtosis_mc) + print("Current file:", filename_mc) + print("Current loop count:", loopcount_mc) + print("The current variable is:", var1) + print("The current outter variable is:", var2) + print("The current path name is: ", pathname_mc) +###Graphs + + if var2 == "Proton" or var2 == "Pi+" or var2 == "Pi-": + if partfilename_data == 'Hist_KinFitResults/Step0__Photon_Proton__Pi0_Pi+_Pi-_Proton': #CHANGE BASED ON WHAT REACTION YOU ARE STUDYING + var4 = ["Pull_Px"]#, "Pull_Xx"] + var5 = ["Pull_Py"]#, "Pull_Xy"] + var9 = ["Pull_Pz"]#, "Pull_Xz"] + # CHANGE ABOVE DEPENDING ON WHAT YOUR REACTION STUDYS + for var6 in [0]:#,1]: + #this prints the graphs for x,y,z mean + + if var2 == "Pi+" : graphsymbol = "#pi^{+}" + if var2 == "Pi-" : graphsymbol = "#pi^{-}" + if var2 == "Photon" : graphsymbol = "#gamma" + if var2 == "Proton" : graphsymbol = 'P^{+}' + if var6 == 0: graphname = 'Momentum' + + + tempvar2=var2+graphname + c3 = TCanvas("c3","c3",2400,900) + c3.Divide(2,1) + ###Data + c3.cd(1) + print("numver of runs: ", NumberOfRuns_data) + print("run vec data: ", run_vec_data) + print("mean array data: ", mean_array_data[var4[var6]]) + print("x axis err: ", x_axis_err) + print("mean data error: ", mean_array_data_err[var4[var6]]) + gr1 = TGraphErrors(NumberOfRuns_data, run_vec_data, mean_array_data[var4[var6]], x_axis_err, mean_array_data_err[var4[var6]]) # Size of arrays, followed by x,y-axis + 
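+ # A note on the call above (standard ROOT signature): TGraphErrors(n, x, y, ex, ey) expects array('f') or array('d') buffers with at least n entries; the x errors here are all zero so only the statistic's error bars are drawn.
+ # The shared x_axis_err buffer was sized from NumberOfRuns_data, so reusing it for the MC graphs below assumes the data and MC samples cover the same number of runs.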
gr1.GetYaxis().SetRangeUser(-0.3,0.3) #this is the y-axis range + gr1.SetMarkerSize(1.2) # Makes marker size a bit bigger; standard is 1.0 + gr1.SetTitle("Data: Mean Versus Run Number of "+ graphname + " " + graphsymbol) + gr1.GetXaxis().SetTitle("Run Number") + gr1.GetYaxis().SetTitle("Mean") + gr1.SetMarkerColor(kBlue) + gr1.SetMarkerStyle(kFullCircle) # See https://root.cern.ch/doc/master/classTAttMarker.html + gr2 = TGraphErrors(NumberOfRuns_data, run_vec_data, mean_array_data[var5[var6]], x_axis_err, mean_array_data_err[var5[var6]]) # Size of arrays, followed by x,y-axis + gr2.SetMarkerSize(1.2) # Makes marker size a bit bigger; standard is 1.0 + gr2.SetMarkerColor(kRed) + gr2.SetMarkerStyle(kFullCircle) + gr3 = TGraphErrors(NumberOfRuns_data, run_vec_data, mean_array_data[var9[var6]], x_axis_err, mean_array_data_err[var9[var6]]) # Size of arrays, followed by x,y-axis + gr3.SetMarkerSize(1.2) # Makes marker size a bit bigger; standard is 1.0 + gr3.SetMarkerColor(kGreen+3) + gr3.SetMarkerStyle(kFullCircle) + ## Defining TLegend for graphs gr1, gr2, ... + legend = TLegend(0.7,0.7,0.9,0.9) #0.1 is lower limit of plot, 0.9 is upper limit (beyond on either side is labeling+whitespace) + legend.AddEntry(gr1,"Mean X","pl") + legend.AddEntry(gr2,"Mean Y","pl") + legend.AddEntry(gr3,"Mean Z","pl") + gr1.Draw("AP") + gr2.Draw("PSAME") # AP are options while drawing. A = show axis, P = markers at points (instead of lines between points) Draws on Canvas c1 + gr3.Draw("PSAME") + legend.Draw() # Doesn't need any arguments + ###mc + c3.cd(2) + gr_1 = TGraphErrors(NumberOfRuns_mc, run_vec_mc, mean_array_mc[var4[var6]], x_axis_err, mean_array_mc_err[var4[var6]]) # Size of arrays, followed by x,y-axis + gr_1.GetYaxis().SetRangeUser(-0.3,0.3) #this is the y-axis range + gr_1.SetMarkerSize(1.2) # Makes marker size a bit bigger; standard is 1.0 + gr_1.SetTitle("MC: Mean Versus Run Number of "+ graphname + " " + graphsymbol) + gr_1.GetXaxis().SetTitle("Run Number") + gr_1.GetYaxis().SetTitle("Mean") + gr_1.SetMarkerColor(kBlue) + gr_1.SetMarkerStyle(kFullCircle) # See https://root.cern.ch/doc/master/classTAttMarker.html + gr_2 = TGraphErrors(NumberOfRuns_mc, run_vec_mc, mean_array_mc[var5[var6]], x_axis_err, mean_array_mc_err[var5[var6]]) # Size of arrays, followed by x,y-axis + gr_2.SetMarkerSize(1.2) # Makes marker size a bit bigger; standard is 1.0 + gr_2.SetMarkerColor(kRed) + gr_2.SetMarkerStyle(kFullCircle) + gr_3 = TGraphErrors(NumberOfRuns_mc, run_vec_mc, mean_array_mc[var9[var6]], x_axis_err, mean_array_mc_err[var9[var6]]) # Size of arrays, followed by x,y-axis + gr_3.SetMarkerSize(1.2) # Makes marker size a bit bigger; standard is 1.0 + gr_3.SetMarkerColor(kGreen+3) + gr_3.SetMarkerStyle(kFullCircle) + ## Defining TLegend for graphs gr_1, gr_2, ... + legendd = TLegend(0.7,0.7,0.9,0.9) #0.1 is lower limit of plot, 0.9 is upper limit (beyond on either side is labeling+whitespace) + legendd.AddEntry(gr_1,"Mean X","pl") + legendd.AddEntry(gr_2,"Mean Y","pl") + legendd.AddEntry(gr_3,"Mean Z","pl") + gr_1.Draw("AP") + gr_2.Draw("PSAME") # AP are options while drawing. 
A = show axis, P = markers at points (instead of lines between points) Draws on Canvas c1 + gr_3.Draw("PSAME") + legendd.Draw() # Doesn't need any arguments + c3.SaveAs("splitplotMEAN%s.pdf" % tempvar2) #Saves in current directory, recommend saving asa pdf or png + + #this prints the graphs for x,y,z RMS + c4 = TCanvas("c3","c3",2400,900) + c4.Divide(2,1) + ##DATA + c4.cd(1) + gr3 = TGraphErrors(NumberOfRuns_data, run_vec_data, RMS_array_data[var4[var6]], x_axis_err, RMS_array_data_err[var4[var6]]) # Size of arrays, followed by x,y-axis + gr3.GetYaxis().SetRangeUser(0.7,1.3) #this is the y-axis range + gr3.SetMarkerSize(1.2) # Makes marker size a bit bigger; standard is 1.0 + gr3.SetTitle("Data: RMS Versus Run Number of "+graphname+" "+ graphsymbol) + gr3.GetXaxis().SetTitle("Run Number") + gr3.GetYaxis().SetTitle("RMS") + gr3.SetMarkerColor(kBlue) + gr3.SetMarkerStyle(kFullCircle) # See https://root.cern.ch/doc/masterclassTAttMarker.html + gr4 = TGraphErrors(NumberOfRuns_data, run_vec_data, RMS_array_data[var5[var6]], x_axis_err, RMS_array_data_err[var5[var6]]) # Size of arrays, followed by x,y-axis + gr4.SetMarkerSize(1.2) # Makes marker size a bit bigger; standard is 1.0 + gr4.SetMarkerColor(kRed) + gr4.SetMarkerStyle(kFullCircle) + gr5 = TGraphErrors(NumberOfRuns_data, run_vec_data, RMS_array_data[var9[var6]], x_axis_err, RMS_array_data_err[var9[var6]]) # Size of arrays, followed by x,y-axis + gr5.SetMarkerSize(1.2) # Makes marker size a bit bigger; standard is 1.0 + gr5.SetMarkerColor(kGreen+3) + gr5.SetMarkerStyle(kFullCircle) + legend = TLegend(0.7,0.7,0.9,0.9) #0.1 is lower limit of plot, 0.9 is upper limit (beyond on either side is labeling+whitespace) + legend.AddEntry(gr3,"RMS X","pl") + legend.AddEntry(gr4,"RMS Y","pl") + legend.AddEntry(gr5,"RMS Z","pl") + gr3.Draw("AP") + gr4.Draw("PSAME") # AP are options while drawing. A = show axis, P = markers at points (instead of lines between points) Draws on Canvas c1 + gr5.Draw("PSAME") # AP are options while drawing. 
A = show axis, P = markers at points (instead of lines between points) Draws on Canvas c1 + legend.Draw() # Doesn't need any arguments + ##MC + c4.cd(2) + gr_3 = TGraphErrors(NumberOfRuns_mc, run_vec_mc, RMS_array_mc[var4[var6]], x_axis_err, RMS_array_mc_err[var4[var6]]) # Size of arrays, followed by x,y-axis + gr_3.GetYaxis().SetRangeUser(0.7,1.3) #this is the y-axis range + gr_3.SetMarkerSize(1.2) # Makes marker size a bit bigger; standard is 1.0 + gr_3.SetTitle("MC: RMS Versus Run Number of "+graphname+" "+ graphsymbol) + gr_3.GetXaxis().SetTitle("Run Number") + gr_3.GetYaxis().SetTitle("RMS") + gr_3.SetMarkerColor(kBlue) + gr_3.SetMarkerStyle(kFullCircle) # See https://root.cern.ch/doc/masterclassTAttMarker.html + gr_4 = TGraphErrors(NumberOfRuns_mc, run_vec_mc, RMS_array_mc[var5[var6]], x_axis_err, RMS_array_mc_err[var5[var6]]) # Size of arrays, followed by x,y-axis + gr_4.SetMarkerSize(1.2) # Makes marker size a bit bigger; standard is 1.0 + gr_4.SetMarkerColor(kRed) + gr_4.SetMarkerStyle(kFullCircle) + gr_5 = TGraphErrors(NumberOfRuns_mc, run_vec_mc, RMS_array_mc[var9[var6]], x_axis_err, RMS_array_mc_err[var9[var6]]) # Size of arrays, followed by x,y-axis + gr_5.SetMarkerSize(1.2) # Makes marker size a bit bigger; standard is 1.0 + gr_5.SetMarkerColor(kGreen+3) + gr_5.SetMarkerStyle(kFullCircle) + legendd = TLegend(0.7,0.7,0.9,0.9) #0.1 is lower limit of plot, 0.9 is upper limit (beyond on either side is labeling+whitespace) + legendd.AddEntry(gr_3,"RMS X","pl") + legendd.AddEntry(gr_4,"RMS Y","pl") + legendd.AddEntry(gr_5,"RMS Z","pl") + gr_3.Draw("AP") + gr_4.Draw("PSAME") # AP are options while drawing. A = show axis, P = markers at points (instead of lines between points) Draws on Canvas c1 + gr_5.Draw("PSAME") # AP are options while drawing. A = show axis, P = markers at points (instead of lines between points) Draws on Canvas c1 + legendd.Draw() # Doesn't need any arguments + c4.SaveAs("splitplotRMS%s.pdf" % tempvar2) #Saves in current directory, recommend saving asa pdf or png + + c6 = TCanvas("c3","c3",2400,900) + c6.Divide(2,1) + ##DATA + c6.cd(1) + grap1 = TGraphErrors(NumberOfRuns_data, run_vec_data, skewness_array_data[var4[var6]], x_axis_err, skewness_array_data_err[var4[var6]]) # Size of arrays, followed by x,y-axis + grap1.GetYaxis().SetRangeUser(-0.3,0.3) #this is the y-axis range + grap1.SetMarkerSize(1.2) # Makes marker size a bit bigger; standard is 1.0 + grap1.SetTitle("Data: Skewness Versus Run Number of "+ graphname + " " + graphsymbol) + grap1.GetXaxis().SetTitle("Run Number") + grap1.GetYaxis().SetTitle("Skewness") + grap1.SetMarkerColor(kBlue) + grap1.SetMarkerStyle(kFullCircle) # See https://root.cern.ch/doc/masterclassTAttMarker.html + grap2 = TGraphErrors(NumberOfRuns_data, run_vec_data, skewness_array_data[var5[var6]], x_axis_err, skewness_array_data_err[var5[var6]]) # Size of arrays, followed by x,y-axis + grap2.SetMarkerSize(1.2) # Makes marker size a bit bigger; standard is 1.0 + grap2.SetMarkerColor(kRed) + grap2.SetMarkerStyle(kFullCircle) + grap3 = TGraphErrors(NumberOfRuns_data, run_vec_data, skewness_array_data[var9[var6]], x_axis_err, skewness_array_data_err[var9[var6]]) # Size of arrays, followed by x,y-axis + grap3.SetMarkerSize(1.2) # Makes marker size a bit bigger; standard is 1.0 + grap3.SetMarkerColor(kGreen+3) + grap3.SetMarkerStyle(kFullCircle) + ## Defining TLegend for graphs gr1, gr2, ... 
+ legend = TLegend(0.7,0.7,0.9,0.9) #0.1 is lower limit of plot, 0.9 is upper limit (beyond on either side is labeling+whitespace) + legend.AddEntry(grap1,"Skewness X","pl") + legend.AddEntry(grap2,"Skewness Y","pl") + legend.AddEntry(grap3,"Skewness Z","pl") + grap1.Draw("AP") + grap2.Draw("PSAME") # AP are options while drawing. A = show axis, P = markers at points (instead of lines between points) Draws on Canvas c1 + grap3.Draw("PSAME") + legend.Draw() # Doesn't need any arguments + ##mc + c6.cd(2) + grap_1 = TGraphErrors(NumberOfRuns_mc, run_vec_mc, skewness_array_mc[var4[var6]], x_axis_err, skewness_array_mc_err[var4[var6]]) # Size of arrays, followed by x,y-axis + grap_1.GetYaxis().SetRangeUser(-0.3,0.3) #this is the y-axis range + grap_1.SetMarkerSize(1.2) # Makes marker size a bit bigger; standard is 1.0 + grap_1.SetTitle("MC: Skewness Versus Run Number of "+ graphname + " " + graphsymbol) + grap_1.GetXaxis().SetTitle("Run Number") + grap_1.GetYaxis().SetTitle("Skewness") + grap_1.SetMarkerColor(kBlue) + grap_1.SetMarkerStyle(kFullCircle) # See https://root.cern.ch/doc/masterclassTAttMarker.html + grap_2 = TGraphErrors(NumberOfRuns_mc, run_vec_mc, skewness_array_mc[var5[var6]], x_axis_err, skewness_array_mc_err[var5[var6]]) # Size of arrays, followed by x,y-axis + grap_2.SetMarkerSize(1.2) # Makes marker size a bit bigger; standard is 1.0 + grap_2.SetMarkerColor(kRed) + grap_2.SetMarkerStyle(kFullCircle) + grap_3 = TGraphErrors(NumberOfRuns_mc, run_vec_mc, skewness_array_mc[var9[var6]], x_axis_err, skewness_array_mc_err[var9[var6]]) # Size of arrays, followed by x,y-axis + grap_3.SetMarkerSize(1.2) # Makes marker size a bit bigger; standard is 1.0 + grap_3.SetMarkerColor(kGreen+3) + grap_3.SetMarkerStyle(kFullCircle) + ## Defining TLegend for graphs gr1, gr2, ... + legendd = TLegend(0.7,0.7,0.9,0.9) #0.1 is lower limit of plot, 0.9 is upper limit (beyond on either side is labeling+whitespace) + legendd.AddEntry(grap_1,"Skewness X","pl") + legendd.AddEntry(grap_2,"Skewness Y","pl") + legendd.AddEntry(grap_3,"Skewness Z","pl") + grap_1.Draw("AP") + grap_2.Draw("PSAME") # AP are options while drawing. 
A = show axis, P = markers at points (instead of lines between points) Draws on Canvas c1 + grap_3.Draw("PSAME") + legendd.Draw() # Doesn't need any arguments + c6.SaveAs("splitplotSKEWNESS%s.pdf" % tempvar2) #Saves in current directory, recommend saving as a pdf or png + + c7 = TCanvas("c3","c3",2400,900) + c7.Divide(2,1) + ##DATA + c7.cd(1) + grap4 = TGraphErrors(NumberOfRuns_data, run_vec_data, kurtosis_array_data[var4[var6]], x_axis_err, kurtosis_array_data_err[var4[var6]]) # Size of arrays, followed by x,y-axis + grap4.GetYaxis().SetRangeUser(-1,0.3) #this is the y-axis range + grap4.SetMarkerSize(1.2) # Makes marker size a bit bigger; standard is 1.0 + grap4.SetTitle("Data: Kurtosis Versus Run Number of "+ graphname + " " + graphsymbol) + grap4.GetXaxis().SetTitle("Run Number") + grap4.GetYaxis().SetTitle("Kurtosis") + grap4.SetMarkerColor(kBlue) + grap4.SetMarkerStyle(kFullCircle) # See https://root.cern.ch/doc/master/classTAttMarker.html + grap5 = TGraphErrors(NumberOfRuns_data, run_vec_data, kurtosis_array_data[var5[var6]], x_axis_err, kurtosis_array_data_err[var5[var6]]) # Size of arrays, followed by x,y-axis + grap5.SetMarkerSize(1.2) # Makes marker size a bit bigger; standard is 1.0 + grap5.SetMarkerColor(kRed) + grap5.SetMarkerStyle(kFullCircle) + grap6 = TGraphErrors(NumberOfRuns_data, run_vec_data, kurtosis_array_data[var9[var6]], x_axis_err, kurtosis_array_data_err[var9[var6]]) # Size of arrays, followed by x,y-axis + grap6.SetMarkerSize(1.2) # Makes marker size a bit bigger; standard is 1.0 + grap6.SetMarkerColor(kGreen+3) + grap6.SetMarkerStyle(kFullCircle) + ## Defining TLegend for graphs grap4, grap5, ... + legend = TLegend(0.7,0.7,0.9,0.9) #0.1 is lower limit of plot, 0.9 is upper limit (beyond on either side is labeling+whitespace) + legend.AddEntry(grap4,"Kurtosis X","pl") + legend.AddEntry(grap5,"Kurtosis Y","pl") + legend.AddEntry(grap6,"Kurtosis Z","pl") + grap4.Draw("AP") + grap5.Draw("PSAME") # AP are options while drawing. A = show axis, P = markers at points (instead of lines between points) Draws on Canvas c1 + grap6.Draw("PSAME") + legend.Draw() # Doesn't need any arguments + ##MC + c7.cd(2) + grap_4 = TGraphErrors(NumberOfRuns_mc, run_vec_mc, kurtosis_array_mc[var4[var6]], x_axis_err, kurtosis_array_mc_err[var4[var6]]) # Size of arrays, followed by x,y-axis + grap_4.GetYaxis().SetRangeUser(-1,0.3) #this is the y-axis range + grap_4.SetMarkerSize(1.2) # Makes marker size a bit bigger; standard is 1.0 + grap_4.SetTitle("MC: Kurtosis Versus Run Number of "+ graphname + " " + graphsymbol) + grap_4.GetXaxis().SetTitle("Run Number") + grap_4.GetYaxis().SetTitle("Kurtosis") + grap_4.SetMarkerColor(kBlue) + grap_4.SetMarkerStyle(kFullCircle) # See https://root.cern.ch/doc/master/classTAttMarker.html + grap_5 = TGraphErrors(NumberOfRuns_mc, run_vec_mc, kurtosis_array_mc[var5[var6]], x_axis_err, kurtosis_array_mc_err[var5[var6]]) # Size of arrays, followed by x,y-axis + grap_5.SetMarkerSize(1.2) # Makes marker size a bit bigger; standard is 1.0 + grap_5.SetMarkerColor(kRed) + grap_5.SetMarkerStyle(kFullCircle) + grap_6 = TGraphErrors(NumberOfRuns_mc, run_vec_mc, kurtosis_array_mc[var9[var6]], x_axis_err, kurtosis_array_mc_err[var9[var6]]) # Size of arrays, followed by x,y-axis + grap_6.SetMarkerSize(1.2) # Makes marker size a bit bigger; standard is 1.0 + grap_6.SetMarkerColor(kGreen+3) + grap_6.SetMarkerStyle(kFullCircle) + ## Defining TLegend for graphs grap_4, grap_5, ... 
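+ # In AddEntry below, the option string "pl" requests both the marker ("p") and the line ("l") attributes of each graph in its legend entry.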
+ legendd = TLegend(0.7,0.7,0.9,0.9) #0.1 is lower limit of plot, 0.9 is upper limit (beyond on either side is labeling+whitespace) + legendd.AddEntry(grap_4,"Kurtosis X","pl") + legendd.AddEntry(grap_5,"Kurtosis Y","pl") + legendd.AddEntry(grap_6,"Kurtosis Z","pl") + grap_4.Draw("AP") + grap_5.Draw("PSAME") # AP are options while drawing. A = show axis, P = markers at points (instead of lines between points) Draws on Canvas c1 + grap_6.Draw("PSAME") + legendd.Draw() # Doesn't need any arguments + c7.SaveAs("splitplotKURTOSIS%s.pdf" % tempvar2) #Saves in current directory, recommend saving asa pdf or png + + + + if partfilename_mc == 'Hist_KinFitResults/Step1__Pi0__Photon_Photon' and var2 == "Photon": #CHANGE BASED ON WHAT REACTION YOU ARE STUDYING + tempvar = "Pull_E"+var2 + # the next bit makes the Mean Energy graph + + if var2 == "Pi+" : graphsymbol = "#pi^{+}" + if var2 == "Pi-" : graphsymbol = "#pi^{-}" + if var2 == "Photon" : graphsymbol = "#gamma" + if var2 == "Proton" : graphsymbol = 'P^{+}' + + try: + c1 = TCanvas("c1","c1",2400,900) # creates a 1200x900 pixel drawing pad called c1. not sure if I have to add 'new' somewhere + c1.Divide(2,1) + ##Data + c1.cd(1) + gr = TGraphErrors(NumberOfRuns_data, run_vec_data, mean_array_data["Pull_E"], x_axis_err, mean_array_data_err["Pull_E"]) # Size of arrays, followed by x,y-axis + gr.GetYaxis().SetRangeUser(-0.3,0.3) #this is the y-axis range + gr.SetMarkerSize(1.2) # Makes marker size a bit bigger; standard is 1.0 + gr.SetTitle("Data: Mean Versus Run Number of Pull_E "+ graphsymbol) + gr.GetXaxis().SetTitle("Run Number") + gr.GetYaxis().SetTitle("Mean") + gr.SetMarkerColor(kBlue) + gr.SetMarkerStyle(kFullCircle) # See https://root.cern.ch/doc/masterclassTAttMarker.html + legend = TLegend(0.7,0.7,0.9,0.9) #0.1 is lower limit of plot, 0.9 is upper limit (beyond on either side is labeling+whitespace) + legend.AddEntry(gr,"Mean E","pl") + gr.Draw("AP") # AP are options while drawing. A = show axis, P = markers at points (instead of lines between points) Draws on Canvas c1 + legend.Draw() # Doesn't need any arguments + ##MC + c1.cd(2) + gr_ = TGraphErrors(NumberOfRuns_mc, run_vec_mc, mean_array_mc["Pull_E"], x_axis_err, mean_array_mc_err["Pull_E"]) # Size of arrays, followed by x,y-axis + gr_.GetYaxis().SetRangeUser(-0.3,0.3) #this is the y-axis range + gr_.SetMarkerSize(1.2) # Makes marker size a bit bigger; standard is 1.0 + gr_.SetTitle("MC: Mean Versus Run Number of Pull_E "+ graphsymbol) + gr_.GetXaxis().SetTitle("Run Number") + gr_.GetYaxis().SetTitle("Mean") + gr_.SetMarkerColor(kBlue) + gr_.SetMarkerStyle(kFullCircle) # See https://root.cern.ch/doc/masterclassTAttMarker.html + legendd = TLegend(0.7,0.7,0.9,0.9) #0.1 is lower limit of plot, 0.9 is upper limit (beyond on either side is labeling+whitespace) + legendd.AddEntry(gr_,"Mean E","pl") + gr_.Draw("AP") # AP are options while drawing. 
A = show axis, P = markers at points (instead of lines between points) Draws on Canvas c1 + legendd.Draw() # Doesn't need any arguments + c1.SaveAs("splitplotMEAN%s.pdf" % tempvar) #Saves in current directory, recommend saving asa pdf or png + + # the next bit makes the Energy RMS graph + c2 = TCanvas("c2","c2",2400,900) # creates a 2400x900 pixel drawing pad called c1; not sure if I have to add 'new' somewhere + c2.Divide(2,1) + ##DATA + c2.cd(1) + gra = TGraphErrors(NumberOfRuns_data, run_vec_data, RMS_array_data["Pull_E"], x_axis_err, RMS_array_data_err["Pull_E"]) # Size of arrays, followed by x,y-axis + gra.GetYaxis().SetRangeUser(0.7,1.3) #this is the y-axis range + gra.SetMarkerSize(1.2) # Makes marker size a bit bigger; standard is 1.0 + gra.SetTitle("Data: RMS Versus Run Number of Pull_E "+ graphsymbol) + gra.GetXaxis().SetTitle("Run Number") + gra.GetYaxis().SetTitle("RMS") + gra.SetMarkerColor(kBlue) + gra.SetMarkerStyle(kFullCircle) # See https://root.cern.ch/doc/masterclassTAttMarker.html + legend = TLegend(0.7,0.7,0.9,0.9) #0.1 is lower limit of plot, 0.9 is upper limit (beyond on either side is labeling+whitespace) + legend.AddEntry(gra,"RMS E","pl") + gra.Draw("AP") # AP are options while drawing. A = show axis, P = markers at points (instea d of lines between points) Draws on Canvas c1 + legend.Draw() # Doesn't need any arguments + ##MC + c2.cd(2) + gra_ = TGraphErrors(NumberOfRuns_mc, run_vec_mc, RMS_array_mc["Pull_E"], x_axis_err, RMS_array_mc_err["Pull_E"]) # Size of arrays, followed by x,y-axis + gra_.GetYaxis().SetRangeUser(0.7,1.3) #this is the y-axis range + gra_.SetMarkerSize(1.2) # Makes marker size a bit bigger; standard is 1.0 + gra_.SetTitle("MC: RMS Versus Run Number of Pull_E "+ graphsymbol) + gra_.GetXaxis().SetTitle("Run Number") + gra_.GetYaxis().SetTitle("RMS") + gra_.SetMarkerColor(kBlue) + gra_.SetMarkerStyle(kFullCircle) # See https://root.cern.ch/doc/masterclassTAttMarker.html + legendd = TLegend(0.7,0.7,0.9,0.9) #0.1 is lower limit of plot, 0.9 is upper limit (beyond on either side is labeling+whitespace) + legendd.AddEntry(gra_,"RMS E","pl") + gra_.Draw("AP") # AP are options while drawing. A = show axis, P = markers at points (instea d of lines between points) Draws on Canvas c1 + legendd.Draw() # Doesn't need any arguments + c2.SaveAs("splitplotRMS%s.pdf" % tempvar) #Saves in current directory, recommend saving as a pdf or png + + # the next bit makes the Energy RMS graph + c8 = TCanvas("c2","c2",2400,900) # creates a 2400x900 pixel drawing pad called c1; not sure if I have to add 'new' somewhere + c8.Divide(2,1) + ##DATA + c8.cd(1) + gra4 = TGraphErrors(NumberOfRuns_data, run_vec_data, skewness_array_data["Pull_E"], x_axis_err, skewness_array_data_err["Pull_E"]) # Size of arrays, followed by x,y-axis + gra4.GetYaxis().SetRangeUser(-0.3,0.3) #this is the y-axis range + gra4.SetMarkerSize(1.2) # Makes marker size a bit bigger; standard is 1.0 + gra4.SetTitle("Data: Skewness Versus Run Number of Pull_E "+ graphsymbol) + gra4.GetXaxis().SetTitle("Run Number") + gra4.GetYaxis().SetTitle("Skewness") + gra4.SetMarkerColor(kBlue) + gra4.SetMarkerStyle(kFullCircle) # See https://root.cern.ch/doc/masterclassTAttMarker.html + legend = TLegend(0.7,0.7,0.9,0.9) #0.1 is lower limit of plot, 0.9 is upper limit (beyond on either side is labeling+whitespace) + legend.AddEntry(gra4,"Skewness E","pl") + gra4.Draw("AP") # AP are options while drawing. 
A = show axis, P = markers at points (instea d of lines between points) Draws on Canvas c1 + legend.Draw() # Doesn't need any arguments + ##MC + c8.cd(2) + gra_4 = TGraphErrors(NumberOfRuns_mc, run_vec_mc, skewness_array_mc["Pull_E"], x_axis_err, skewness_array_mc_err["Pull_E"]) # Size of arrays, followed by x,y-axis + gra_4.GetYaxis().SetRangeUser(-0.3,0.3) #this is the y-axis range + gra_4.SetMarkerSize(1.2) # Makes marker size a bit bigger; standard is 1.0 + gra_4.SetTitle("MC: Skewness Versus Run Number of Pull_E "+ graphsymbol) + gra_4.GetXaxis().SetTitle("Run Number") + gra_4.GetYaxis().SetTitle("Skewness") + gra_4.SetMarkerColor(kBlue) + gra_4.SetMarkerStyle(kFullCircle) # See https://root.cern.ch/doc/masterclassTAttMarker.html + legendd = TLegend(0.7,0.7,0.9,0.9) #0.1 is lower limit of plot, 0.9 is upper limit (beyond on either side is labeling+whitespace) + legendd.AddEntry(gra_4,"Skewness E","pl") + gra_4.Draw("AP") # AP are options while drawing. A = show axis, P = markers at points (instea d of lines between points) Draws on Canvas c1 + legendd.Draw() # Doesn't need any arguments + c8.SaveAs("splitplotSKEWNESS%s.pdf" % tempvar) #Saves in current directory, recommend saving as a pdf or png + + # the next bit makes the Energy RMS graph + c9 = TCanvas("c2","c2",2400,900) # creates a 2400x900 pixel drawing pad called c1; not sure if I have to add 'new' somewhere + c9.Divide(2,1) + ##Data + c9.cd(1) + gra5 = TGraphErrors(NumberOfRuns_data, run_vec_data, kurtosis_array_data["Pull_E"], x_axis_err, kurtosis_array_data_err["Pull_E"]) # Size of arrays, followed by x,y-axis + gra5.GetYaxis().SetRangeUser(-0.7,0) #this is the y-axis range + gra5.SetMarkerSize(1.2) # Makes marker size a bit bigger; standard is 1.0 + gra5.SetTitle("Data: Kurtosis Versus Run Number of Pull_E "+ graphsymbol) + gra5.GetXaxis().SetTitle("Run Number") + gra5.GetYaxis().SetTitle("Kurtosis") + gra5.SetMarkerColor(kBlue) + gra5.SetMarkerStyle(kFullCircle) # See https://root.cern.ch/doc/masterclassTAttMarker.html + legend = TLegend(0.7,0.7,0.9,0.9) #0.1 is lower limit of plot, 0.9 is upper limit (beyond on either side is labeling+whitespace) + legend.AddEntry(gra5,"Kurtosis E","pl") + gra5.Draw("AP") # AP are options while drawing. A = show axis, P = markers at points (instea d of lines between points) Draws on Canvas c1 + legend.Draw() # Doesn't need any arguments + ##Data + c9.cd(2) + gra_5 = TGraphErrors(NumberOfRuns_mc, run_vec_mc, kurtosis_array_mc["Pull_E"], x_axis_err, kurtosis_array_mc_err["Pull_E"]) # Size of arrays, followed by x,y-axis + gra_5.GetYaxis().SetRangeUser(-0.7,0) #this is the y-axis range + gra_5.SetMarkerSize(1.2) # Makes marker size a bit bigger; standard is 1.0 + gra_5.SetTitle("MC: Kurtosis Versus Run Number of Pull_E "+ graphsymbol) + gra_5.GetXaxis().SetTitle("Run Number") + gra_5.GetYaxis().SetTitle("Kurtosis") + gra_5.SetMarkerColor(kBlue) + gra_5.SetMarkerStyle(kFullCircle) # See https://root.cern.ch/doc/masterclassTAttMarker.html + legendd = TLegend(0.7,0.7,0.9,0.9) #0.1 is lower limit of plot, 0.9 is upper limit (beyond on either side is labeling+whitespace) + legendd.AddEntry(gra_5,"Kurtosis E","pl") + gra_5.Draw("AP") # AP are options while drawing. 
A = show axis, P = markers at points (instea d of lines between points) Draws on Canvas c1 + legendd.Draw() # Doesn't need any arguments + c9.SaveAs("splitplotKURTOSIS%s.pdf" % tempvar) #Saves in current directory, recommend saving as a pdf or png + except: print "Photon energy couldn't be graphed" + + try: + #makes the photon Xxyz mean graph + tempvar2=var2+"Position" + c4 = TCanvas("c3","c3",2400,900) + c4.Divide(2,1) + ##DATA + c4.cd(1) + gr6 = TGraphErrors(NumberOfRuns_data, run_vec_data, mean_array_data["Pull_Xx"], x_axis_err, mean_array_data_err["Pull_Xx"]) # Size of arrays, followed by x,y-axis + gr6.GetYaxis().SetRangeUser(-0.3,0.3) #this is the y-axis range + gr6.SetMarkerSize(1.2) # Makes marker size a bit bigger; standard is 1.0 + gr6.SetTitle("Data: Mean Versus Run Number of Position " + graphsymbol) + gr6.GetXaxis().SetTitle("Run Number") + gr6.GetYaxis().SetTitle("Mean") + gr6.SetMarkerColor(kBlue) + gr6.SetMarkerStyle(kFullCircle) # See https://root.cern.ch/doc/masterclassTAttMarker.html + gr7 = TGraphErrors(NumberOfRuns_data, run_vec_data, mean_array_data["Pull_Xy"], x_axis_err, mean_array_data_err["Pull_Xy"]) # Size of arrays, followed by x,y-axis + gr7.SetMarkerSize(1.2) # Makes marker size a bit bigger; standard is 1.0 + gr7.SetMarkerColor(kRed) + gr7.SetMarkerStyle(kFullCircle) + gr8 = TGraphErrors(NumberOfRuns_data, run_vec_data, mean_array_data["Pull_Xz"], x_axis_err, mean_array_data_err["Pull_Xz"]) # Size of arrays, followed by x,y-axis + gr8.SetMarkerSize(1.2) # Makes marker size a bit bigger; standard is 1.0 + gr8.SetMarkerColor(kGreen+3) + gr8.SetMarkerStyle(kFullCircle) + legend = TLegend(0.7,0.7,0.9,0.9) #0.1 is lower limit of plot, 0.9 is upper limit (beyond on either side is labeling+whitespace) + legend.AddEntry(gr6,"Mean X","pl") + legend.AddEntry(gr7,"Mean Y","pl") + legend.AddEntry(gr8,"Mean Z","pl") + gr6.Draw("AP") + gr7.Draw("PSAME") # AP are options while drawing. 
A = show axis, P = markers at points (instead of lines between points) Draws on Canvas c1 + gr8.Draw("PSAME") + legend.Draw() # Doesn't need any arguments + ##DATA + c4.cd(2) + gr_6 = TGraphErrors(NumberOfRuns_mc, run_vec_mc, mean_array_mc["Pull_Xx"], x_axis_err, mean_array_mc_err["Pull_Xx"]) # Size of arrays, followed by x,y-axis + gr_6.GetYaxis().SetRangeUser(-0.3,0.3) #this is the y-axis range + gr_6.SetMarkerSize(1.2) # Makes marker size a bit bigger; standard is 1.0 + gr_6.SetTitle("MC: Mean Versus Run Number of Position " + graphsymbol) + gr_6.GetXaxis().SetTitle("Run Number") + gr_6.GetYaxis().SetTitle("Mean") + gr_6.SetMarkerColor(kBlue) + gr_6.SetMarkerStyle(kFullCircle) # See https://root.cern.ch/doc/masterclassTAttMarker.html + gr_7 = TGraphErrors(NumberOfRuns_mc, run_vec_mc, mean_array_mc["Pull_Xy"], x_axis_err, mean_array_mc_err["Pull_Xy"]) # Size of arrays, followed by x,y-axis + gr_7.SetMarkerSize(1.2) # Makes marker size a bit bigger; standard is 1.0 + gr_7.SetMarkerColor(kRed) + gr_7.SetMarkerStyle(kFullCircle) + gr_8 = TGraphErrors(NumberOfRuns_mc, run_vec_mc, mean_array_mc["Pull_Xz"], x_axis_err, mean_array_mc_err["Pull_Xz"]) # Size of arrays, followed by x,y-axis + gr_8.SetMarkerSize(1.2) # Makes marker size a bit bigger; standard is 1.0 + gr_8.SetMarkerColor(kGreen+3) + gr_8.SetMarkerStyle(kFullCircle) + legendd = TLegend(0.7,0.7,0.9,0.9) #0.1 is lower limit of plot, 0.9 is upper limit (beyond on either side is labeling+whitespace) + legendd.AddEntry(gr_6,"Mean X","pl") + legendd.AddEntry(gr_7,"Mean Y","pl") + legendd.AddEntry(gr_8,"Mean Z","pl") + gr_6.Draw("AP") + gr_7.Draw("PSAME") # AP are options while drawing. A = show axis, P = markers at points (instead of lines between points) Draws on Canvas c1 + gr_8.Draw("PSAME") + legendd.Draw() # Doesn't need any arguments + c4.SaveAs("splitplotMEAN%s.pdf" % tempvar2) #Saves in current directory, recommend saving asa pdf or png + + #makes the photon Xxyz RMS graph + c5 = TCanvas("c3","c3",2400,900) + c5.Divide(2,1) + ##DATA + c5.cd(1) + gra1 = TGraphErrors(NumberOfRuns_data, run_vec_data, RMS_array_data["Pull_Xx"], x_axis_err, RMS_array_data_err["Pull_Xx"]) # Size of arrays, followed by x,y-axis + gra1.GetYaxis().SetRangeUser(0.7,1.3) #this is the y-axis range + gra1.SetMarkerSize(1.2) # Makes marker size a bit bigger; standard is 1.0 + gra1.SetTitle("Data: RMS Versus Run Number of Position " + graphsymbol) + gra1.GetXaxis().SetTitle("Run Number") + gra1.GetYaxis().SetTitle("RMS") + gra1.SetMarkerColor(kBlue) + gra1.SetMarkerStyle(kFullCircle) # See https://root.cern.ch/doc/masterclassTAttMarker.html + gra2 = TGraphErrors(NumberOfRuns_data, run_vec_data, RMS_array_data["Pull_Xy"], x_axis_err, RMS_array_data_err["Pull_Xy"]) # Size of arrays, followed by x,y-axis + gra2.SetMarkerSize(1.2) # Makes marker size a bit bigger; standard is 1.0 + gra2.SetMarkerColor(kRed) + gra2.SetMarkerStyle(kFullCircle) + gra3 = TGraphErrors(NumberOfRuns_data, run_vec_data, RMS_array_data["Pull_Xz"], x_axis_err, RMS_array_data_err["Pull_Xz"]) # Size of arrays, followed by x,y-axis + gra3.SetMarkerSize(1.2) # Makes marker size a bit bigger; standard is 1.0 + gra3.SetMarkerColor(kGreen+3) + gra3.SetMarkerStyle(kFullCircle) + legend = TLegend(0.7,0.7,0.9,0.9) #0.1 is lower limit of plot, 0.9 is upper limit (beyond on either side is labeling+whitespace) + legend.AddEntry(gra1,"RMS X","pl") + legend.AddEntry(gra2,"RMS Y","pl") + legend.AddEntry(gra3,"RMS Z","pl") + gra1.Draw("AP") + gra2.Draw("PSAME") # AP are options while drawing. 
A = show axis, P = markers at points (instead of lines between points) Draws on Canvas c1 + gra3.Draw("PSAME") + legend.Draw() # Doesn't need any arguments + ##MC + c5.cd(2) + gra_1 = TGraphErrors(NumberOfRuns_mc, run_vec_mc, RMS_array_mc["Pull_Xx"], x_axis_err, RMS_array_mc_err["Pull_Xx"]) # Size of arrays, followed by x,y-axis + gra_1.GetYaxis().SetRangeUser(0.7,1.3) #this is the y-axis range + gra_1.SetMarkerSize(1.2) # Makes marker size a bit bigger; standard is 1.0 + gra_1.SetTitle("MC: RMS Versus Run Number of Position " + graphsymbol) + gra_1.GetXaxis().SetTitle("Run Number") + gra_1.GetYaxis().SetTitle("RMS") + gra_1.SetMarkerColor(kBlue) + gra_1.SetMarkerStyle(kFullCircle) # See https://root.cern.ch/doc/masterclassTAttMarker.html + gra_2 = TGraphErrors(NumberOfRuns_mc, run_vec_mc, RMS_array_mc["Pull_Xy"], x_axis_err, RMS_array_mc_err["Pull_Xy"]) # Size of arrays, followed by x,y-axis + gra_2.SetMarkerSize(1.2) # Makes marker size a bit bigger; standard is 1.0 + gra_2.SetMarkerColor(kRed) + gra_2.SetMarkerStyle(kFullCircle) + gra_3 = TGraphErrors(NumberOfRuns_mc, run_vec_mc, RMS_array_mc["Pull_Xz"], x_axis_err, RMS_array_mc_err["Pull_Xz"]) # Size of arrays, followed by x,y-axis + gra_3.SetMarkerSize(1.2) # Makes marker size a bit bigger; standard is 1.0 + gra_3.SetMarkerColor(kGreen+3) + gra_3.SetMarkerStyle(kFullCircle) + legendd = TLegend(0.7,0.7,0.9,0.9) #0.1 is lower limit of plot, 0.9 is upper limit (beyond on either side is labeling+whitespace) + legendd.AddEntry(gra_1,"RMS X","pl") + legendd.AddEntry(gra_2,"RMS Y","pl") + legendd.AddEntry(gra_3,"RMS Z","pl") + gra_1.Draw("AP") + gra_2.Draw("PSAME") # AP are options while drawing. A = show axis, P = markers at points (instead of lines between points) Draws on Canvas c1 + gra_3.Draw("PSAME") + legendd.Draw() # Doesn't need any arguments + c5.SaveAs("splitplotRMS%s.pdf" % tempvar2) #Saves in current directory, recommend saving asa pdf or png + + #makes the photon Xxyz RMS graph + c10 = TCanvas("c3","c3",2400,900) + c10.Divide(2,1) + ##DATA + c10.cd(1) + gra5 = TGraphErrors(NumberOfRuns_data, run_vec_data, skewness_array_data["Pull_Xx"], x_axis_err, skewness_array_data_err["Pull_Xx"]) # Size of arrays, followed by x,y-axis + gra5.GetYaxis().SetRangeUser(-0.3,0.3) #this is the y-axis range + gra5.SetMarkerSize(1.2) # Makes marker size a bit bigger; standard is 1.0 + gra5.SetTitle("Data: Skewness Versus Run Number of Position " + graphsymbol) + gra5.GetXaxis().SetTitle("Run Number") + gra5.GetYaxis().SetTitle("Skewness") + gra5.SetMarkerColor(kBlue) + gra5.SetMarkerStyle(kFullCircle) # See https://root.cern.ch/doc/masterclassTAttMarker.html + gra6 = TGraphErrors(NumberOfRuns_data, run_vec_data, skewness_array_data["Pull_Xy"], x_axis_err, skewness_array_data_err["Pull_Xy"]) # Size of arrays, followed by x,y-axis + gra6.SetMarkerSize(1.2) # Makes marker size a bit bigger; standard is 1.0 + gra6.SetMarkerColor(kRed) + gra6.SetMarkerStyle(kFullCircle) + gra7 = TGraphErrors(NumberOfRuns_data, run_vec_data, skewness_array_data["Pull_Xz"], x_axis_err, skewness_array_data_err["Pull_Xz"]) # Size of arrays, followed by x,y-axis + gra7.SetMarkerSize(1.2) # Makes marker size a bit bigger; standard is 1.0 + gra7.SetMarkerColor(kGreen+3) + gra7.SetMarkerStyle(kFullCircle) + legend = TLegend(0.7,0.7,0.9,0.9) #0.1 is lower limit of plot, 0.9 is upper limit (beyond on either side is labeling+whitespace) + legend.AddEntry(gra5,"Skewness X","pl") + legend.AddEntry(gra6,"Skewness Y","pl") + legend.AddEntry(gra7,"Skewness Z","pl") + gra5.Draw("AP") + 
gra6.Draw("PSAME") # AP are options while drawing. A = show axis, P = markers at points (instead of lines between points) Draws on Canvas c1 + gra7.Draw("PSAME") + legend.Draw() # Doesn't need any arguments + ##DATA + c10.cd(2) + gra_5 = TGraphErrors(NumberOfRuns_mc, run_vec_mc, skewness_array_mc["Pull_Xx"], x_axis_err, skewness_array_mc_err["Pull_Xx"]) # Size of arrays, followed by x,y-axis + gra_5.GetYaxis().SetRangeUser(-0.3,0.3) #this is the y-axis range + gra_5.SetMarkerSize(1.2) # Makes marker size a bit bigger; standard is 1.0 + gra_5.SetTitle("MC: Skewness Versus Run Number of Position " + graphsymbol) + gra_5.GetXaxis().SetTitle("Run Number") + gra_5.GetYaxis().SetTitle("Skewness") + gra_5.SetMarkerColor(kBlue) + gra_5.SetMarkerStyle(kFullCircle) # See https://root.cern.ch/doc/masterclassTAttMarker.html + gra_6 = TGraphErrors(NumberOfRuns_mc, run_vec_mc, skewness_array_mc["Pull_Xy"], x_axis_err, skewness_array_mc_err["Pull_Xy"]) # Size of arrays, followed by x,y-axis + gra_6.SetMarkerSize(1.2) # Makes marker size a bit bigger; standard is 1.0 + gra_6.SetMarkerColor(kRed) + gra_6.SetMarkerStyle(kFullCircle) + gra_7 = TGraphErrors(NumberOfRuns_mc, run_vec_mc, skewness_array_mc["Pull_Xz"], x_axis_err, skewness_array_mc_err["Pull_Xz"]) # Size of arrays, followed by x,y-axis + gra_7.SetMarkerSize(1.2) # Makes marker size a bit bigger; standard is 1.0 + gra_7.SetMarkerColor(kGreen+3) + gra_7.SetMarkerStyle(kFullCircle) + legendd = TLegend(0.7,0.7,0.9,0.9) #0.1 is lower limit of plot, 0.9 is upper limit (beyond on either side is labeling+whitespace) + legendd.AddEntry(gra_5,"Skewness X","pl") + legendd.AddEntry(gra_6,"Skewness Y","pl") + legendd.AddEntry(gra_7,"Skewness Z","pl") + gra_5.Draw("AP") + gra_6.Draw("PSAME") # AP are options while drawing. 
A = show axis, P = markers at points (instead of lines between points) Draws on Canvas c1 + gra_7.Draw("PSAME") + legendd.Draw() # Doesn't need any arguments + c10.SaveAs("splitplotSKEWNESS%s.pdf" % tempvar2) #Saves in current directory, recommend saving asa pdf or png + + #makes the photon Xxyz RMS graph + c11 = TCanvas("c3","c3",2400,900) + c11.Divide(2,1) + ##Data + c11.cd(1) + gra8 = TGraphErrors(NumberOfRuns_data, run_vec_data, kurtosis_array_data["Pull_Xx"], x_axis_err, kurtosis_array_data_err["Pull_Xx"]) # Size of arrays, followed by x,y-axis + gra8.GetYaxis().SetRangeUser(-0.6,-0.2) #this is the y-axis range + gra8.SetMarkerSize(1.2) # Makes marker size a bit bigger; standard is 1.0 + gra8.SetTitle("Data: Kurtosis Versus Run Number of Position " + graphsymbol) + gra8.GetXaxis().SetTitle("Run Number") + gra8.GetYaxis().SetTitle("Kurtosis") + gra8.SetMarkerColor(kBlue) + gra8.SetMarkerStyle(kFullCircle) # See https://root.cern.ch/doc/masterclassTAttMarker.html + gra9 = TGraphErrors(NumberOfRuns_data, run_vec_data, kurtosis_array_data["Pull_Xy"], x_axis_err, kurtosis_array_data_err["Pull_Xy"]) # Size of arrays, followed by x,y-axis + gra9.SetMarkerSize(1.2) # Makes marker size a bit bigger; standard is 1.0 + gra9.SetMarkerColor(kRed) + gra9.SetMarkerStyle(kFullCircle) + gra10 = TGraphErrors(NumberOfRuns_data, run_vec_data, kurtosis_array_data["Pull_Xz"], x_axis_err, kurtosis_array_data_err["Pull_Xz"]) # Size of arrays, followed by x,y-axis + gra10.SetMarkerSize(1.2) # Makes marker size a bit bigger; standard is 1.0 + gra10.SetMarkerColor(kGreen+3) + gra10.SetMarkerStyle(kFullCircle) + legend = TLegend(0.7,0.7,0.9,0.9) #0.1 is lower limit of plot, 0.9 is upper limit (beyond on either side is labeling+whitespace) + legend.AddEntry(gra8,"Kurtosis X","pl") + legend.AddEntry(gra9,"Kurtosis Y","pl") + legend.AddEntry(gra10,"Kurtosis Z","pl") + gra8.Draw("AP") + gra9.Draw("PSAME") # AP are options while drawing. 
A = show axis, P = markers at points (instead of lines between points) Draws on Canvas c1 + gra10.Draw("PSAME") + legend.Draw() # Doesn't need any arguments + ##Data + c11.cd(2) + gra_8 = TGraphErrors(NumberOfRuns_mc, run_vec_mc, kurtosis_array_mc["Pull_Xx"], x_axis_err, kurtosis_array_mc_err["Pull_Xx"]) # Size of arrays, followed by x,y-axis + gra_8.GetYaxis().SetRangeUser(-0.6,-0.2) #this is the y-axis range + gra_8.SetMarkerSize(1.2) # Makes marker size a bit bigger; standard is 1.0 + gra_8.SetTitle("MC: Kurtosis Versus Run Number of Position " + graphsymbol) + gra_8.GetXaxis().SetTitle("Run Number") + gra_8.GetYaxis().SetTitle("Kurtosis") + gra_8.SetMarkerColor(kBlue) + gra_8.SetMarkerStyle(kFullCircle) # See https://root.cern.ch/doc/masterclassTAttMarker.html + gra_9 = TGraphErrors(NumberOfRuns_mc, run_vec_mc, kurtosis_array_mc["Pull_Xy"], x_axis_err, kurtosis_array_mc_err["Pull_Xy"]) # Size of arrays, followed by x,y-axis + gra_9.SetMarkerSize(1.2) # Makes marker size a bit bigger; standard is 1.0 + gra_9.SetMarkerColor(kRed) + gra_9.SetMarkerStyle(kFullCircle) + gra_10 = TGraphErrors(NumberOfRuns_mc, run_vec_mc, kurtosis_array_mc["Pull_Xz"], x_axis_err, kurtosis_array_mc_err["Pull_Xz"]) # Size of arrays, followed by x,y-axis + gra_10.SetMarkerSize(1.2) # Makes marker size a bit bigger; standard is 1.0 + gra_10.SetMarkerColor(kGreen+3) + gra_10.SetMarkerStyle(kFullCircle) + legendd = TLegend(0.7,0.7,0.9,0.9) #0.1 is lower limit of plot, 0.9 is upper limit (beyond on either side is labeling+whitespace) + legendd.AddEntry(gra_8,"Kurtosis X","pl") + legendd.AddEntry(gra_9,"Kurtosis Y","pl") + legendd.AddEntry(gra_10,"Kurtosis Z","pl") + gra_8.Draw("AP") + gra_9.Draw("PSAME") # AP are options while drawing. A = show axis, P = markers at points (instead of lines between points) Draws on Canvas c1 + gra_10.Draw("PSAME") + legendd.Draw() # Doesn't need any arguments + c11.SaveAs("splitplotKURTOSIS%s.pdf" % tempvar2) #Saves in current directory, recommend saving as a pdf or png + except: print "Photon position couldn't be graphed" + counter = counter +1 + + +if __name__ == "__main__": + main(sys.argv[1:]) \ No newline at end of file diff --git a/kinfitter/monitoring/GraphRunPeriod_pippimpi0_ver2.py b/kinfitter/monitoring/GraphRunPeriod_pippimpi0_ver2.py new file mode 100755 index 00000000..02040778 --- /dev/null +++ b/kinfitter/monitoring/GraphRunPeriod_pippimpi0_ver2.py @@ -0,0 +1,822 @@ +#!/usr/bin/env python + +# Standard imports +from optparse import OptionParser +import os.path +import os +import sys +import subprocess +import glob +from array import * +from math import sqrt, exp + +#Import ROOT modules +from ROOT import * + +TOP_LEVEL_DATA_LOC = "/w/halld-scshelf2101/home/jzarling/ForPeople/ForMadison/hd_root_files_2017_DATA/" +TOP_LEVEL_MC_LOC = "/w/halld-scshelf2101/home/jzarling/ForPeople/ForMadison/hd_root_files_2017_MC/" + +def IsTDir(tobject): + if("TDirectoryFile" in str(type(tobject))): return True + else: return False + +def main(argv): + #Usage controls from OptionParser + parser_usage = "" + parser = OptionParser(usage = parser_usage) + (options, args) = parser.parse_args(argv) + if(len(args) != 0): + parser.print_help() + return + + gROOT.SetBatch(True) + + + #DATA + loopcount_data = 0 # This variable counts which file number we are on + filename_list_data = glob.glob(TOP_LEVEL_DATA_LOC+"/hd_root_0*.root") #file name is now an array variable that holds all the files we want + NumberOfRuns_data = len(filename_list_data) # Number of Runs specific to this data sample + + #MC + 
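+ # Same bookkeeping as for the data sample above; run numbers are later parsed from characters [-10:-5] of each file path, which assumes the hd_root_0XXXXX.root naming matched by these globs.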
loopcount_mc = 0 # This variable counts which file number we are on + filename_list_mc = glob.glob(TOP_LEVEL_MC_LOC+"/hd_root_0*.root") #file name is now an array variable that holds all the files we want + NumberOfRuns_mc = len(filename_list_mc) # Number of Runs specific to this data sample + + #DATA + subsubkey_array_data = [] + pullname_array_data = [] + tempnum_data=0 + f_data = TFile.Open(TOP_LEVEL_DATA_LOC+'/hd_root_030597.root') #this is needed just to get this f bit to run; f will be redefined later + topdir_keys_data = f_data.GetListOfKeys() + for key_data in topdir_keys_data: + key_string_data = key_data.GetTitle() + print "Keyname: " + key_string_data #gets the keyname (ie. pi0pippim__B4_M7) + if key_string_data == 'pi0pippim__B4_F1_M7': + subdir_data = f_data.Get(key_string_data) + subdir_keys_data = subdir_data.GetListOfKeys() + for subkey_data in subdir_keys_data: + subkey_string_data = subkey_data.GetTitle() + print "Subkey: " + subkey_string_data #gets the subkey (ie. Hist_KinFitResults) + print "Trying to access: " + key_string_data+"/"+subkey_string_data + if subkey_string_data == 'Hist_KinFitResults': + subsubdir_data = f_data.Get(key_string_data+"/"+subkey_string_data) + print "subsubdir type: " +str(type(subsubdir_data)) + if IsTDir(subsubdir_data): + subsubdir_keys_data = subsubdir_data.GetListOfKeys() + for subsubkey_data in subsubdir_keys_data: + subsubkey_string_data = subsubkey_data.GetTitle() + print "Subsubkey title: " + subsubkey_string_data #gets the subsubkey (ie.Step0__Photon_Proton__Pi0_Pi+_Pi-_Proton) + if 'Step' in subsubkey_string_data: + tempnum_data=int(subsubkey_string_data[4]) + subsubkey_array_data.insert(tempnum_data, subkey_string_data+'/'+subsubkey_string_data) ##UP TO HERE WORKS + + #MC + subsubkey_array_mc = [] + pullname_array_mc = [] + tempnum_mc=0 + f_mc = TFile.Open(TOP_LEVEL_MC_LOC+'/hd_root_030597.root') #this is needed just to get this f bit to run; f will be redefined later + topdir_keys_mc = f_mc.GetListOfKeys() + for key_mc in topdir_keys_mc: + key_string_mc = key_mc.GetTitle() + print "Keyname: " + key_string_mc #gets the keyname (ie. pi0pippim__B4_M7) + if key_string_mc == 'pi0pippim__pi0_gg__B4_F1_M7': + subdir_mc = f_mc.Get(key_string_mc) + subdir_keys_mc = subdir_mc.GetListOfKeys() + for subkey_mc in subdir_keys_mc: + subkey_string_mc = subkey_mc.GetTitle() + print "Subkey: " + subkey_string_mc #gets the subkey (ie. Hist_KinFitResults) + print "Trying to access: " + key_string_mc+"/"+subkey_string_mc + if subkey_string_mc == 'Hist_KinFitResults': + subsubdir_mc = f_mc.Get(key_string_mc+"/"+subkey_string_mc) + print "subsubdir type: " +str(type(subsubdir_mc)) + if IsTDir(subsubdir_mc): + subsubdir_keys_mc = subsubdir_mc.GetListOfKeys() + for subsubkey_mc in subsubdir_keys_mc: + subsubkey_string_mc = subsubkey_mc.GetTitle() + print "Subsubkey title: " + subsubkey_string_mc #gets the subsubkey (ie.Step0__Photon_Proton__Pi0_Pi+_Pi-_Proton) + if 'Step' in subsubkey_string_mc: + tempnum_mc=int(subsubkey_string_mc[4]) + subsubkey_array_mc.insert(tempnum_mc, subkey_string_mc+'/'+subsubkey_string_mc) ##UP TO HERE WORKS + + + print 'subsubkey_array_data = ' + print subsubkey_array_data + print 'subsubkey_array_mc = ' + print subsubkey_array_mc + + counter = 0 + +#DATA + for partfilename_data in subsubkey_array_data: + partfilename_mc = subsubkey_array_mc[counter] + for var2 in ["Proton", "Pi+", "Pi-", "Photon"]: + + +#DATA + run_vec_data = array('f') # Will store all run numbers in an array. 
we will use this as the x-axis in a graph later + pull_var_list_data = ["Pull_Px", "Pull_Py", "Pull_Pz", "Pull_E"] + RMS_array_data = {var1 : array('f') for var1 in pull_var_list_data} # Stores all RMSs in an array in a dictionary. y-axis in graph + mean_array_data = {var1 : array('f') for var1 in pull_var_list_data}# Stores all means in an array in a dictionary. y-axis in graph + skewness_array_data = {var1 : array('f') for var1 in pull_var_list_data} + kurtosis_array_data = {var1 : array('f') for var1 in pull_var_list_data} + for loopcount_data, filename_data in enumerate(filename_list_data): # opening loop that collects all (x, y) + + f_data = TFile.Open(filename_data) + print("Current file:", filename_data) + run_vec_data.append(int(filename_data[-10:-5])) + for var1 in pull_var_list_data: + tempfilename_data = "pi0pippim__B4_F1_M7/"+partfilename_data+"/"+var2+"/"+var1 + print tempfilename_data + if tempfilename_data: + pathname_data = "pi0pippim__B4_F1_M7/"+partfilename_data+"/"+var2+"/"+var1 + h_data = f_data.Get("pi0pippim__B4_F1_M7/"+partfilename_data+"/"+var2+"/"+var1) + if hasattr(h_data, 'GetEntries'): + + num_entries_data = h_data.GetEntries() # Number of times a value was entered into the histogram + mean_data = h_data.GetMean() # Avarage vlaue of this histogram (we wnat it to be 0) + RMS_data = h_data.GetRMS() #standard deviation + skewness_data = h_data.GetSkewness() + kurtosis_data = h_data.GetKurtosis() + mean_array_data[var1].append(float(mean_data)) + RMS_array_data[var1].append(float(RMS_data)) #these will be used in the graphs + skewness_array_data[var1].append(float(skewness_data)) + kurtosis_array_data[var1].append(float(kurtosis_data)) + print("Number of entries in histogram", num_entries_data) + print("Histogram average:", mean_data) + print("Standard Deviation (RMS):", RMS_data) # Prints out the results for this data file + print("Histogram skewness:", skewness_data) + print("Histogram kurtosis:", kurtosis_data) + print("Current file:", filename_data) + print("Current loop count:", loopcount_data) + print("The current variable is:", var1) + print("The current outter variable is:", var2) + print("The current path name is: ", pathname_data) + + +#MC + run_vec_mc = array('f') # Will store all run numbers in an array. we will use this as the x-axis in a graph later + pull_var_list_mc = ["Pull_Px", "Pull_Py", "Pull_Pz", "Pull_E"] + RMS_array_mc = {var1 : array('f') for var1 in pull_var_list_mc} # Stores all RMSs in an array in a dictionary. y-axis in graph + mean_array_mc = {var1 : array('f') for var1 in pull_var_list_mc}# Stores all means in an array in a dictionary. 
y-axis in graph + skewness_array_mc = {var1 : array('f') for var1 in pull_var_list_mc} + kurtosis_array_mc = {var1 : array('f') for var1 in pull_var_list_mc} + for loopcount_mc, filename_mc in enumerate(filename_list_mc): # opening loop that collects all (x, y) + + f_mc = TFile.Open(filename_mc) + print("Current file:", filename_mc) + run_vec_mc.append(int(filename_mc[-10:-5])) + for var1 in pull_var_list_mc: + tempfilename_mc = "pi0pippim__pi0_gg__B4_F1_M7/"+partfilename_mc+"/"+var2+"/"+var1 + print tempfilename_mc + if tempfilename_mc: + pathname_mc = "pi0pippim__pi0_gg__B4_F1_M7/"+partfilename_mc+"/"+var2+"/"+var1 + h_mc = f_mc.Get("pi0pippim__pi0_gg__B4_F1_M7/"+partfilename_mc+"/"+var2+"/"+var1) + if hasattr(h_mc, 'GetEntries'): + + num_entries_mc = h_mc.GetEntries() # Number of times a value was entered into the histogram + mean_mc = h_mc.GetMean() # Avarage vlaue of this histogram (we wnat it to be 0) + RMS_mc = h_mc.GetRMS() #standard deviation + skewness_mc = h_mc.GetSkewness() + kurtosis_mc = h_mc.GetKurtosis() + mean_array_mc[var1].append(float(mean_mc)) + RMS_array_mc[var1].append(float(RMS_mc)) #these will be used in the graphs + skewness_array_mc[var1].append(float(skewness_mc)) + kurtosis_array_mc[var1].append(float(kurtosis_mc)) + print("Number of entries in histogram", num_entries_mc) + print("Histogram average:", mean_mc) + print("Standard Deviation (RMS):", RMS_mc) # Prints out the results for this data file + print("Histogram skewness:", skewness_mc) + print("Histogram kurtosis:", kurtosis_mc) + print("Current file:", filename_mc) + print("Current loop count:", loopcount_mc) + print("The current variable is:", var1) + print("The current outter variable is:", var2) + print("The current path name is: ", pathname_mc) +###Graphs + + if var2 == "Proton" or var2 == "Pi+" or var2 == "Pi-": + if partfilename_data == 'Hist_KinFitResults/Step0__Photon_Proton__Pi0_Pi+_Pi-_Proton': #might need to change + var4 = ["Pull_Px"]#, "Pull_Xx"] + var5 = ["Pull_Py"]#, "Pull_Xy"] + var9 = ["Pull_Pz"]#, "Pull_Xz"] + for var6 in [0]:#,1]: + #this prints the graphs for x,y,z mean + + if var2 == "Pi+" : graphsymbol = "#pi^{+}" + if var2 == "Pi-" : graphsymbol = "#pi^{-}" + if var2 == "Photon" : graphsymbol = "#gamma" + if var2 == "Proton" : graphsymbol = 'P^{+}' + if var6 == 0: graphname = 'Momentum' + + + tempvar2=var2+graphname + c3 = TCanvas("c3","c3",2400,900) + c3.Divide(2,1) + ###Data + c3.cd(1) + gr1 = TGraph(NumberOfRuns_data, run_vec_data, mean_array_data[var4[var6]]) # Size of arrays, followed by x,y-axis + gr1.GetYaxis().SetRangeUser(-0.3,0.3) #this is the y-axis range + gr1.SetMarkerSize(1.2) # Makes marker size a bit bigger; standard is 1.0 + gr1.SetTitle("Data: Mean Versus Run Number of "+ graphname + " " + graphsymbol) + gr1.GetXaxis().SetTitle("Run Number") + gr1.GetYaxis().SetTitle("Mean") + gr1.SetMarkerColor(kBlue) + gr1.SetMarkerStyle(kFullCircle) # See https://root.cern.ch/doc/masterclassTAttMarker.html + gr2 = TGraph(NumberOfRuns_data, run_vec_data, mean_array_data[var5[var6]]) # Size of arrays, followed by x,y-axis + gr2.SetMarkerSize(1.2) # Makes marker size a bit bigger; standard is 1.0 + gr2.SetMarkerColor(kRed) + gr2.SetMarkerStyle(kFullCircle) + gr3 = TGraph(NumberOfRuns_data, run_vec_data, mean_array_data[var9[var6]]) # Size of arrays, followed by x,y-axis + gr3.SetMarkerSize(1.2) # Makes marker size a bit bigger; standard is 1.0 + gr3.SetMarkerColor(kGreen+3) + gr3.SetMarkerStyle(kFullCircle) + ## Defining TLegend for graphs gr1, gr2, ... 
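The drawing code below repeats one pattern many times: per-run statistics accumulated into array('f') buffers are plotted against run number with a TGraph, overlaid via Draw("PSAME"), and labelled with a shared TLegend. A condensed sketch of that pattern follows, with a toy Gaussian histogram and illustrative run numbers standing in for the ReactionFilter pull histograms read from the hd_root files:

```
# Condensed sketch of the per-run statistics -> TGraph/TLegend pattern used in
# this script; the toy histogram and run numbers below are placeholders for the
# ReactionFilter pull histograms read from hd_root files.
from array import array
from ROOT import TH1F, TCanvas, TGraph, TLegend, gRandom, kBlue, kRed, kFullCircle

run_vec  = array('f')   # x-axis: run numbers
mean_arr = array('f')   # y-axis: per-run histogram means
rms_arr  = array('f')   # y-axis: per-run histogram RMS values

for run in (30597, 30598, 30599):          # illustrative run numbers
    h = TH1F("h%d" % run, "toy pull", 100, -5., 5.)
    for _ in range(10000):
        h.Fill(gRandom.Gaus(0.0, 1.0))     # stands in for f.Get(".../Pull_Px")
    run_vec.append(run)
    mean_arr.append(h.GetMean())
    rms_arr.append(h.GetRMS())

c = TCanvas("c", "c", 1200, 900)
g_mean = TGraph(len(run_vec), run_vec, mean_arr)   # n points, x array, y array
g_mean.SetTitle("Toy pull statistics;Run Number;Value")
g_mean.SetMarkerStyle(kFullCircle)
g_mean.SetMarkerColor(kBlue)
g_rms = TGraph(len(run_vec), run_vec, rms_arr)
g_rms.SetMarkerStyle(kFullCircle)
g_rms.SetMarkerColor(kRed)

legend = TLegend(0.7, 0.7, 0.9, 0.9)
legend.AddEntry(g_mean, "Mean", "pl")
legend.AddEntry(g_rms, "RMS", "pl")

g_mean.Draw("AP")     # A = draw axes, P = markers at the points
g_rms.Draw("PSAME")   # overlay on the same axes
legend.Draw()
c.SaveAs("toy_pull_stats.pdf")
```

The mean, RMS, skewness, and kurtosis graphs produced by the patch all follow this same recipe, differing only in which array('f') buffer supplies the y values.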
+ legend = TLegend(0.7,0.7,0.9,0.9) #0.1 is lower limit of plot, 0.9 is upper limit (beyond on either side is labeling+whitespace) + legend.AddEntry(gr1,"Mean X","pl") + legend.AddEntry(gr2,"Mean Y","pl") + legend.AddEntry(gr3,"Mean Z","pl") + gr1.Draw("AP") + gr2.Draw("PSAME") # AP are options while drawing. A = show axis, P = markers at points (instead of lines between points) Draws on Canvas c1 + gr3.Draw("PSAME") + legend.Draw() # Doesn't need any arguments + ###mc + c3.cd(2) + gr_1 = TGraph(NumberOfRuns_mc, run_vec_mc, mean_array_mc[var4[var6]]) # Size of arrays, followed by x,y-axis + gr_1.GetYaxis().SetRangeUser(-0.3,0.3) #this is the y-axis range + gr_1.SetMarkerSize(1.2) # Makes marker size a bit bigger; standard is 1.0 + gr_1.SetTitle("MC: Mean Versus Run Number of "+ graphname + " " + graphsymbol) + gr_1.GetXaxis().SetTitle("Run Number") + gr_1.GetYaxis().SetTitle("Mean") + gr_1.SetMarkerColor(kBlue) + gr_1.SetMarkerStyle(kFullCircle) # See https://root.cern.ch/doc/masterclassTAttMarker.html + gr_2 = TGraph(NumberOfRuns_mc, run_vec_mc, mean_array_mc[var5[var6]]) # Size of arrays, followed by x,y-axis + gr_2.SetMarkerSize(1.2) # Makes marker size a bit bigger; standard is 1.0 + gr_2.SetMarkerColor(kRed) + gr_2.SetMarkerStyle(kFullCircle) + gr_3 = TGraph(NumberOfRuns_mc, run_vec_mc, mean_array_mc[var9[var6]]) # Size of arrays, followed by x,y-axis + gr_3.SetMarkerSize(1.2) # Makes marker size a bit bigger; standard is 1.0 + gr_3.SetMarkerColor(kGreen+3) + gr_3.SetMarkerStyle(kFullCircle) + ## Defining TLegend for graphs gr1, gr2, ... + legendd = TLegend(0.7,0.7,0.9,0.9) #0.1 is lower limit of plot, 0.9 is upper limit (beyond on either side is labeling+whitespace) + legendd.AddEntry(gr1,"Mean X","pl") + legendd.AddEntry(gr2,"Mean Y","pl") + legendd.AddEntry(gr3,"Mean Z","pl") + gr_1.Draw("AP") + gr_2.Draw("PSAME") # AP are options while drawing. 
A = show axis, P = markers at points (instead of lines between points) Draws on Canvas c1 + gr_3.Draw("PSAME") + legendd.Draw() # Doesn't need any arguments + c3.SaveAs("splitplotMEAN%s.pdf" % tempvar2) #Saves in current directory, recommend saving asa pdf or png + + #this prints the graphs for x,y,z RMS + c4 = TCanvas("c3","c3",2400,900) + c4.Divide(2,1) + ##DATA + c4.cd(1) + gr3 = TGraph(NumberOfRuns_data, run_vec_data, RMS_array_data[var4[var6]]) # Size of arrays, followed by x,y-axis + gr3.GetYaxis().SetRangeUser(0.7,1.3) #this is the y-axis range + gr3.SetMarkerSize(1.2) # Makes marker size a bit bigger; standard is 1.0 + gr3.SetTitle("Data: RMS Versus Run Number of "+graphname+" "+ graphsymbol) + gr3.GetXaxis().SetTitle("Run Number") + gr3.GetYaxis().SetTitle("RMS") + gr3.SetMarkerColor(kBlue) + gr3.SetMarkerStyle(kFullCircle) # See https://root.cern.ch/doc/masterclassTAttMarker.html + gr4 = TGraph(NumberOfRuns_data, run_vec_data, RMS_array_data[var5[var6]]) # Size of arrays, followed by x,y-axis + gr4.SetMarkerSize(1.2) # Makes marker size a bit bigger; standard is 1.0 + gr4.SetMarkerColor(kRed) + gr4.SetMarkerStyle(kFullCircle) + gr5 = TGraph(NumberOfRuns_data, run_vec_data, RMS_array_data[var9[var6]]) # Size of arrays, followed by x,y-axis + gr5.SetMarkerSize(1.2) # Makes marker size a bit bigger; standard is 1.0 + gr5.SetMarkerColor(kGreen+3) + gr5.SetMarkerStyle(kFullCircle) + legend = TLegend(0.7,0.7,0.9,0.9) #0.1 is lower limit of plot, 0.9 is upper limit (beyond on either side is labeling+whitespace) + legend.AddEntry(gr3,"RMS X","pl") + legend.AddEntry(gr4,"RMS Y","pl") + legend.AddEntry(gr5,"RMS Z","pl") + gr3.Draw("AP") + gr4.Draw("PSAME") # AP are options while drawing. A = show axis, P = markers at points (instead of lines between points) Draws on Canvas c1 + gr5.Draw("PSAME") # AP are options while drawing. A = show axis, P = markers at points (instead of lines between points) Draws on Canvas c1 + legend.Draw() # Doesn't need any arguments + ##MC + c4.cd(2) + gr_3 = TGraph(NumberOfRuns_mc, run_vec_mc, RMS_array_mc[var4[var6]]) # Size of arrays, followed by x,y-axis + gr_3.GetYaxis().SetRangeUser(0.7,1.3) #this is the y-axis range + gr_3.SetMarkerSize(1.2) # Makes marker size a bit bigger; standard is 1.0 + gr_3.SetTitle("MC: RMS Versus Run Number of "+graphname+" "+ graphsymbol) + gr_3.GetXaxis().SetTitle("Run Number") + gr_3.GetYaxis().SetTitle("RMS") + gr_3.SetMarkerColor(kBlue) + gr_3.SetMarkerStyle(kFullCircle) # See https://root.cern.ch/doc/masterclassTAttMarker.html + gr_4 = TGraph(NumberOfRuns_mc, run_vec_mc, RMS_array_mc[var5[var6]]) # Size of arrays, followed by x,y-axis + gr_4.SetMarkerSize(1.2) # Makes marker size a bit bigger; standard is 1.0 + gr_4.SetMarkerColor(kRed) + gr_4.SetMarkerStyle(kFullCircle) + gr_5 = TGraph(NumberOfRuns_mc, run_vec_mc, RMS_array_mc[var9[var6]]) # Size of arrays, followed by x,y-axis + gr_5.SetMarkerSize(1.2) # Makes marker size a bit bigger; standard is 1.0 + gr_5.SetMarkerColor(kGreen+3) + gr_5.SetMarkerStyle(kFullCircle) + legendd = TLegend(0.7,0.7,0.9,0.9) #0.1 is lower limit of plot, 0.9 is upper limit (beyond on either side is labeling+whitespace) + legendd.AddEntry(gr_3,"RMS X","pl") + legendd.AddEntry(gr_4,"RMS Y","pl") + legendd.AddEntry(gr_5,"RMS Z","pl") + gr_3.Draw("AP") + gr_4.Draw("PSAME") # AP are options while drawing. A = show axis, P = markers at points (instead of lines between points) Draws on Canvas c1 + gr_5.Draw("PSAME") # AP are options while drawing. 
A = show axis, P = markers at points (instead of lines between points) Draws on Canvas c1 + legendd.Draw() # Doesn't need any arguments + c4.SaveAs("splitplotRMS%s.pdf" % tempvar2) #Saves in current directory, recommend saving asa pdf or png + + c6 = TCanvas("c3","c3",2400,900) + c6.Divide(2,1) + ##DATA + c6.cd(1) + grap1 = TGraph(NumberOfRuns_data, run_vec_data, skewness_array_data[var4[var6]]) # Size of arrays, followed by x,y-axis + grap1.GetYaxis().SetRangeUser(-0.3,0.3) #this is the y-axis range + grap1.SetMarkerSize(1.2) # Makes marker size a bit bigger; standard is 1.0 + grap1.SetTitle("Data: Skewness Versus Run Number of "+ graphname + " " + graphsymbol) + grap1.GetXaxis().SetTitle("Run Number") + grap1.GetYaxis().SetTitle("Skewness") + grap1.SetMarkerColor(kBlue) + grap1.SetMarkerStyle(kFullCircle) # See https://root.cern.ch/doc/masterclassTAttMarker.html + grap2 = TGraph(NumberOfRuns_data, run_vec_data, skewness_array_data[var5[var6]]) # Size of arrays, followed by x,y-axis + grap2.SetMarkerSize(1.2) # Makes marker size a bit bigger; standard is 1.0 + grap2.SetMarkerColor(kRed) + grap2.SetMarkerStyle(kFullCircle) + grap3 = TGraph(NumberOfRuns_data, run_vec_data, skewness_array_data[var9[var6]]) # Size of arrays, followed by x,y-axis + grap3.SetMarkerSize(1.2) # Makes marker size a bit bigger; standard is 1.0 + grap3.SetMarkerColor(kGreen+3) + grap3.SetMarkerStyle(kFullCircle) + ## Defining TLegend for graphs gr1, gr2, ... + legend = TLegend(0.7,0.7,0.9,0.9) #0.1 is lower limit of plot, 0.9 is upper limit (beyond on either side is labeling+whitespace) + legend.AddEntry(grap1,"Skewness X","pl") + legend.AddEntry(grap2,"Skewness Y","pl") + legend.AddEntry(grap3,"Skewness Z","pl") + grap1.Draw("AP") + grap2.Draw("PSAME") # AP are options while drawing. A = show axis, P = markers at points (instead of lines between points) Draws on Canvas c1 + grap3.Draw("PSAME") + legend.Draw() # Doesn't need any arguments + ##mc + c6.cd(2) + grap_1 = TGraph(NumberOfRuns_mc, run_vec_mc, skewness_array_mc[var4[var6]]) # Size of arrays, followed by x,y-axis + grap_1.GetYaxis().SetRangeUser(-0.3,0.3) #this is the y-axis range + grap_1.SetMarkerSize(1.2) # Makes marker size a bit bigger; standard is 1.0 + grap_1.SetTitle("MC: Skewness Versus Run Number of "+ graphname + " " + graphsymbol) + grap_1.GetXaxis().SetTitle("Run Number") + grap_1.GetYaxis().SetTitle("Skewness") + grap_1.SetMarkerColor(kBlue) + grap_1.SetMarkerStyle(kFullCircle) # See https://root.cern.ch/doc/masterclassTAttMarker.html + grap_2 = TGraph(NumberOfRuns_mc, run_vec_mc, skewness_array_mc[var5[var6]]) # Size of arrays, followed by x,y-axis + grap_2.SetMarkerSize(1.2) # Makes marker size a bit bigger; standard is 1.0 + grap_2.SetMarkerColor(kRed) + grap_2.SetMarkerStyle(kFullCircle) + grap_3 = TGraph(NumberOfRuns_mc, run_vec_mc, skewness_array_mc[var9[var6]]) # Size of arrays, followed by x,y-axis + grap_3.SetMarkerSize(1.2) # Makes marker size a bit bigger; standard is 1.0 + grap_3.SetMarkerColor(kGreen+3) + grap_3.SetMarkerStyle(kFullCircle) + ## Defining TLegend for graphs gr1, gr2, ... + legendd = TLegend(0.7,0.7,0.9,0.9) #0.1 is lower limit of plot, 0.9 is upper limit (beyond on either side is labeling+whitespace) + legendd.AddEntry(grap_1,"Skewness X","pl") + legendd.AddEntry(grap_2,"Skewness Y","pl") + legendd.AddEntry(grap_3,"Skewness Z","pl") + grap_1.Draw("AP") + grap_2.Draw("PSAME") # AP are options while drawing. 
A = show axis, P = markers at points (instead of lines between points) Draws on Canvas c1 + grap_3.Draw("PSAME") + legendd.Draw() # Doesn't need any arguments + c6.SaveAs("splitplotSKEWNESS%s.pdf" % tempvar2) #Saves in current directory, recommend saving asa pdf or png + + c7 = TCanvas("c3","c3",2400,900) + c7.Divide(2,1) + ##DATA + c7.cd(1) + grap4 = TGraph(NumberOfRuns_data, run_vec_data, kurtosis_array_data[var4[var6]]) # Size of arrays, followed by x,y-axis + grap4.GetYaxis().SetRangeUser(-1,0.3) #this is the y-axis range + grap4.SetMarkerSize(1.2) # Makes marker size a bit bigger; standard is 1.0 + grap4.SetTitle("Data: Kurtosis Versus Run Number of "+ graphname + " " + graphsymbol) + grap4.GetXaxis().SetTitle("Run Number") + grap4.GetYaxis().SetTitle("Kurtosis") + grap4.SetMarkerColor(kBlue) + grap4.SetMarkerStyle(kFullCircle) # See https://root.cern.ch/doc/masterclassTAttMarker.html + grap5 = TGraph(NumberOfRuns_data, run_vec_data, skewness_array_data[var5[var6]]) # Size of arrays, followed by x,y-axis + grap5.SetMarkerSize(1.2) # Makes marker size a bit bigger; standard is 1.0 + grap5.SetMarkerColor(kRed) + grap5.SetMarkerStyle(kFullCircle) + grap6 = TGraph(NumberOfRuns_data, run_vec_data, skewness_array_data[var9[var6]]) # Size of arrays, followed by x,y-axis + grap6.SetMarkerSize(1.2) # Makes marker size a bit bigger; standard is 1.0 + grap6.SetMarkerColor(kGreen+3) + grap6.SetMarkerStyle(kFullCircle) + ## Defining TLegend for graphs gr1, gr2, ... + legend = TLegend(0.7,0.7,0.9,0.9) #0.1 is lower limit of plot, 0.9 is upper limit (beyond on either side is labeling+whitespace) + legend.AddEntry(grap4,"Kurtosis X","pl") + legend.AddEntry(grap5,"Kurtosis Y","pl") + legend.AddEntry(grap6,"Kurtosis Z","pl") + grap4.Draw("AP") + grap5.Draw("PSAME") # AP are options while drawing. A = show axis, P = markers at points (instead of lines between points) Draws on Canvas c1 + grap6.Draw("PSAME") + legend.Draw() # Doesn't need any arguments + ##MC + c7.cd(2) + grap_4 = TGraph(NumberOfRuns_mc, run_vec_mc, kurtosis_array_mc[var4[var6]]) # Size of arrays, followed by x,y-axis + grap_4.GetYaxis().SetRangeUser(-1,0.3) #this is the y-axis range + grap_4.SetMarkerSize(1.2) # Makes marker size a bit bigger; standard is 1.0 + grap_4.SetTitle("MC: Kurtosis Versus Run Number of "+ graphname + " " + graphsymbol) + grap_4.GetXaxis().SetTitle("Run Number") + grap_4.GetYaxis().SetTitle("Kurtosis") + grap_4.SetMarkerColor(kBlue) + grap_4.SetMarkerStyle(kFullCircle) # See https://root.cern.ch/doc/masterclassTAttMarker.html + grap_5 = TGraph(NumberOfRuns_mc, run_vec_mc, skewness_array_mc[var5[var6]]) # Size of arrays, followed by x,y-axis + grap_5.SetMarkerSize(1.2) # Makes marker size a bit bigger; standard is 1.0 + grap_5.SetMarkerColor(kRed) + grap_5.SetMarkerStyle(kFullCircle) + grap_6 = TGraph(NumberOfRuns_mc, run_vec_mc, skewness_array_mc[var9[var6]]) # Size of arrays, followed by x,y-axis + grap_6.SetMarkerSize(1.2) # Makes marker size a bit bigger; standard is 1.0 + grap_6.SetMarkerColor(kGreen+3) + grap_6.SetMarkerStyle(kFullCircle) + ## Defining TLegend for graphs gr1, gr2, ... + legendd = TLegend(0.7,0.7,0.9,0.9) #0.1 is lower limit of plot, 0.9 is upper limit (beyond on either side is labeling+whitespace) + legendd.AddEntry(grap_4,"Kurtosis X","pl") + legendd.AddEntry(grap_5,"Kurtosis Y","pl") + legendd.AddEntry(grap_6,"Kurtosis Z","pl") + grap_4.Draw("AP") + grap_5.Draw("PSAME") # AP are options while drawing. 
A = show axis, P = markers at points (instead of lines between points) Draws on Canvas c1 + grap_6.Draw("PSAME") + legendd.Draw() # Doesn't need any arguments + c7.SaveAs("splitplotKURTOSIS%s.pdf" % tempvar2) #Saves in current directory, recommend saving asa pdf or png + + + + if partfilename_mc == 'Hist_KinFitResults/Step1__Pi0__Photon_Photon' and var2 == "Photon": #might need to change + tempvar = "Pull_E"+var2 + # the next bit makes the Mean Energy graph + + if var2 == "Pi+" : graphsymbol = "#pi^{+}" + if var2 == "Pi-" : graphsymbol = "#pi^{-}" + if var2 == "Photon" : graphsymbol = "#gamma" + if var2 == "Proton" : graphsymbol = 'P^{+}' + + try: + c1 = TCanvas("c1","c1",2400,900) # creates a 1200x900 pixel drawing pad called c1. not sure if I have to add 'new' somewhere + c1.Divide(2,1) + ##Data + c1.cd(1) + gr = TGraph(NumberOfRuns_data, run_vec_data, mean_array_data["Pull_E"]) # Size of arrays, followed by x,y-axis + gr.GetYaxis().SetRangeUser(-0.3,0.3) #this is the y-axis range + gr.SetMarkerSize(1.2) # Makes marker size a bit bigger; standard is 1.0 + gr.SetTitle("Data: Mean Versus Run Number of Pull_E "+ graphsymbol) + gr.GetXaxis().SetTitle("Run Number") + gr.GetYaxis().SetTitle("Mean") + gr.SetMarkerColor(kBlue) + gr.SetMarkerStyle(kFullCircle) # See https://root.cern.ch/doc/masterclassTAttMarker.html + legend = TLegend(0.7,0.7,0.9,0.9) #0.1 is lower limit of plot, 0.9 is upper limit (beyond on either side is labeling+whitespace) + legend.AddEntry(gr,"Mean E","pl") + gr.Draw("AP") # AP are options while drawing. A = show axis, P = markers at points (instead of lines between points) Draws on Canvas c1 + legend.Draw() # Doesn't need any arguments + ##MC + c1.cd(2) + gr_ = TGraph(NumberOfRuns_mc, run_vec_mc, mean_array_mc["Pull_E"]) # Size of arrays, followed by x,y-axis + gr_.GetYaxis().SetRangeUser(-0.3,0.3) #this is the y-axis range + gr_.SetMarkerSize(1.2) # Makes marker size a bit bigger; standard is 1.0 + gr_.SetTitle("MC: Mean Versus Run Number of Pull_E "+ graphsymbol) + gr_.GetXaxis().SetTitle("Run Number") + gr_.GetYaxis().SetTitle("Mean") + gr_.SetMarkerColor(kBlue) + gr_.SetMarkerStyle(kFullCircle) # See https://root.cern.ch/doc/masterclassTAttMarker.html + legendd = TLegend(0.7,0.7,0.9,0.9) #0.1 is lower limit of plot, 0.9 is upper limit (beyond on either side is labeling+whitespace) + legendd.AddEntry(gr_,"Mean E","pl") + gr_.Draw("AP") # AP are options while drawing. 
A = show axis, P = markers at points (instead of lines between points) Draws on Canvas c1 + legendd.Draw() # Doesn't need any arguments + c1.SaveAs("splitplotMEAN%s.pdf" % tempvar) #Saves in current directory, recommend saving asa pdf or png + + # the next bit makes the Energy RMS graph + c2 = TCanvas("c2","c2",2400,900) # creates a 1200x900 pixel drawing pad called c1; not sure if I have to add 'new' somewhere + c2.Divide(2,1) + ##DATA + c2.cd(1) + gra = TGraph(NumberOfRuns_data, run_vec_data, RMS_array_data["Pull_E"]) # Size of arrays, followed by x,y-axis + gra.GetYaxis().SetRangeUser(0.7,1.3) #this is the y-axis range + gra.SetMarkerSize(1.2) # Makes marker size a bit bigger; standard is 1.0 + gra.SetTitle("Data: RMS Versus Run Number of Pull_E "+ graphsymbol) + gra.GetXaxis().SetTitle("Run Number") + gra.GetYaxis().SetTitle("RMS") + gra.SetMarkerColor(kBlue) + gra.SetMarkerStyle(kFullCircle) # See https://root.cern.ch/doc/masterclassTAttMarker.html + legend = TLegend(0.7,0.7,0.9,0.9) #0.1 is lower limit of plot, 0.9 is upper limit (beyond on either side is labeling+whitespace) + legend.AddEntry(gra,"RMS E","pl") + gra.Draw("AP") # AP are options while drawing. A = show axis, P = markers at points (instea d of lines between points) Draws on Canvas c1 + legend.Draw() # Doesn't need any arguments + ##MC + c2.cd(2) + gra_ = TGraph(NumberOfRuns_mc, run_vec_mc, RMS_array_mc["Pull_E"]) # Size of arrays, followed by x,y-axis + gra_.GetYaxis().SetRangeUser(0.7,1.3) #this is the y-axis range + gra_.SetMarkerSize(1.2) # Makes marker size a bit bigger; standard is 1.0 + gra_.SetTitle("MC: RMS Versus Run Number of Pull_E "+ graphsymbol) + gra_.GetXaxis().SetTitle("Run Number") + gra_.GetYaxis().SetTitle("RMS") + gra_.SetMarkerColor(kBlue) + gra_.SetMarkerStyle(kFullCircle) # See https://root.cern.ch/doc/masterclassTAttMarker.html + legendd = TLegend(0.7,0.7,0.9,0.9) #0.1 is lower limit of plot, 0.9 is upper limit (beyond on either side is labeling+whitespace) + legendd.AddEntry(gra_,"RMS E","pl") + gra_.Draw("AP") # AP are options while drawing. A = show axis, P = markers at points (instea d of lines between points) Draws on Canvas c1 + legendd.Draw() # Doesn't need any arguments + c2.SaveAs("splitplotRMS%s.pdf" % tempvar) #Saves in current directory, recommend saving as a pdf or png + + # the next bit makes the Energy RMS graph + c8 = TCanvas("c2","c2",2400,900) # creates a 1200x900 pixel drawing pad called c1; not sure if I have to add 'new' somewhere + c8.Divide(2,1) + ##DATA + c8.cd(1) + gra4 = TGraph(NumberOfRuns_data, run_vec_data, skewness_array_data["Pull_E"]) # Size of arrays, followed by x,y-axis + gra4.GetYaxis().SetRangeUser(-0.3,0.3) #this is the y-axis range + gra4.SetMarkerSize(1.2) # Makes marker size a bit bigger; standard is 1.0 + gra4.SetTitle("Data: Skewness Versus Run Number of Pull_E "+ graphsymbol) + gra4.GetXaxis().SetTitle("Run Number") + gra4.GetYaxis().SetTitle("Skewness") + gra4.SetMarkerColor(kBlue) + gra4.SetMarkerStyle(kFullCircle) # See https://root.cern.ch/doc/masterclassTAttMarker.html + legend = TLegend(0.7,0.7,0.9,0.9) #0.1 is lower limit of plot, 0.9 is upper limit (beyond on either side is labeling+whitespace) + legend.AddEntry(gra4,"Skewness E","pl") + gra4.Draw("AP") # AP are options while drawing. 
A = show axis, P = markers at points (instea d of lines between points) Draws on Canvas c1 + legend.Draw() # Doesn't need any arguments + ##MC + c8.cd(2) + gra_4 = TGraph(NumberOfRuns_mc, run_vec_mc, skewness_array_mc["Pull_E"]) # Size of arrays, followed by x,y-axis + gra_4.GetYaxis().SetRangeUser(-0.3,0.3) #this is the y-axis range + gra_4.SetMarkerSize(1.2) # Makes marker size a bit bigger; standard is 1.0 + gra_4.SetTitle("MC: Skewness Versus Run Number of Pull_E "+ graphsymbol) + gra_4.GetXaxis().SetTitle("Run Number") + gra_4.GetYaxis().SetTitle("Skewness") + gra_4.SetMarkerColor(kBlue) + gra_4.SetMarkerStyle(kFullCircle) # See https://root.cern.ch/doc/masterclassTAttMarker.html + legendd = TLegend(0.7,0.7,0.9,0.9) #0.1 is lower limit of plot, 0.9 is upper limit (beyond on either side is labeling+whitespace) + legendd.AddEntry(gra_4,"Skewness E","pl") + gra_4.Draw("AP") # AP are options while drawing. A = show axis, P = markers at points (instea d of lines between points) Draws on Canvas c1 + legendd.Draw() # Doesn't need any arguments + c8.SaveAs("splitplotSKEWNESS%s.pdf" % tempvar) #Saves in current directory, recommend saving as a pdf or png + + # the next bit makes the Energy RMS graph + c9 = TCanvas("c2","c2",2400,900) # creates a 1200x900 pixel drawing pad called c1; not sure if I have to add 'new' somewhere + c9.Divide(2,1) + ##Data + c9.cd(1) + gra5 = TGraph(NumberOfRuns_data, run_vec_data, kurtosis_array_data["Pull_E"]) # Size of arrays, followed by x,y-axis + gra5.GetYaxis().SetRangeUser(-0.7,0) #this is the y-axis range + gra5.SetMarkerSize(1.2) # Makes marker size a bit bigger; standard is 1.0 + gra5.SetTitle("Data: Kurtosis Versus Run Number of Pull_E "+ graphsymbol) + gra5.GetXaxis().SetTitle("Run Number") + gra5.GetYaxis().SetTitle("Kurtosis") + gra5.SetMarkerColor(kBlue) + gra5.SetMarkerStyle(kFullCircle) # See https://root.cern.ch/doc/masterclassTAttMarker.html + legend = TLegend(0.7,0.7,0.9,0.9) #0.1 is lower limit of plot, 0.9 is upper limit (beyond on either side is labeling+whitespace) + legend.AddEntry(gra5,"Kurtosis E","pl") + gra5.Draw("AP") # AP are options while drawing. A = show axis, P = markers at points (instea d of lines between points) Draws on Canvas c1 + legend.Draw() # Doesn't need any arguments + ##Data + c9.cd(2) + gra_5 = TGraph(NumberOfRuns_mc, run_vec_mc, kurtosis_array_mc["Pull_E"]) # Size of arrays, followed by x,y-axis + gra_5.GetYaxis().SetRangeUser(-0.7,0) #this is the y-axis range + gra_5.SetMarkerSize(1.2) # Makes marker size a bit bigger; standard is 1.0 + gra_5.SetTitle("MC: Kurtosis Versus Run Number of Pull_E "+ graphsymbol) + gra_5.GetXaxis().SetTitle("Run Number") + gra_5.GetYaxis().SetTitle("Kurtosis") + gra_5.SetMarkerColor(kBlue) + gra_5.SetMarkerStyle(kFullCircle) # See https://root.cern.ch/doc/masterclassTAttMarker.html + legendd = TLegend(0.7,0.7,0.9,0.9) #0.1 is lower limit of plot, 0.9 is upper limit (beyond on either side is labeling+whitespace) + legendd.AddEntry(gra_5,"Kurtosis E","pl") + gra_5.Draw("AP") # AP are options while drawing. 
A = show axis, P = markers at points (instea d of lines between points) Draws on Canvas c1 + legendd.Draw() # Doesn't need any arguments + c9.SaveAs("splitplotKURTOSIS%s.pdf" % tempvar) #Saves in current directory, recommend saving as a pdf or png + except: print "Photon energy couldn't be graphed" + + try: + #makes the photon Xxyz mean graph + tempvar2=var2+"Position" + c4 = TCanvas("c3","c3",2400,900) + c4.Divide(2,1) + ##DATA + c4.cd(1) + gr6 = TGraph(NumberOfRuns_data, run_vec_data, mean_array_data["Pull_Xx"]) # Size of arrays, followed by x,y-axis + gr6.GetYaxis().SetRangeUser(-0.3,0.3) #this is the y-axis range + gr6.SetMarkerSize(1.2) # Makes marker size a bit bigger; standard is 1.0 + gr6.SetTitle("Data: Mean Versus Run Number of Position " + graphsymbol) + gr6.GetXaxis().SetTitle("Run Number") + gr6.GetYaxis().SetTitle("Mean") + gr6.SetMarkerColor(kBlue) + gr6.SetMarkerStyle(kFullCircle) # See https://root.cern.ch/doc/masterclassTAttMarker.html + gr7 = TGraph(NumberOfRuns_data, run_vec_data, mean_array_data["Pull_Xy"]) # Size of arrays, followed by x,y-axis + gr7.SetMarkerSize(1.2) # Makes marker size a bit bigger; standard is 1.0 + gr7.SetMarkerColor(kRed) + gr7.SetMarkerStyle(kFullCircle) + gr8 = TGraph(NumberOfRuns_data, run_vec_data, mean_array_data["Pull_Xz"]) # Size of arrays, followed by x,y-axis + gr8.SetMarkerSize(1.2) # Makes marker size a bit bigger; standard is 1.0 + gr8.SetMarkerColor(kGreen+3) + gr8.SetMarkerStyle(kFullCircle) + legend = TLegend(0.7,0.7,0.9,0.9) #0.1 is lower limit of plot, 0.9 is upper limit (beyond on either side is labeling+whitespace) + legend.AddEntry(gr6,"Mean X","pl") + legend.AddEntry(gr7,"Mean Y","pl") + legend.AddEntry(gr8,"Mean Z","pl") + gr6.Draw("AP") + gr7.Draw("PSAME") # AP are options while drawing. A = show axis, P = markers at points (instead of lines between points) Draws on Canvas c1 + gr8.Draw("PSAME") + legend.Draw() # Doesn't need any arguments + ##DATA + c4.cd(2) + gr_6 = TGraph(NumberOfRuns_mc, run_vec_mc, mean_array_mc["Pull_Xx"]) # Size of arrays, followed by x,y-axis + gr_6.GetYaxis().SetRangeUser(-0.3,0.3) #this is the y-axis range + gr_6.SetMarkerSize(1.2) # Makes marker size a bit bigger; standard is 1.0 + gr_6.SetTitle("MC: Mean Versus Run Number of Position " + graphsymbol) + gr_6.GetXaxis().SetTitle("Run Number") + gr_6.GetYaxis().SetTitle("Mean") + gr_6.SetMarkerColor(kBlue) + gr_6.SetMarkerStyle(kFullCircle) # See https://root.cern.ch/doc/masterclassTAttMarker.html + gr_7 = TGraph(NumberOfRuns_mc, run_vec_mc, mean_array_mc["Pull_Xy"]) # Size of arrays, followed by x,y-axis + gr_7.SetMarkerSize(1.2) # Makes marker size a bit bigger; standard is 1.0 + gr_7.SetMarkerColor(kRed) + gr_7.SetMarkerStyle(kFullCircle) + gr_8 = TGraph(NumberOfRuns_mc, run_vec_mc, mean_array_mc["Pull_Xz"]) # Size of arrays, followed by x,y-axis + gr_8.SetMarkerSize(1.2) # Makes marker size a bit bigger; standard is 1.0 + gr_8.SetMarkerColor(kGreen+3) + gr_8.SetMarkerStyle(kFullCircle) + legendd = TLegend(0.7,0.7,0.9,0.9) #0.1 is lower limit of plot, 0.9 is upper limit (beyond on either side is labeling+whitespace) + legendd.AddEntry(gr_6,"Mean X","pl") + legendd.AddEntry(gr_7,"Mean Y","pl") + legendd.AddEntry(gr_8,"Mean Z","pl") + gr_6.Draw("AP") + gr_7.Draw("PSAME") # AP are options while drawing. 
A = show axis, P = markers at points (instead of lines between points) Draws on Canvas c1 + gr_8.Draw("PSAME") + legendd.Draw() # Doesn't need any arguments + c4.SaveAs("splitplotMEAN%s.pdf" % tempvar2) #Saves in current directory, recommend saving asa pdf or png + + #makes the photon Xxyz RMS graph + c5 = TCanvas("c3","c3",2400,900) + c5.Divide(2,1) + ##DATA + c5.cd(1) + gra1 = TGraph(NumberOfRuns_data, run_vec_data, RMS_array_data["Pull_Xx"]) # Size of arrays, followed by x,y-axis + gra1.GetYaxis().SetRangeUser(0.7,1.3) #this is the y-axis range + gra1.SetMarkerSize(1.2) # Makes marker size a bit bigger; standard is 1.0 + gra1.SetTitle("Data: RMS Versus Run Number of Position " + graphsymbol) + gra1.GetXaxis().SetTitle("Run Number") + gra1.GetYaxis().SetTitle("RMS") + gra1.SetMarkerColor(kBlue) + gra1.SetMarkerStyle(kFullCircle) # See https://root.cern.ch/doc/masterclassTAttMarker.html + gra2 = TGraph(NumberOfRuns_data, run_vec_data, RMS_array_data["Pull_Xy"]) # Size of arrays, followed by x,y-axis + gra2.SetMarkerSize(1.2) # Makes marker size a bit bigger; standard is 1.0 + gra2.SetMarkerColor(kRed) + gra2.SetMarkerStyle(kFullCircle) + gra3 = TGraph(NumberOfRuns_data, run_vec_data, RMS_array_data["Pull_Xz"]) # Size of arrays, followed by x,y-axis + gra3.SetMarkerSize(1.2) # Makes marker size a bit bigger; standard is 1.0 + gra3.SetMarkerColor(kGreen+3) + gra3.SetMarkerStyle(kFullCircle) + legend = TLegend(0.7,0.7,0.9,0.9) #0.1 is lower limit of plot, 0.9 is upper limit (beyond on either side is labeling+whitespace) + legend.AddEntry(gra1,"RMS X","pl") + legend.AddEntry(gra2,"RMS Y","pl") + legend.AddEntry(gra3,"RMS Z","pl") + gra1.Draw("AP") + gra2.Draw("PSAME") # AP are options while drawing. A = show axis, P = markers at points (instead of lines between points) Draws on Canvas c1 + gra3.Draw("PSAME") + legend.Draw() # Doesn't need any arguments + ##MC + c5.cd(2) + gra_1 = TGraph(NumberOfRuns_mc, run_vec_mc, RMS_array_mc["Pull_Xx"]) # Size of arrays, followed by x,y-axis + gra_1.GetYaxis().SetRangeUser(0.7,1.3) #this is the y-axis range + gra_1.SetMarkerSize(1.2) # Makes marker size a bit bigger; standard is 1.0 + gra_1.SetTitle("MC: RMS Versus Run Number of Position " + graphsymbol) + gra_1.GetXaxis().SetTitle("Run Number") + gra_1.GetYaxis().SetTitle("RMS") + gra_1.SetMarkerColor(kBlue) + gra_1.SetMarkerStyle(kFullCircle) # See https://root.cern.ch/doc/masterclassTAttMarker.html + gra_2 = TGraph(NumberOfRuns_mc, run_vec_mc, RMS_array_mc["Pull_Xy"]) # Size of arrays, followed by x,y-axis + gra_2.SetMarkerSize(1.2) # Makes marker size a bit bigger; standard is 1.0 + gra_2.SetMarkerColor(kRed) + gra_2.SetMarkerStyle(kFullCircle) + gra_3 = TGraph(NumberOfRuns_mc, run_vec_mc, RMS_array_mc["Pull_Xz"]) # Size of arrays, followed by x,y-axis + gra_3.SetMarkerSize(1.2) # Makes marker size a bit bigger; standard is 1.0 + gra_3.SetMarkerColor(kGreen+3) + gra_3.SetMarkerStyle(kFullCircle) + legendd = TLegend(0.7,0.7,0.9,0.9) #0.1 is lower limit of plot, 0.9 is upper limit (beyond on either side is labeling+whitespace) + legendd.AddEntry(gra_1,"RMS X","pl") + legendd.AddEntry(gra_2,"RMS Y","pl") + legendd.AddEntry(gra_3,"RMS Z","pl") + gra_1.Draw("AP") + gra_2.Draw("PSAME") # AP are options while drawing. 
A = show axis, P = markers at points (instead of lines between points) Draws on Canvas c1 + gra_3.Draw("PSAME") + legendd.Draw() # Doesn't need any arguments + c5.SaveAs("splitplotRMS%s.pdf" % tempvar2) #Saves in current directory, recommend saving asa pdf or png + + #makes the photon Xxyz RMS graph + c10 = TCanvas("c3","c3",2400,900) + c10.Divide(2,1) + ##DATA + c10.cd(1) + gra5 = TGraph(NumberOfRuns_data, run_vec_data, skewness_array_data["Pull_Xx"]) # Size of arrays, followed by x,y-axis + gra5.GetYaxis().SetRangeUser(-0.3,0.3) #this is the y-axis range + gra5.SetMarkerSize(1.2) # Makes marker size a bit bigger; standard is 1.0 + gra5.SetTitle("Data: Skewness Versus Run Number of Position " + graphsymbol) + gra5.GetXaxis().SetTitle("Run Number") + gra5.GetYaxis().SetTitle("Skewness") + gra5.SetMarkerColor(kBlue) + gra5.SetMarkerStyle(kFullCircle) # See https://root.cern.ch/doc/masterclassTAttMarker.html + gra6 = TGraph(NumberOfRuns_data, run_vec_data, skewness_array_data["Pull_Xy"]) # Size of arrays, followed by x,y-axis + gra6.SetMarkerSize(1.2) # Makes marker size a bit bigger; standard is 1.0 + gra6.SetMarkerColor(kRed) + gra6.SetMarkerStyle(kFullCircle) + gra7 = TGraph(NumberOfRuns_data, run_vec_data, skewness_array_data["Pull_Xz"]) # Size of arrays, followed by x,y-axis + gra7.SetMarkerSize(1.2) # Makes marker size a bit bigger; standard is 1.0 + gra7.SetMarkerColor(kGreen+3) + gra7.SetMarkerStyle(kFullCircle) + legend = TLegend(0.7,0.7,0.9,0.9) #0.1 is lower limit of plot, 0.9 is upper limit (beyond on either side is labeling+whitespace) + legend.AddEntry(gra5,"Skewness X","pl") + legend.AddEntry(gra6,"Skewness Y","pl") + legend.AddEntry(gra7,"Skewness Z","pl") + gra5.Draw("AP") + gra6.Draw("PSAME") # AP are options while drawing. A = show axis, P = markers at points (instead of lines between points) Draws on Canvas c1 + gra7.Draw("PSAME") + legend.Draw() # Doesn't need any arguments + ##DATA + c10.cd(2) + gra_5 = TGraph(NumberOfRuns_mc, run_vec_mc, skewness_array_mc["Pull_Xx"]) # Size of arrays, followed by x,y-axis + gra_5.GetYaxis().SetRangeUser(-0.3,0.3) #this is the y-axis range + gra_5.SetMarkerSize(1.2) # Makes marker size a bit bigger; standard is 1.0 + gra_5.SetTitle("MC: Skewness Versus Run Number of Position " + graphsymbol) + gra_5.GetXaxis().SetTitle("Run Number") + gra_5.GetYaxis().SetTitle("Skewness") + gra_5.SetMarkerColor(kBlue) + gra_5.SetMarkerStyle(kFullCircle) # See https://root.cern.ch/doc/masterclassTAttMarker.html + gra_6 = TGraph(NumberOfRuns_mc, run_vec_mc, skewness_array_mc["Pull_Xy"]) # Size of arrays, followed by x,y-axis + gra_6.SetMarkerSize(1.2) # Makes marker size a bit bigger; standard is 1.0 + gra_6.SetMarkerColor(kRed) + gra_6.SetMarkerStyle(kFullCircle) + gra_7 = TGraph(NumberOfRuns_mc, run_vec_mc, skewness_array_mc["Pull_Xz"]) # Size of arrays, followed by x,y-axis + gra_7.SetMarkerSize(1.2) # Makes marker size a bit bigger; standard is 1.0 + gra_7.SetMarkerColor(kGreen+3) + gra_7.SetMarkerStyle(kFullCircle) + legendd = TLegend(0.7,0.7,0.9,0.9) #0.1 is lower limit of plot, 0.9 is upper limit (beyond on either side is labeling+whitespace) + legendd.AddEntry(gra_5,"Skewness X","pl") + legendd.AddEntry(gra_6,"Skewness Y","pl") + legendd.AddEntry(gra_7,"Skewness Z","pl") + gra_5.Draw("AP") + gra_6.Draw("PSAME") # AP are options while drawing. 
A = show axis, P = markers at points (instead of lines between points) Draws on Canvas c1 + gra_7.Draw("PSAME") + legendd.Draw() # Doesn't need any arguments + c10.SaveAs("splitplotSKEWNESS%s.pdf" % tempvar2) #Saves in current directory, recommend saving asa pdf or png + + #makes the photon Xxyz RMS graph + c11 = TCanvas("c3","c3",2400,900) + c11.Divide(2,1) + ##Data + c11.cd(1) + gra8 = TGraph(NumberOfRuns_data, run_vec_data, kurtosis_array_data["Pull_Xx"]) # Size of arrays, followed by x,y-axis + gra8.GetYaxis().SetRangeUser(-0.6,-0.2) #this is the y-axis range + gra8.SetMarkerSize(1.2) # Makes marker size a bit bigger; standard is 1.0 + gra8.SetTitle("Data: Kurtosis Versus Run Number of Position " + graphsymbol) + gra8.GetXaxis().SetTitle("Run Number") + gra8.GetYaxis().SetTitle("Kurtosis") + gra8.SetMarkerColor(kBlue) + gra8.SetMarkerStyle(kFullCircle) # See https://root.cern.ch/doc/masterclassTAttMarker.html + gra9 = TGraph(NumberOfRuns_data, run_vec_data, kurtosis_array_data["Pull_Xy"]) # Size of arrays, followed by x,y-axis + gra9.SetMarkerSize(1.2) # Makes marker size a bit bigger; standard is 1.0 + gra9.SetMarkerColor(kRed) + gra9.SetMarkerStyle(kFullCircle) + gra10 = TGraph(NumberOfRuns_data, run_vec_data, kurtosis_array_data["Pull_Xz"]) # Size of arrays, followed by x,y-axis + gra10.SetMarkerSize(1.2) # Makes marker size a bit bigger; standard is 1.0 + gra10.SetMarkerColor(kGreen+3) + gra10.SetMarkerStyle(kFullCircle) + legend = TLegend(0.7,0.7,0.9,0.9) #0.1 is lower limit of plot, 0.9 is upper limit (beyond on either side is labeling+whitespace) + legend.AddEntry(gra8,"Kurtosis X","pl") + legend.AddEntry(gra9,"Kurtosis Y","pl") + legend.AddEntry(gra10,"Kurtosis Z","pl") + gra8.Draw("AP") + gra9.Draw("PSAME") # AP are options while drawing. A = show axis, P = markers at points (instead of lines between points) Draws on Canvas c1 + gra10.Draw("PSAME") + legend.Draw() # Doesn't need any arguments + ##Data + c11.cd(2) + gra_8 = TGraph(NumberOfRuns_mc, run_vec_mc, kurtosis_array_mc["Pull_Xx"]) # Size of arrays, followed by x,y-axis + gra_8.GetYaxis().SetRangeUser(-0.6,-0.2) #this is the y-axis range + gra_8.SetMarkerSize(1.2) # Makes marker size a bit bigger; standard is 1.0 + gra_8.SetTitle("MC: Kurtosis Versus Run Number of Position " + graphsymbol) + gra_8.GetXaxis().SetTitle("Run Number") + gra_8.GetYaxis().SetTitle("Kurtosis") + gra_8.SetMarkerColor(kBlue) + gra_8.SetMarkerStyle(kFullCircle) # See https://root.cern.ch/doc/masterclassTAttMarker.html + gra_9 = TGraph(NumberOfRuns_mc, run_vec_mc, kurtosis_array_mc["Pull_Xy"]) # Size of arrays, followed by x,y-axis + gra_9.SetMarkerSize(1.2) # Makes marker size a bit bigger; standard is 1.0 + gra_9.SetMarkerColor(kRed) + gra_9.SetMarkerStyle(kFullCircle) + gra_10 = TGraph(NumberOfRuns_mc, run_vec_mc, kurtosis_array_mc["Pull_Xz"]) # Size of arrays, followed by x,y-axis + gra_10.SetMarkerSize(1.2) # Makes marker size a bit bigger; standard is 1.0 + gra_10.SetMarkerColor(kGreen+3) + gra_10.SetMarkerStyle(kFullCircle) + legendd = TLegend(0.7,0.7,0.9,0.9) #0.1 is lower limit of plot, 0.9 is upper limit (beyond on either side is labeling+whitespace) + legendd.AddEntry(gra_8,"Kurtosis X","pl") + legendd.AddEntry(gra_9,"Kurtosis Y","pl") + legendd.AddEntry(gra_10,"Kurtosis Z","pl") + gra_8.Draw("AP") + gra_9.Draw("PSAME") # AP are options while drawing. 
A = show axis, P = markers at points (instead of lines between points) Draws on Canvas c1 + gra_10.Draw("PSAME") + legendd.Draw() # Doesn't need any arguments + c11.SaveAs("splitplotKURTOSIS%s.pdf" % tempvar2) #Saves in current directory, recommend saving asa pdf or png + except: print "Photon position couldn't be graphed" + counter = counter +1 + + +if __name__ == "__main__": + main(sys.argv[1:]) \ No newline at end of file diff --git a/kinfitter/monitoring/README.md b/kinfitter/monitoring/README.md new file mode 100644 index 00000000..4180f973 --- /dev/null +++ b/kinfitter/monitoring/README.md @@ -0,0 +1,5 @@ +# Tools for monitoring kinematic fitter performance +Tools for monitoring the default histograms produced by ReactionFilter, plotting pull quantities as a function of run number and comparing data to MC. Currently pi+pi-pi0 is the hardcoded reaction of interest, but code could be adapted for arbitrary topology. + +* Code author: Madison Blatchford (meb820@uregina.ca) +* Documentation at https://halldweb.jlab.org/doc-private/DocDB/ShowDocument?docid=5225 \ No newline at end of file diff --git a/launch_scripts/launch/launch.py b/launch_scripts/launch/launch.py index d526d0dd..087a8c9e 100755 --- a/launch_scripts/launch/launch.py +++ b/launch_scripts/launch/launch.py @@ -1,11 +1,11 @@ #!/usr/bin/env python +# Alex Austregesilo # Paul Mattione # Built off of hdswif by Kei Moriya # -# SWIF DOCUMENTATION: -# https://scicomp.jlab.org/docs/swif -# https://scicomp.jlab.org/docs/swif-cli +# SWIF2 DOCUMENTATION: +# https://scicomp.jlab.org/docs/swif2 from optparse import OptionParser import os.path @@ -220,9 +220,9 @@ def add_job(WORKFLOW, FILEPATH, config_dict): # CREATE ADD-JOB COMMAND # job - add_command = "swif add-job -workflow " + WORKFLOW + " -name " + JOBNAME + add_command = "swif2 add-job -workflow " + WORKFLOW + " -name " + JOBNAME # accounting - add_command += " -project " + config_dict["PROJECT"] + " -track " + config_dict["TRACK"] + " -os " + config_dict["OS"] + add_command += " -account " + config_dict["PROJECT"] + " -partition " + config_dict["TRACK"] + " -os " + config_dict["OS"] # resources add_command += " -cores " + config_dict["NCORES"] + " -disk " + config_dict["DISK"] + " -ram " + config_dict["RAM"] + " -time " + config_dict["TIMELIMIT"] # inputs diff --git a/launch_scripts/root_analysis/Run_Selector.C b/launch_scripts/root_analysis/Run_Selector.C index 44e349d5..32defa37 100644 --- a/launch_scripts/root_analysis/Run_Selector.C +++ b/launch_scripts/root_analysis/Run_Selector.C @@ -5,18 +5,10 @@ #include "TTree.h" #include "TDirectory.h" -R__LOAD_LIBRARY(libDSelector) - using namespace std; int Run_Selector(string locInputFileName, string locTreeName, string locSelectorName, unsigned int locNThreads) { - //Load library & headers - Long_t locResult = gROOT->ProcessLine(".x $ROOT_ANALYSIS_HOME/scripts/Load_DSelector.C"); - cout << "load return code: " << locResult << endl; - if(locResult != 0) - return 999; //error loading, return - //tell it to compile selector (if user did not) if(locSelectorName[locSelectorName.size() - 1] != '+') locSelectorName += '+'; @@ -25,8 +17,10 @@ int Run_Selector(string locInputFileName, string locTreeName, string locSelector if(locNThreads > 1) //USE PROOF { - DPROOFLiteManager::Set_SandBox("./"); - return (DPROOFLiteManager::Process_Tree(locInputFileName, locTreeName, locSelectorName, locNThreads) ? 
0 : 999); //0 = success + gEnv->SetValue("ProofLite.Sandbox", "$PWD/.proof/"); // write all intermediate files to the local directory + DPROOFLiteManager::Set_SandBox("./"); // that does not work, as the proof session was already started + + return (DPROOFLiteManager::Process_Tree(locInputFileName, locTreeName, locSelectorName, locNThreads) ? 0 : 999); //0 = success } //process tree directly diff --git a/launch_scripts/root_analysis/script.sh b/launch_scripts/root_analysis/script.sh index cb563979..74a7d726 100755 --- a/launch_scripts/root_analysis/script.sh +++ b/launch_scripts/root_analysis/script.sh @@ -16,6 +16,17 @@ Setup_Script() perl -e "print qq(@INC)" echo "" + # COPY CCDB SQLITE FILE TO LOCAL DISK + if [[ $CCDB_CONNECTION == *"sqlite"* ]] ; then + local SCRATCH=/scratch/slurm/${SLURM_JOB_ID} + ls -l $SCRATCH + local NEW_SQLITE=${SCRATCH}/ccdb.sqlite + cp -v ${CCDB_CONNECTION:10} $NEW_SQLITE + export CCDB_CONNECTION=sqlite:///$NEW_SQLITE + export JANA_CALIB_URL=sqlite:///$NEW_SQLITE + echo "JANA_CALIB_URL: " $JANA_CALIB_URL + fi + # COPY INPUT FILE TO WORKING DIRECTORY # This step is necessary since the cache files will be created as soft links in the current directory, and we want to avoid large I/O processes. # We first copy the input file to the current directory, then remove the link. @@ -94,7 +105,7 @@ Save_ROOTFiles() local NUM_FILES=`ls *.root 2>/dev/null | wc -l` if [ $NUM_FILES -eq 0 ] ; then echo "No additional ROOT files produced" - return + exit 1 fi echo "Saving other ROOT files" @@ -132,7 +143,7 @@ Run_Script() # RUN ROOT Extract_FileName $SELECTOR_NAME SELECTOR_FILE cp ${SELECTOR_NAME}.* . - root -b -q $ROOT_SCRIPT'("'$INPUTFILE'", "'$TREE_NAME'", "'${SELECTOR_FILE}.C+'", '${NUM_THREADS}')' + root -b -q $ROOT_ANALYSIS_HOME/scripts/Load_DSelector.C $ROOT_SCRIPT'("'$INPUTFILE'", "'$TREE_NAME'", "'${SELECTOR_FILE}.C+'", '${NUM_THREADS}')' # RETURN CODE RETURN_CODE=$? diff --git a/psflux/README.md b/psflux/README.md index 4c832fd4..e70b3655 100644 --- a/psflux/README.md +++ b/psflux/README.md @@ -3,9 +3,11 @@ Python script for creating histograms of tagged and untagged PS flux: Justin Ste The tagged and untagged pair spectrometer flux and acceptance are stored in CCDB. The command to obtain histograms of the flux is: ``` -plot_flux_ccdb.py --begin-run beginRun --end-run endRun --num-bins 100 --energy-min 6.0 --energy-max 12.0 --rest-ver 3 +python plot_flux_ccdb.py --begin-run beginRun --end-run endRun --num-bins 100 --energy-min 6.0 --energy-max 12.0 --rest-ver 3 ``` +See below for notes on python version if you get an error with this command. + ## Command line options: If you run the script without any arguments you'll receive this message with a list of required and optional arguments @@ -45,6 +47,7 @@ https://halldweb.jlab.org/wiki-private/index.php/Spring_2016_Analysis_Launch https://halldweb.jlab.org/wiki-private/index.php/Spring_2017_Analysis_Launch https://halldweb.jlab.org/wiki-private/index.php/Spring_2018_Analysis_Launch https://halldweb.jlab.org/wiki-private/index.php/Fall_2018_Analysis_Launch +https://halldweb.jlab.org/wiki-private/index.php/Spring_2020_Analysis_Launch ## Prerequisites: @@ -62,11 +65,24 @@ and for simplicity you can use the standard build_scripts procedure to set these source /group/halld/Software/build_scripts/gluex_env_nightly.csh 2019-10-08 ``` -## Notes: +## Python Version Notes: + +You need to use a version of Python compatible with that used in your ROOT installation. 
If you get a python error when executing the script run this command to determine what version of python is used in your ROOT build + +root-config --python-version + +For older ROOT installations (6.08 or earlier) you will probably use python ver2.7, but for newer builds python ver3.6 is the default. Now use the correct python version to execute the script + +python2.7 plot_flux_ccdb.py ... + +or + +python3.6 plot_flux_ccdb.py ... -The flux values in the MySQL CCDB are from: +## The flux values in the MySQL CCDB are from: -RunPeriod-2018-01: REST ver02 production -RunPeriod-2018-08: REST ver02 production (ver00 for low-energy runs 51384-51457) +RunPeriod-2019-11: REST ver01 production +RunPeriod-2018-08: REST ver02 production +RunPeriod-2018-01: REST ver02 production RunPeriod-2017-01: REST ver03 production RunPeriod-2016-02: REST ver06 production diff --git a/psflux/plot_flux_ccdb.py b/psflux/plot_flux_ccdb.py index d6a89d39..7793954a 100755 --- a/psflux/plot_flux_ccdb.py +++ b/psflux/plot_flux_ccdb.py @@ -123,68 +123,68 @@ def main(): if options.angle: RCDB_POL_ANGLE = options.angle if options.nbins: - NBINS = int(options.nbins) + NBINS = int(options.nbins) if options.emin: EMIN = float(options.emin) if options.emax: EMAX = float(options.emax) if options.rcdb_query: - RCDB_QUERY_USER = options.rcdb_query + RCDB_QUERY_USER = options.rcdb_query if options.calib_time: try: CALIBTIME_USER = datetime.strptime(options.calib_time, "%Y-%m-%d-%H-%M-%S") except: - print "Calibration time format: Y-M-D-h-min-s" + print("Calibration time format: Y-M-D-h-min-s") sys.exit(0) if options.uniform: - UNIFORM = True + UNIFORM = True if options.rest_ver: - RESTVERSION = options.rest_ver + RESTVERSION = options.rest_ver if options.length: TARGETLENGTH = float(options.length) # Run-dependent defaults for RCDB query if RCDB_QUERY != RCDB_QUERY_USER: - RCDB_QUERY = RCDB_QUERY_USER + RCDB_QUERY = RCDB_QUERY_USER else: - if int(options.begin_run) >= 40000 and int(options.begin_run) < 60000: + if int(options.begin_run) >= 40000 and int(options.begin_run) < 60000: # 2018-01 and 2018-11 run periods RCDB_QUERY = "@is_2018production and @status_approved" if int(options.begin_run) >= 70000: # 2019-11 run period RCDB_QUERY = "@is_dirc_production and @status_approved" - print "RCDB quergy = " + RCDB_QUERY + print("RCDB quergy = " + RCDB_QUERY) # REST production dependent CCDB calibtime if CALIBTIME != CALIBTIME_USER: - CALIBTIME = CALIBTIME_USER - CALIBTIME_ENERGY = CALIBTIME_USER + CALIBTIME = CALIBTIME_USER + CALIBTIME_ENERGY = CALIBTIME_USER else: # get run period by run number - runPeriod = "test" - begin_run = int(options.begin_run) - if begin_run < 20000: - runPeriod = "RunPeriod-2016-02" - elif begin_run < 40000: - runPeriod = "RunPeriod-2017-01" + runPeriod = "test" + begin_run = int(options.begin_run) + if begin_run < 20000: + runPeriod = "RunPeriod-2016-02" + elif begin_run < 40000: + runPeriod = "RunPeriod-2017-01" elif begin_run < 50000: - runPeriod = "RunPeriod-2018-01" + runPeriod = "RunPeriod-2018-01" elif begin_run < 60000: - runPeriod = "RunPeriod-2018-08" + runPeriod = "RunPeriod-2018-08" elif begin_run < 70000: - runPeriod = "RunPeriod-2019-01" - elif begin_run < 80000: - runPeriod = "RunPeriod-2019-11" - contextList = loadCCDBContextList(runPeriod,RESTVERSION) - RESTVERSION = contextList[0][0] # get REST version number from DB - context = contextList[0][1] # get full JANA_CALIB_CONTEXT list from DB - startCalibTime = context.find("calibtime") - calibTimeString = context[startCalibTime+10:-1] - 
CALIBTIME_ENERGY = datetime.strptime(calibTimeString , "%Y-%m-%d-%H-%M-%S") - print "CCDB calibtime for energy to match REST ver%02d" % RESTVERSION + " = " + CALIBTIME_ENERGY.strftime("%Y-%m-%d-%H-%M-%S") + runPeriod = "RunPeriod-2019-01" + elif begin_run < 80000: + runPeriod = "RunPeriod-2019-11" + contextList = loadCCDBContextList(runPeriod,RESTVERSION) + RESTVERSION = contextList[0][0] # get REST version number from DB + context = contextList[0][1] # get full JANA_CALIB_CONTEXT list from DB + startCalibTime = context.find("calibtime") + calibTimeString = context[startCalibTime+10:-1] + CALIBTIME_ENERGY = datetime.strptime(calibTimeString , "%Y-%m-%d-%H-%M-%S") + print("CCDB calibtime for energy to match REST ver%02d" % RESTVERSION + " = " + CALIBTIME_ENERGY.strftime("%Y-%m-%d-%H-%M-%S")) # Load CCDB - print "CCDB calibtime for flux = " + CALIBTIME.strftime("%Y-%m-%d-%H-%M-%S") + print("CCDB calibtime for flux = " + CALIBTIME.strftime("%Y-%m-%d-%H-%M-%S")) ccdb_conn = LoadCCDB() # Load RCDB @@ -193,7 +193,7 @@ def main(): rcdb_conn = rcdb.RCDBProvider("mysql://rcdb@hallddb.jlab.org/rcdb") except: e = sys.exc_info()[0] - print "Could not connect to RCDB: " + str(e) + print("Could not connect to RCDB: " + str(e)) # get run list runs = rcdb_conn.select_runs(RCDB_QUERY, BEGINRUN, ENDRUN) @@ -208,7 +208,7 @@ def main(): tagh_scaled_energy = array('d') if UNIFORM: - htagged_flux = TH1D("tagged_flux_uniform", "Uniform tagged flux; Photon Beam Energy (GeV); Flux (# photons on target)", NBINS, EMIN, EMAX) + htagged_flux = TH1D("tagged_flux_uniform", "Uniform tagged flux; Photon Beam Energy (GeV); Flux (# photons on target)", NBINS, EMIN, EMAX) htagged_fluxErr = TH1D("tagged_flux", "Tagged flux; Photon Beam Energy (GeV); Flux (# photons on target)", NBINS, EMIN, EMAX) htagm_fluxErr = TH1D("tagm_flux", "Tagged flux; TAGM Column; Flux", 102, 1, 103) htagh_fluxErr = TH1D("tagh_flux", "Tagged flux; TAGH Counter; Flux", 274, 1, 275) @@ -216,8 +216,8 @@ def main(): # Loop over runs for run in runs: if RCDB_POLARIZATION == "" and RCDB_POL_ANGLE != "": - print "ERROR: polarization angle (option -a or --angle) specified, but polarization flag (option -p or --pol) was not. " - print "Please rerun and specify a polarization flag (PARA or PERP) while running" + print("ERROR: polarization angle (option -a or --angle) specified, but polarization flag (option -p or --pol) was not. 
") + print("Please rerun and specify a polarization flag (PARA or PERP) while running") return # select run conditions: AMO, PARA, and PERP and polarization angle @@ -233,44 +233,44 @@ def main(): if RCDB_POL_ANGLE != "" and run.get_condition('polarization_angle').value != float(RCDB_POL_ANGLE): continue - print "==%d=="%run.number - + print("==%d=="%run.number) + # Set livetime scale factor - livetime_ratio = 0.0 - try: - livetime_assignment = ccdb_conn.get_assignment("/PHOTON_BEAM/pair_spectrometer/lumi/trig_live", run.number, VARIATION, CALIBTIME) - livetime = livetime_assignment.constant_set.data_table - if float(livetime[3][1]) > 0.0: # check that livetimes are non-zero - livetime_ratio = float(livetime[0][1])/float(livetime[3][1]) - else: # if bad livetime assume ratio is 1 - livetime_ratio = 1.0 - except: - livetime_ratio = 1.0 # default to unity if table doesn't exist + livetime_ratio = 0.0 + try: + livetime_assignment = ccdb_conn.get_assignment("/PHOTON_BEAM/pair_spectrometer/lumi/trig_live", run.number, VARIATION, CALIBTIME) + livetime = livetime_assignment.constant_set.data_table + if float(livetime[3][1]) > 0.0: # check that livetimes are non-zero + livetime_ratio = float(livetime[0][1])/float(livetime[3][1]) + else: # if bad livetime assume ratio is 1 + livetime_ratio = 1.0 + except: + livetime_ratio = 1.0 # default to unity if table doesn't exist # printout for livetimes different from unity #if livetime_ratio > 1.0 or livetime_ratio < 0.9: # print livetime_ratio # Conversion factors for total flux - converterThicknessTable = run.get_condition('polarimeter_converter') # 75 or 750 micron - converterThickness = "" - if converterThicknessTable: - converterThickness = converterThicknessTable.value - - converterLength = 0 - if converterThickness == "Be 75um": # default is 75 um - converterLength = 75e-6 - elif converterThickness == "Be 750um": - converterLength = 750e-6 - elif run.number > 10633 and run.number < 10694: # no coverter in RCDB, but 75 um from logbook - converterLength = 75e-6 - else: - print "Unknown converter thickness" - sys.exit(0) - - berilliumRL = 35.28e-2 # 35.28 cm - radiationLength = converterLength/berilliumRL; - scale = livetime_ratio * 1./((7/9.) * radiationLength); + converterThicknessTable = run.get_condition('polarimeter_converter') # 75 or 750 micron + converterThickness = "" + if converterThicknessTable: + converterThickness = converterThicknessTable.value + + converterLength = 0 + if converterThickness == "Be 75um": # default is 75 um + converterLength = 75e-6 + elif converterThickness == "Be 750um": + converterLength = 750e-6 + elif run.number > 10633 and run.number < 10694: # no coverter in RCDB, but 75 um from logbook + converterLength = 75e-6 + else: + print("Unknown converter thickness") + sys.exit(0) + + berilliumRL = 35.28e-2 # 35.28 cm + radiationLength = converterLength/berilliumRL; + scale = livetime_ratio * 1./((7/9.) 
* radiationLength); try: photon_endpoint_assignment = ccdb_conn.get_assignment("/PHOTON_BEAM/endpoint_energy", run.number, VARIATION, CALIBTIME_ENERGY) @@ -286,7 +286,7 @@ def main(): tagh_scaled_energy_assignment = ccdb_conn.get_assignment("/PHOTON_BEAM/hodoscope/scaled_energy_range", run.number, VARIATION, CALIBTIME_ENERGY) tagh_scaled_energy_table = tagh_scaled_energy_assignment.constant_set.data_table except: - print "Missing flux for run number = %d, contact jrsteven@jlab.org" % run.number + print("Missing flux for run number = %d, contact jrsteven@jlab.org" % run.number) sys.exit(0) @@ -308,12 +308,12 @@ def main(): calibrated_endpoint = True except: if run.number > 60000: - print "Missing endpoint calibration for run "+run.number + print("Missing endpoint calibration for run "+run.number) sys.exit(0) # fill tagm histogram - for tagm_flux, tagm_scaled_energy in zip(tagm_tagged_flux, tagm_scaled_energy_table): - tagm_energy = float(photon_endpoint[0][0])*(float(tagm_scaled_energy[1])+float(tagm_scaled_energy[2]))/2. + for tagm_flux, tagm_scaled_energy in zip(tagm_tagged_flux, tagm_scaled_energy_table): + tagm_energy = float(photon_endpoint[0][0])*(float(tagm_scaled_energy[1])+float(tagm_scaled_energy[2]))/2. if calibrated_endpoint: tagm_energy = float(photon_endpoint_calib[0][0])*(float(tagm_scaled_energy[1])+float(tagm_scaled_energy[2]))/2. + photon_endpoint_delta_E @@ -321,19 +321,19 @@ def main(): if psAccept <= 0.0: continue - if UNIFORM: - tagm_energy_low = float(photon_endpoint[0][0])*(float(tagm_scaled_energy[1])) - tagm_energy_high = float(photon_endpoint[0][0])*(float(tagm_scaled_energy[2])) - if calibrated_endpoint: - tagm_energy_low = float(photon_endpoint_calib[0][0])*(float(tagm_scaled_energy[1])) + photon_endpoint_delta_E - tagm_energy_high = float(photon_endpoint_calib[0][0])*(float(tagm_scaled_energy[2])) + photon_endpoint_delta_E + if UNIFORM: + tagm_energy_low = float(photon_endpoint[0][0])*(float(tagm_scaled_energy[1])) + tagm_energy_high = float(photon_endpoint[0][0])*(float(tagm_scaled_energy[2])) + if calibrated_endpoint: + tagm_energy_low = float(photon_endpoint_calib[0][0])*(float(tagm_scaled_energy[1])) + photon_endpoint_delta_E + tagm_energy_high = float(photon_endpoint_calib[0][0])*(float(tagm_scaled_energy[2])) + photon_endpoint_delta_E - flux = float(tagm_flux[1]) - i = 0 - while i <= flux: - energy = tagm_energy_low + gRandom.Uniform(tagm_energy_high-tagm_energy_low) - htagged_flux.Fill(energy,scale/fPSAcceptance.Eval(energy)) - i += 1 + flux = float(tagm_flux[1]) + i = 0 + while i <= flux: + energy = tagm_energy_low + gRandom.Uniform(tagm_energy_high-tagm_energy_low) + htagged_flux.Fill(energy,scale/fPSAcceptance.Eval(energy)) + i += 1 bin_energy = htagged_fluxErr.FindBin(tagm_energy) previous_bincontent = htagged_fluxErr.GetBinContent(bin_energy) @@ -343,13 +343,13 @@ def main(): current_binerror = float(tagm_flux[2]) * scale / psAccept new_bincontent = previous_bincontent + current_bincontent new_binerror = math.sqrt(previous_binerror*previous_binerror + current_binerror*current_binerror) - htagged_fluxErr.SetBinContent(bin_energy, new_bincontent) + htagged_fluxErr.SetBinContent(bin_energy, new_bincontent) htagged_fluxErr.SetBinError(bin_energy, new_binerror) htagm_fluxErr.Fill(int(tagm_flux[0]), current_bincontent) # fill tagh histogram - previous_energy_scaled_low = 999. # keep track of low energy bin boundry to avoid overlaps - for tagh_flux, tagh_scaled_energy in zip(tagh_tagged_flux, tagh_scaled_energy_table): + previous_energy_scaled_low = 999. 
# keep track of low energy bin boundry to avoid overlaps + for tagh_flux, tagh_scaled_energy in zip(tagh_tagged_flux, tagh_scaled_energy_table): tagh_energy = float(photon_endpoint[0][0])*(float(tagh_scaled_energy[1])+float(tagh_scaled_energy[2]))/2. if calibrated_endpoint: tagh_energy = float(photon_endpoint_calib[0][0])*(float(tagh_scaled_energy[1])+float(tagh_scaled_energy[2]))/2. + photon_endpoint_delta_E @@ -358,37 +358,37 @@ def main(): if psAccept <= 0.0: continue - if UNIFORM: - tagh_energy_low = float(photon_endpoint[0][0])*(float(tagh_scaled_energy[1])) - tagh_energy_high = float(photon_endpoint[0][0])*(float(tagh_scaled_energy[2])) - if calibrated_endpoint: - tagh_energy_low = float(photon_endpoint_calib[0][0])*(float(tagh_scaled_energy[1])) + photon_endpoint_delta_E - tagh_energy_high = float(photon_endpoint_calib[0][0])*(float(tagh_scaled_energy[2])) + photon_endpoint_delta_E - - if previous_energy_scaled_low < tagh_energy_high: - tagh_energy_high = previous_energy_scaled_low - - flux = float(tagh_flux[1]) - i = 0 - while i <= flux: - energy = tagh_energy_low + gRandom.Uniform(tagh_energy_high-tagh_energy_low) - ps_acceptance = fPSAcceptance.Eval(energy) - if ps_acceptance > 0: - htagged_flux.Fill(energy,scale/fPSAcceptance.Eval(energy)) - i += 1 - - previous_energy_scaled_low = tagh_energy_low - - bin_energy = htagged_fluxErr.FindBin(tagh_energy) - previous_bincontent = htagged_fluxErr.GetBinContent(bin_energy) - previous_binerror = htagged_fluxErr.GetBinError(bin_energy) + if UNIFORM: + tagh_energy_low = float(photon_endpoint[0][0])*(float(tagh_scaled_energy[1])) + tagh_energy_high = float(photon_endpoint[0][0])*(float(tagh_scaled_energy[2])) + if calibrated_endpoint: + tagh_energy_low = float(photon_endpoint_calib[0][0])*(float(tagh_scaled_energy[1])) + photon_endpoint_delta_E + tagh_energy_high = float(photon_endpoint_calib[0][0])*(float(tagh_scaled_energy[2])) + photon_endpoint_delta_E + + if previous_energy_scaled_low < tagh_energy_high: + tagh_energy_high = previous_energy_scaled_low + + flux = float(tagh_flux[1]) + i = 0 + while i <= flux: + energy = tagh_energy_low + gRandom.Uniform(tagh_energy_high-tagh_energy_low) + ps_acceptance = fPSAcceptance.Eval(energy) + if ps_acceptance > 0: + htagged_flux.Fill(energy,scale/fPSAcceptance.Eval(energy)) + i += 1 + + previous_energy_scaled_low = tagh_energy_low + + bin_energy = htagged_fluxErr.FindBin(tagh_energy) + previous_bincontent = htagged_fluxErr.GetBinContent(bin_energy) + previous_binerror = htagged_fluxErr.GetBinError(bin_energy) current_bincontent = float(tagh_flux[1]) * scale / psAccept current_binerror = float(tagh_flux[2]) * scale / psAccept - new_bincontent = previous_bincontent + current_bincontent - new_binerror = math.sqrt(previous_binerror*previous_binerror + current_binerror*current_binerror) - htagged_fluxErr.SetBinContent(bin_energy, new_bincontent) - htagged_fluxErr.SetBinError(bin_energy, new_binerror) + new_bincontent = previous_bincontent + current_bincontent + new_binerror = math.sqrt(previous_binerror*previous_binerror + current_binerror*current_binerror) + htagged_fluxErr.SetBinContent(bin_energy, new_bincontent) + htagged_fluxErr.SetBinError(bin_energy, new_binerror) htagh_fluxErr.Fill(int(tagh_flux[0]), current_bincontent) # Get density factor from CCDB @@ -434,7 +434,7 @@ def main(): fout = TFile(OUTPUT_FILENAME, "recreate") if UNIFORM: - htagged_flux.Write() + htagged_flux.Write() htagged_fluxErr.Write() htagged_lumiErr.Write() htagm_fluxErr.Write() diff --git a/tar_multi/disk_to_tape_backup.sh 
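Note on the normalization in the hunks above: the pair-spectrometer yield for each tagger counter is corrected for the PS livetime and divided by the converter's pair-conversion probability, (7/9) x (thickness / radiation length), and then by the PS acceptance at the counter's energy. A minimal standalone sketch of that arithmetic, assuming a 75 um Be converter and a unit livetime ratio (the per-run CCDB/RCDB lookups in the script are omitted here):

    # Sketch only: fixed inputs stand in for the CCDB/RCDB lookups done per run.
    converterLength = 75e-6      # m, thickness of the "Be 75um" converter
    berilliumRL     = 35.28e-2   # m, radiation length of beryllium (same value as the script)
    livetime_ratio  = 1.0        # PS livetime correction (unity when the table is missing)

    radiationLength = converterLength/berilliumRL            # ~2.1e-4 radiation lengths
    scale = livetime_ratio * 1./((7/9.) * radiationLength)   # ~6.0e3

    # Each counter's flux is then filled with weight scale/PS-acceptance(E),
    # as in htagged_flux.Fill(energy, scale/fPSAcceptance.Eval(energy)) above.
    print("flux scale factor: %.3g" % scale)

With these inputs the scale factor comes out near 6x10^3; the thicker 750 um converter would give roughly one tenth of that.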
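The TAGM and TAGH loops also repeat one accumulation pattern for htagged_fluxErr: when several counters fall into the same energy bin, their flux contributions are summed and their statistical errors are combined in quadrature. A hypothetical helper (not part of the script, shown only to make the pattern explicit; hist is a ROOT TH1 as used above):

    import math

    def accumulate_flux(hist, energy, flux, flux_err):
        # Add one counter's flux to the bin containing 'energy', combining
        # errors in quadrature, mirroring the SetBinContent/SetBinError calls above.
        bin_energy = hist.FindBin(energy)
        new_content = hist.GetBinContent(bin_energy) + flux
        new_error = math.sqrt(hist.GetBinError(bin_energy)**2 + flux_err**2)
        hist.SetBinContent(bin_energy, new_content)
        hist.SetBinError(bin_energy, new_error)

In the script the per-counter contribution is the counter flux times scale/psAccept, and the flux error column is treated the same way.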
diff --git a/tar_multi/disk_to_tape_backup.sh b/tar_multi/disk_to_tape_backup.sh
index 34d62242..868b6151 100755
--- a/tar_multi/disk_to_tape_backup.sh
+++ b/tar_multi/disk_to_tape_backup.sh
@@ -1,6 +1,6 @@
 #!/bin/bash
-source_dir=$1 # directory name to be archive with full path
-tar_multi=$2 # script to guide tar multi-volume archive making, extraction
+source_dir=$1 # directory name to be archived (use full path)
+tar_multi=$2 # script to guide tar multi-volume archive making/extraction
 size=$3 # maximum size of each tar file volume (suffix: G, M, or k)
 cache_dir=/cache/halld/home/backups$source_dir
 mkdir -pv $cache_dir