[lhe] in ggtt.mad prototype a simplification of multichannel ifdefs (madgraph5#568) - will revert it and do it separately
valassi committed Dec 14, 2022
1 parent b9f534b commit 63e04b9
Showing 5 changed files with 16 additions and 61 deletions.
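
For orientation, the sketch below is not part of the commit; names and bodies are simplified assumptions. It illustrates the call-site pattern this prototype moves towards in the hunks that follow: the multichannel arguments (channelId, numerators, denominators) are always part of the signature, and a build without MGONGPU_SUPPORTS_MULTICHANNEL simply asserts channelId == 0 at runtime instead of compiling a second signature under #ifdef.

// Hedged sketch only (hypothetical names, not the real kernel code): the multichannel
// arguments are always declared, and the no-multichannel build enforces channelId == 0
// instead of providing an alternative #ifdef'ed signature.
#include <cassert>

typedef double fptype_sketch; // assumption: stand-in for the real fptype

void sigmaKin_sketch( const fptype_sketch* allmomenta,  // input: momenta
                      fptype_sketch* allMEs,            // output: |M|^2 per event
                      const unsigned int channelId,     // 0 disables channel enhancement
                      fptype_sketch* allNumerators,     // may point to a dummy (zero-sized) buffer
                      fptype_sketch* allDenominators,   // may point to a dummy (zero-sized) buffer
                      const int nevt )
{
#ifndef MGONGPU_SUPPORTS_MULTICHANNEL
  assert( channelId == 0 ); // without multichannel support, enhancement must stay off
#endif
  for( int ievt = 0; ievt < nevt; ievt++ )
  {
    allMEs[ievt] = 0;        // placeholder for the real matrix element sum
    if( channelId != 0 )     // only touched when enhancement is enabled,
    {                        // so the dummy buffers are never dereferenced
      allNumerators[ievt] = 1;
      allDenominators[ievt] = 1;
    }
  }
  (void)allmomenta; // unused in this sketch
}

This mirrors the assert( channelId == 0 ) guards added in CPPProcess.cc below; per the commit message, the change is a prototype that will be reverted and redone separately.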
20 changes: 0 additions & 20 deletions epochX/cudacpp/gg_tt.mad/SubProcesses/MatrixElementKernels.cc
@@ -26,10 +26,8 @@ namespace mg5amcCpu
: MatrixElementKernelBase( momenta, gs, rndhel, rndcol, matrixElements, selhel, selcol )
, NumberOfEvents( nevt )
, m_couplings( nevt )
#ifdef MGONGPU_SUPPORTS_MULTICHANNEL
, m_numerators( nevt )
, m_denominators( nevt )
#endif
{
if( m_momenta.isOnDevice() ) throw std::runtime_error( "MatrixElementKernelHost: momenta must be a host array" );
if( m_matrixElements.isOnDevice() ) throw std::runtime_error( "MatrixElementKernelHost: matrixElements must be a host array" );
@@ -59,11 +57,7 @@ namespace mg5amcCpu
HostBufferHelicityMask hstIsGoodHel( ncomb );
// ... 0d1. Compute good helicity mask on the host
computeDependentCouplings( m_gs.data(), m_couplings.data(), m_gs.size() );
#ifdef MGONGPU_SUPPORTS_MULTICHANNEL
sigmaKin_getGoodHel( m_momenta.data(), m_couplings.data(), m_matrixElements.data(), m_numerators.data(), m_denominators.data(), hstIsGoodHel.data(), nevt() );
#else
sigmaKin_getGoodHel( m_momenta.data(), m_couplings.data(), m_matrixElements.data(), hstIsGoodHel.data(), nevt() );
#endif
// ... 0d2. Copy back good helicity list to static memory on the host
// [FIXME! REMOVE THIS STATIC THAT BREAKS MULTITHREADING?]
return sigmaKin_setGoodHel( hstIsGoodHel.data() );
@@ -74,11 +68,7 @@
void MatrixElementKernelHost::computeMatrixElements( const unsigned int channelId )
{
computeDependentCouplings( m_gs.data(), m_couplings.data(), m_gs.size() );
#ifdef MGONGPU_SUPPORTS_MULTICHANNEL
sigmaKin( m_momenta.data(), m_couplings.data(), m_rndhel.data(), m_rndcol.data(), m_matrixElements.data(), channelId, m_numerators.data(), m_denominators.data(), m_selhel.data(), m_selcol.data(), nevt() );
#else
sigmaKin( m_momenta.data(), m_couplings.data(), m_rndhel.data(), m_rndcol.data(), m_matrixElements.data(), m_selhel.data(), m_selcol.data(), nevt() );
#endif
}

//--------------------------------------------------------------------------
@@ -156,10 +146,8 @@ namespace mg5amcGpu
: MatrixElementKernelBase( momenta, gs, rndhel, rndcol, matrixElements, selhel, selcol )
, NumberOfEvents( gpublocks * gputhreads )
, m_couplings( this->nevt() )
#ifdef MGONGPU_SUPPORTS_MULTICHANNEL
, m_numerators( this->nevt() )
, m_denominators( this->nevt() )
#endif
, m_gpublocks( gpublocks )
, m_gputhreads( gputhreads )
{
@@ -198,11 +186,7 @@
DeviceBufferHelicityMask devIsGoodHel( ncomb );
// ... 0d1. Compute good helicity mask on the device
computeDependentCouplings<<<m_gpublocks, m_gputhreads>>>( m_gs.data(), m_couplings.data() );
#ifdef MGONGPU_SUPPORTS_MULTICHANNEL
sigmaKin_getGoodHel<<<m_gpublocks, m_gputhreads>>>( m_momenta.data(), m_couplings.data(), m_matrixElements.data(), m_numerators.data(), m_denominators.data(), devIsGoodHel.data() );
#else
sigmaKin_getGoodHel<<<m_gpublocks, m_gputhreads>>>( m_momenta.data(), m_couplings.data(), m_matrixElements.data(), devIsGoodHel.data() );
#endif
checkCuda( cudaPeekAtLastError() );
// ... 0d2. Copy back good helicity mask to the host
copyHostFromDevice( hstIsGoodHel, devIsGoodHel );
@@ -220,11 +204,7 @@
#else
constexpr unsigned int sharedMemSize = ntpbMAX * sizeof( float );
#endif
#ifdef MGONGPU_SUPPORTS_MULTICHANNEL
sigmaKin<<<m_gpublocks, m_gputhreads, sharedMemSize>>>( m_momenta.data(), m_couplings.data(), m_rndhel.data(), m_rndcol.data(), m_matrixElements.data(), channelId, m_numerators.data(), m_denominators.data(), m_selhel.data(), m_selcol.data() );
#else
sigmaKin<<<m_gpublocks, m_gputhreads, sharedMemSize>>>( m_momenta.data(), m_couplings.data(), m_rndhel.data(), m_rndcol.data(), m_matrixElements.data(), m_selhel.data(), m_selcol.data() );
#endif
checkCuda( cudaPeekAtLastError() );
checkCuda( cudaDeviceSynchronize() );
}
4 changes: 0 additions & 4 deletions epochX/cudacpp/gg_tt.mad/SubProcesses/MatrixElementKernels.h
@@ -113,13 +113,11 @@ namespace mg5amcCpu
// The buffer for the event-by-event couplings that depends on alphas QCD
HostBufferCouplings m_couplings;

#ifdef MGONGPU_SUPPORTS_MULTICHANNEL
// The buffer for the event-by-event numerators of multichannel factors
HostBufferNumerators m_numerators;

// The buffer for the event-by-event denominators of multichannel factors
HostBufferDenominators m_denominators;
#endif
};
#endif

@@ -162,13 +160,11 @@ namespace mg5amcCpu
// The buffer for the event-by-event couplings that depends on alphas QCD
DeviceBufferCouplings m_couplings;

#ifdef MGONGPU_SUPPORTS_MULTICHANNEL
// The buffer for the event-by-event numerators of multichannel factors
DeviceBufferNumerators m_numerators;

// The buffer for the event-by-event denominators of multichannel factors
DeviceBufferDenominators m_denominators;
#endif

// The number of blocks in the GPU grid
size_t m_gpublocks;
14 changes: 9 additions & 5 deletions epochX/cudacpp/gg_tt.mad/SubProcesses/MemoryBuffers.h
@@ -262,13 +262,16 @@ namespace mg5amcCpu

//--------------------------------------------------------------------------

#ifdef MGONGPU_SUPPORTS_MULTICHANNEL
// A base class encapsulating a memory buffer for numerators (of the multichannel single-diagram enhancement factors)
typedef BufferBase<fptype> BufferNumerators;

// The size (number of elements) per event in a memory buffer for numerators
#ifdef MGONGPU_SUPPORTS_MULTICHANNEL
constexpr size_t sizePerEventNumerators = 1;

#else
constexpr size_t sizePerEventNumerators = 0; // DUMMY! WILL NOT BE ALLOCATED
#endif

#ifndef __CUDACC__
// A class encapsulating a C++ host buffer for gs
typedef HostBuffer<fptype, sizePerEventNumerators, HostBufferALIGNED> HostBufferNumerators;
@@ -277,17 +280,19 @@ namespace mg5amcCpu
typedef PinnedHostBuffer<fptype, sizePerEventNumerators> PinnedHostBufferNumerators;
// A class encapsulating a CUDA device buffer for gs
typedef DeviceBuffer<fptype, sizePerEventNumerators> DeviceBufferNumerators;
#endif
#endif

//--------------------------------------------------------------------------

#ifdef MGONGPU_SUPPORTS_MULTICHANNEL
// A base class encapsulating a memory buffer for denominators (of the multichannel single-diagram enhancement factors)
typedef BufferBase<fptype> BufferDenominators;

// The size (number of elements) per event in a memory buffer for denominators
#ifdef MGONGPU_SUPPORTS_MULTICHANNEL
constexpr size_t sizePerEventDenominators = 1;
#else
constexpr size_t sizePerEventDenominators = 0; // DUMMY! WILL NOT BE ALLOCATED
#endif

#ifndef __CUDACC__
// A class encapsulating a C++ host buffer for gs
@@ -297,7 +302,6 @@ namespace mg5amcCpu
typedef PinnedHostBuffer<fptype, sizePerEventDenominators> PinnedHostBufferDenominators;
// A class encapsulating a CUDA device buffer for gs
typedef DeviceBuffer<fptype, sizePerEventDenominators> DeviceBufferDenominators;
#endif
#endif

//--------------------------------------------------------------------------
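
The MemoryBuffers.h hunks above rely on a zero-size dummy: the numerator/denominator buffer types always exist, but their per-event size collapses to 0 when multichannel support is compiled out, so nothing is actually allocated. Below is a minimal self-contained sketch of that trick, using a simplified buffer class rather than the real HostBuffer/DeviceBuffer templates.

// Hedged illustration only: a simplified stand-in for the real buffer templates.
#include <cstddef>
#include <vector>

template<typename T, size_t sizePerEvent>
class SimpleBufferSketch
{
public:
  explicit SimpleBufferSketch( size_t nevt ) : m_data( nevt * sizePerEvent ) {} // 0 elements if sizePerEvent == 0
  T* data() { return m_data.data(); }
  size_t size() const { return m_data.size(); }
private:
  std::vector<T> m_data;
};

#ifdef MGONGPU_SUPPORTS_MULTICHANNEL
constexpr size_t sizePerEventNumeratorsSketch = 1;
#else
constexpr size_t sizePerEventNumeratorsSketch = 0; // DUMMY! nothing is allocated
#endif

// The alias exists in both builds, so member declarations such as
// "HostBufferNumerators m_numerators;" no longer need their own #ifdef.
typedef SimpleBufferSketch<double, sizePerEventNumeratorsSketch> HostBufferNumeratorsSketch;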
31 changes: 7 additions & 24 deletions epochX/cudacpp/gg_tt.mad/SubProcesses/P1_gg_ttx/CPPProcess.cc
@@ -94,17 +94,18 @@ namespace mg5amcCpu
const fptype* allmomenta, // input: momenta[nevt*npar*4]
const fptype* allcouplings, // input: couplings[nevt*ndcoup*2]
fptype* allMEs // output: allMEs[nevt], |M|^2 running_sum_over_helicities
#ifdef MGONGPU_SUPPORTS_MULTICHANNEL
, const unsigned int channelId // input: multichannel channel id (1 to #diagrams); 0 to disable channel enhancement
, fptype* allNumerators // output: multichannel numerators[nevt], running_sum_over_helicities
, fptype* allDenominators // output: multichannel denominators[nevt], running_sum_over_helicities
#endif
#ifndef __CUDACC__
, const int ievt00 // input: first event number in current C++ event page (for CUDA, ievt depends on threadid)
#endif
)
//ALWAYS_INLINE // attributes are not permitted in a function definition
{
#ifndef MGONGPU_SUPPORTS_MULTICHANNEL
assert( channelId == 0 );
#endif
#ifdef __CUDACC__
using namespace mg5amcGpu;
using M_ACCESS = DeviceAccessMomenta; // non-trivial access: buffer includes all events
@@ -625,10 +626,8 @@ namespace mg5amcCpu
sigmaKin_getGoodHel( const fptype* allmomenta, // input: momenta[nevt*npar*4]
const fptype* allcouplings, // input: couplings[nevt*ndcoup*2]
fptype* allMEs, // output: allMEs[nevt], |M|^2 final_avg_over_helicities
#ifdef MGONGPU_SUPPORTS_MULTICHANNEL
fptype* allNumerators, // output: multichannel numerators[nevt], running_sum_over_helicities
fptype* allDenominators, // output: multichannel denominators[nevt], running_sum_over_helicities
#endif
bool* isGoodHel ) // output: isGoodHel[ncomb] - device array (CUDA implementation)
{ /* clang-format on */
// FIXME: assume process.nprocesses == 1 for the moment (eventually: need a loop over processes here?)
@@ -638,12 +637,8 @@ namespace mg5amcCpu
for( int ihel = 0; ihel < ncomb; ihel++ )
{
// NB: calculate_wavefunctions ADDS |M|^2 for a given ihel to the running sum of |M|^2 over helicities for the given event(s)
#ifdef MGONGPU_SUPPORTS_MULTICHANNEL
constexpr unsigned int channelId = 0; // disable single-diagram channel enhancement
calculate_wavefunctions( ihel, allmomenta, allcouplings, allMEs, channelId, allNumerators, allDenominators );
#else
calculate_wavefunctions( ihel, allmomenta, allcouplings, allMEs );
#endif
if( allMEs[ievt] != allMEsLast )
{
//if ( !isGoodHel[ihel] ) std::cout << "sigmaKin_getGoodHel ihel=" << ihel << " TRUE" << std::endl;
@@ -657,10 +652,8 @@ namespace mg5amcCpu
sigmaKin_getGoodHel( const fptype* allmomenta, // input: momenta[nevt*npar*4]
const fptype* allcouplings, // input: couplings[nevt*ndcoup*2]
fptype* allMEs, // output: allMEs[nevt], |M|^2 final_avg_over_helicities
#ifdef MGONGPU_SUPPORTS_MULTICHANNEL
fptype* allNumerators, // output: multichannel numerators[nevt], running_sum_over_helicities
fptype* allDenominators, // output: multichannel denominators[nevt], running_sum_over_helicities
#endif
bool* isGoodHel, // output: isGoodHel[ncomb] - host array (C++ implementation)
const int nevt ) // input: #events (for cuda: nevt == ndim == gpublocks*gputhreads)
{
@@ -701,12 +694,8 @@ namespace mg5amcCpu
for( int ihel = 0; ihel < ncomb; ihel++ )
{
//std::cout << "sigmaKin_getGoodHel ihel=" << ihel << ( isGoodHel[ihel] ? " true" : " false" ) << std::endl;
#ifdef MGONGPU_SUPPORTS_MULTICHANNEL
constexpr unsigned int channelId = 0; // disable single-diagram channel enhancement
calculate_wavefunctions( ihel, allmomenta, allcouplings, allMEs, channelId, allNumerators, allDenominators, ievt00 );
#else
calculate_wavefunctions( ihel, allmomenta, allcouplings, allMEs, ievt00 );
#endif
for( int ieppV = 0; ieppV < neppV; ++ieppV )
{
const int ievt = ievt00 + ieppV;
@@ -771,18 +760,20 @@ namespace mg5amcCpu
const fptype* /*allrndhel*/, // input: random numbers[nevt] for helicity selection
const fptype* /*allrndcol*/, // input: random numbers[nevt] for color selection
fptype* allMEs, // output: allMEs[nevt], |M|^2 final_avg_over_helicities
#ifdef MGONGPU_SUPPORTS_MULTICHANNEL
const unsigned int channelId, // input: multichannel channel id (1 to #diagrams); 0 to disable channel enhancement
fptype* allNumerators, // output: multichannel numerators[nevt], running_sum_over_helicities
fptype* allDenominators, // output: multichannel denominators[nevt], running_sum_over_helicities
#endif
int* /*allselhel*/, // output: helicity selection[nevt]
int* /*allselcol*/ // output: color selection[nevt]
#ifndef __CUDACC__
, const int nevt // input: #events (for cuda: nevt == ndim == gpublocks*gputhreads)
#endif
) /* clang-format on */
{
#ifndef MGONGPU_SUPPORTS_MULTICHANNEL
assert( channelId == 0 );
#endif

mgDebugInitialise();

// Denominators: spins, colors and identical particles
@@ -833,11 +824,7 @@ namespace mg5amcCpu
for( int ighel = 0; ighel < cNGoodHel; ighel++ )
{
const int ihel = cGoodHel[ighel];
#ifdef MGONGPU_SUPPORTS_MULTICHANNEL
calculate_wavefunctions( ihel, allmomenta, allcouplings, allMEs, channelId, allNumerators, allDenominators );
#else
calculate_wavefunctions( ihel, allmomenta, allcouplings, allMEs );
#endif
//if ( ighel == 0 ) break; // TEST sectors/requests (issue #16)
}
#else
Expand Down Expand Up @@ -870,11 +857,7 @@ namespace mg5amcCpu
for( int ighel = 0; ighel < cNGoodHel; ighel++ )
{
const int ihel = cGoodHel[ighel];
#ifdef MGONGPU_SUPPORTS_MULTICHANNEL
calculate_wavefunctions( ihel, allmomenta, allcouplings, allMEs, channelId, allNumerators, allDenominators, ievt00 );
#else
calculate_wavefunctions( ihel, allmomenta, allcouplings, allMEs, ievt00 );
#endif
}
}
#endif
8 changes: 0 additions & 8 deletions epochX/cudacpp/gg_tt.mad/SubProcesses/P1_gg_ttx/CPPProcess.h
@@ -103,20 +103,16 @@ namespace mg5amcCpu
sigmaKin_getGoodHel( const fptype* allmomenta, // input: momenta[nevt*npar*4]
const fptype* allcouplings, // input: couplings[nevt*ndcoup*2]
fptype* allMEs, // output: allMEs[nevt], |M|^2 final_avg_over_helicities
#ifdef MGONGPU_SUPPORTS_MULTICHANNEL
fptype* allNumerators, // output: multichannel numerators[nevt], running_sum_over_helicities
fptype* allDenominators, // output: multichannel denominators[nevt], running_sum_over_helicities
#endif
bool* isGoodHel ); // output: isGoodHel[ncomb] - device array (CUDA implementation)
#else
__global__ void
sigmaKin_getGoodHel( const fptype* allmomenta, // input: momenta[nevt*npar*4]
const fptype* allcouplings, // input: couplings[nevt*ndcoup*2]
fptype* allMEs, // output: allMEs[nevt], |M|^2 final_avg_over_helicities
#ifdef MGONGPU_SUPPORTS_MULTICHANNEL
fptype* allNumerators, // output: multichannel numerators[nevt], running_sum_over_helicities
fptype* allDenominators, // output: multichannel denominators[nevt], running_sum_over_helicities
#endif
bool* isGoodHel, // output: isGoodHel[ncomb] - host array (C++ implementation)
const int nevt ); // input: #events (for cuda: nevt == ndim == gpublocks*gputhreads)
#endif /* clang-format on */
@@ -135,11 +131,9 @@ namespace mg5amcCpu
const fptype* allrndhel, // input: random numbers[nevt] for helicity selection
const fptype* allrndcol, // input: random numbers[nevt] for color selection
fptype* allMEs, // output: allMEs[nevt], |M|^2 final_avg_over_helicities
#ifdef MGONGPU_SUPPORTS_MULTICHANNEL
const unsigned int channelId, // input: multichannel channel id (1 to #diagrams); 0 to disable channel enhancement
fptype* allNumerators, // output: multichannel numerators[nevt], running_sum_over_helicities
fptype* allDenominators, // output: multichannel denominators[nevt], running_sum_over_helicities
#endif
int* allselhel, // output: helicity selection[nevt]
int* allselcol // output: color selection[nevt]
);
@@ -150,11 +144,9 @@ namespace mg5amcCpu
const fptype* allrndhel, // input: random numbers[nevt] for helicity selection
const fptype* allrndcol, // input: random numbers[nevt] for color selection
fptype* allMEs, // output: allMEs[nevt], |M|^2 final_avg_over_helicities
#ifdef MGONGPU_SUPPORTS_MULTICHANNEL
const unsigned int channelId, // input: multichannel channel id (1 to #diagrams); 0 to disable channel enhancement
fptype* allNumerators, // output: multichannel numerators[nevt], running_sum_over_helicities
fptype* allDenominators, // output: multichannel denominators[nevt], running_sum_over_helicities
#endif
int* allselhel, // output: helicity selection[nevt]
int* allselcol, // output: color selection[nevt]
const int nevt ); // input: #events (for cuda: nevt == ndim == gpublocks*gputhreads)
