From 80558777f548a1456d6d1ab664dabaf85311cd79 Mon Sep 17 00:00:00 2001
From: ido-shm-uel
Date: Wed, 17 Sep 2025 20:44:12 +0300
Subject: [PATCH 1/4] Corrected SBT, LP for Sigmoid, Softmax, Bilinear.

SBT for Sigmoid: Corrected the criterion for a fixed Sigmoid neuron.
SBT for Softmax: Symbolic bounds are now stored correctly in _symbolicLb,
_symbolicUb.
SBT for Bilinear: Fixed the calculation of the symbolic lower/upper bias,
and fixed the mistaken assumption that both source neurons come from the
same layer.
LP relaxation for Bilinear: Fixed the same mistaken assumption that both
source neurons come from the same layer.
---
 src/nlr/LPFormulator.cpp |  21 ++++----
 src/nlr/Layer.cpp        | 104 ++++++++++++++++++++++-----------
 2 files changed, 70 insertions(+), 55 deletions(-)

diff --git a/src/nlr/LPFormulator.cpp b/src/nlr/LPFormulator.cpp
index c7619f4d1c..07190df8d6 100644
--- a/src/nlr/LPFormulator.cpp
+++ b/src/nlr/LPFormulator.cpp
@@ -1,5 +1,5 @@
 /*********************                                                        */
-/*! \file NetworkLevelReasoner.cpp
+/*! \file LPFormulator.cpp
 ** \verbatim
 ** Top contributors (to current version):
 **   Guy Katz
@@ -977,9 +977,9 @@ void LPFormulator::addAbsoluteValueLayerToLpRelaxation( GurobiWrapper &gurobi,
         double lb = std::max( 0.0, layer->getLb( i ) );
         gurobi.addVariable( Stringf( "x%u", targetVariable ), lb, ub );
 
-        /*
-          The phase of this AbsoluteValue is not yet fixed, 0 <= y <= max(-lb, ub).
-        */
+
+        // The phase of this AbsoluteValue is not yet fixed, 0 <= y <= max(-lb, ub).
+        // y >= 0
         List<GurobiWrapper::Term> terms;
         terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) );
@@ -1464,20 +1464,21 @@ void LPFormulator::addBilinearLayerToLpRelaxation( GurobiWrapper &gurobi,
             List<NeuronIndex> sources = layer->getActivationSources( i );
 
-            const Layer *sourceLayer = _layerOwner->getLayer( sources.begin()->_layer );
-
             Vector<double> sourceLbs;
             Vector<double> sourceUbs;
             Vector<double> sourceValues;
             Vector<unsigned> sourceNeurons;
+            Vector<const Layer *> sourceLayers;
             bool allConstant = true;
             for ( const auto &sourceIndex : sources )
             {
+                const Layer *sourceLayer = _layerOwner->getLayer( sourceIndex._layer );
                 unsigned sourceNeuron = sourceIndex._neuron;
                 double sourceLb = sourceLayer->getLb( sourceNeuron );
                 double sourceUb = sourceLayer->getUb( sourceNeuron );
                 String sourceName = Stringf( "x%u", sourceLayer->neuronToVariable( sourceNeuron ) );
+                sourceLayers.append( sourceLayer );
                 sourceNeurons.append( sourceNeuron );
                 sourceLbs.append( sourceLb );
                 sourceUbs.append( sourceUb );
@@ -1526,10 +1527,10 @@ void LPFormulator::addBilinearLayerToLpRelaxation( GurobiWrapper &gurobi,
             terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) );
             terms.append( GurobiWrapper::Term(
                 -sourceLbs[1],
-                Stringf( "x%u", sourceLayer->neuronToVariable( sourceNeurons[0] ) ) ) );
+                Stringf( "x%u", sourceLayers[0]->neuronToVariable( sourceNeurons[0] ) ) ) );
             terms.append( GurobiWrapper::Term(
                 -sourceLbs[0],
-                Stringf( "x%u", sourceLayer->neuronToVariable( sourceNeurons[1] ) ) ) );
+                Stringf( "x%u", sourceLayers[1]->neuronToVariable( sourceNeurons[1] ) ) ) );
             gurobi.addGeqConstraint( terms, -sourceLbs[0] * sourceLbs[1] );
 
             // Upper bound: out <= u_y * x + l_x * y - l_x * u_y
             terms.clear();
             terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) );
             terms.append( GurobiWrapper::Term(
                 -sourceUbs[1],
-                Stringf( "x%u", sourceLayer->neuronToVariable( sourceNeurons[0] ) ) ) );
+                Stringf( "x%u", sourceLayers[0]->neuronToVariable( sourceNeurons[0] ) ) ) );
             terms.append( GurobiWrapper::Term(
                 -sourceLbs[0],
-                Stringf( "x%u", sourceLayer->neuronToVariable( sourceNeurons[1] ) ) ) );
+                Stringf( "x%u", sourceLayers[1]->neuronToVariable( sourceNeurons[1] ) ) ) );
             gurobi.addLeqConstraint( terms, -sourceLbs[0] * sourceUbs[1] );
         }
     }
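
For reference, the two bilinear constraints above are the standard McCormick
envelope planes for out = x * y over a box domain. A minimal standalone sketch
(plain C++; the names and sample values are illustrative and independent of
the GurobiWrapper API):

    // Illustrative only: the McCormick planes encoded by the LP relaxation
    // above for z = x * y, with x in [lx, ux] and y in [ly, uy]:
    //   lower plane: z >= ly * x + lx * y - lx * ly
    //   upper plane: z <= uy * x + lx * y - lx * uy
    #include <cassert>

    int main()
    {
        double lx = -1, ly = 1, uy = 3; // sample interval bounds
        double x = 0.5, y = 2;          // a point inside the box
        double z = x * y;
        double lowerPlane = ly * x + lx * y - lx * ly;
        double upperPlane = uy * x + lx * y - lx * uy;
        assert( lowerPlane <= z && z <= upperPlane ); // holds everywhere in the box
        return 0;
    }

Both planes remain sound when x and y come from different layers, which is why
resolving each source through its own layer (sourceLayers[0], sourceLayers[1])
is enough to fix the relaxation.
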
diff --git a/src/nlr/Layer.cpp b/src/nlr/Layer.cpp
index 08e6900538..131ba77739 100644
--- a/src/nlr/Layer.cpp
+++ b/src/nlr/Layer.cpp
@@ -2269,7 +2269,7 @@ void Layer::computeSymbolicBoundsForSigmoid()
         double sourceLbSigmoid = SigmoidConstraint::sigmoid( sourceLb );
 
         // Case when the Sigmoid constraint is fixed
-        if ( FloatUtils::areEqual( FloatUtils::round( sourceUb ), FloatUtils::round( sourceLb ) ) )
+        if ( FloatUtils::areEqual( sourceLb, sourceUb ) )
         {
             for ( unsigned j = 0; j < _inputLayerSize; ++j )
             {
@@ -2706,7 +2706,6 @@ void Layer::computeSymbolicBoundsForSoftmax()
         Vector<double> sourceMids;
         Vector<double> targetLbs;
         Vector<double> targetUbs;
-        unsigned len = 0;
         for ( const auto &sourceIndex : sources )
         {
             unsigned sourceNeuron = sourceIndex._neuron;
@@ -2718,8 +2717,6 @@ void Layer::computeSymbolicBoundsForSoftmax()
             sourceMids.append( ( sourceLb + sourceUb ) / 2 );
             targetLbs.append( _lb[i] );
             targetUbs.append( _ub[i] );
-
-            ++len;
         }
 
         // Find the index of i in the softmax
@@ -2758,8 +2755,8 @@ void Layer::computeSymbolicBoundsForSoftmax()
             _symbolicUpperBias[i] = _ub[i];
             for ( const auto &sourceIndex : sources )
             {
-                symbolicLb[len * sourceIndex._neuron + i] = 0;
-                symbolicUb[len * sourceIndex._neuron + i] = 0;
+                symbolicLb[_size * sourceIndex._neuron + i] = 0;
+                symbolicUb[_size * sourceIndex._neuron + i] = 0;
             }
         }
         else
@@ -2782,7 +2779,7 @@ void Layer::computeSymbolicBoundsForSoftmax()
                 {
                     double dldj =
                         softmaxdLSELowerBound( sourceMids, sourceLbs, sourceUbs, index, inputIndex );
-                    symbolicLb[len * sourceIndex._neuron + i] = dldj;
+                    symbolicLb[_size * sourceIndex._neuron + i] = dldj;
                     _symbolicLowerBias[i] -= dldj * sourceMids[inputIndex];
                     ++inputIndex;
                 }
@@ -2795,7 +2792,7 @@ void Layer::computeSymbolicBoundsForSoftmax()
                 {
                     double dldj =
                         softmaxdLSELowerBound2( sourceMids, sourceLbs, sourceUbs, index, inputIndex );
-                    symbolicLb[len * sourceIndex._neuron + i] = dldj;
+                    symbolicLb[_size * sourceIndex._neuron + i] = dldj;
                     _symbolicLowerBias[i] -= dldj * sourceMids[inputIndex];
                     ++inputIndex;
                 }
@@ -2808,7 +2805,7 @@ void Layer::computeSymbolicBoundsForSoftmax()
                 {
                     double dudj =
                         softmaxdLSEUpperbound( sourceMids, targetLbs, targetUbs, index, inputIndex );
-                    symbolicUb[len * sourceIndex._neuron + i] = dudj;
+                    symbolicUb[_size * sourceIndex._neuron + i] = dudj;
                     _symbolicUpperBias[i] -= dudj * sourceMids[inputIndex];
                     ++inputIndex;
                 }
@@ -2822,7 +2819,7 @@ void Layer::computeSymbolicBoundsForSoftmax()
                 {
                     double dldj =
                         softmaxdERLowerBound( sourceMids, sourceLbs, sourceUbs, index, inputIndex );
-                    symbolicLb[len * sourceIndex._neuron + i] = dldj;
+                    symbolicLb[_size * sourceIndex._neuron + i] = dldj;
                     _symbolicLowerBias[i] -= dldj * sourceMids[inputIndex];
                     ++inputIndex;
                 }
@@ -2834,7 +2831,7 @@ void Layer::computeSymbolicBoundsForSoftmax()
                 {
                     double dudj =
                         softmaxdERUpperBound( sourceMids, targetLbs, targetUbs, index, inputIndex );
-                    symbolicUb[len * sourceIndex._neuron + i] = dudj;
+                    symbolicUb[_size * sourceIndex._neuron + i] = dudj;
                     _symbolicUpperBias[i] -= dudj * sourceMids[inputIndex];
                     ++inputIndex;
                 }
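
The len -> _size change above is an indexing fix: the flat coefficient
matrices are laid out with a stride equal to the size of the current layer, so
the coefficient of target neuron i with respect to source neuron j lives at
offset _size * j + i. Using the source count as the stride mis-addresses
entries whenever the two differ. A small sketch of the two offsets
(hypothetical sizes, not Marabou code):

    // Illustrative only: row-major offset into a flat coefficient matrix.
    // The stride is the target layer size (_size), not the source count
    // (the removed `len`).
    #include <cstdio>

    int main()
    {
        unsigned size = 4;        // target layer size (_size)
        unsigned sourceCount = 3; // sources of one softmax neuron (old `len`)
        unsigned i = 2, j = 1;    // target neuron i, source neuron j
        std::printf( "fixed offset: %u, old offset: %u\n",
                     size * j + i,          // 6: the entry read downstream
                     sourceCount * j + i ); // 5: what the old code wrote to
        return 0;
    }
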
@@ -3036,27 +3033,26 @@ void Layer::computeSymbolicBoundsForBilinear()
         List<NeuronIndex> sources = getActivationSources( i );
         ASSERT( sources.size() == 2 );
 
-        const Layer *sourceLayer = _layerOwner->getLayer( sources.begin()->_layer );
-
-        unsigned sourceLayerSize = sourceLayer->getSize();
-        const double *sourceSymbolicLb = sourceLayer->getSymbolicLb();
-        const double *sourceSymbolicUb = sourceLayer->getSymbolicUb();
-
         Vector<double> sourceLbs;
         Vector<double> sourceUbs;
         Vector<double> sourceValues;
+        Vector<unsigned> sourceNeurons;
+        Vector<unsigned> sourceLayerSizes;
+        Vector<const Layer *> sourceLayers;
         bool allConstant = true;
-        unsigned indexA = 0;
-        unsigned indexB = 0;
-        unsigned counter = 0;
         for ( const auto &sourceIndex : sources )
         {
+            const Layer *sourceLayer = _layerOwner->getLayer( sourceIndex._layer );
             unsigned sourceNeuron = sourceIndex._neuron;
             double sourceLb = sourceLayer->getLb( sourceNeuron );
             double sourceUb = sourceLayer->getUb( sourceNeuron );
+            unsigned sourceLayerSize = sourceLayer->getSize();
+            sourceLayers.append( sourceLayer );
+            sourceNeurons.append( sourceNeuron );
             sourceLbs.append( sourceLb );
             sourceUbs.append( sourceUb );
+            sourceLayerSizes.append( sourceLayerSize );
 
             if ( !sourceLayer->neuronEliminated( sourceNeuron ) )
             {
@@ -3067,16 +3063,6 @@ void Layer::computeSymbolicBoundsForBilinear()
                 double sourceValue = sourceLayer->getEliminatedNeuronValue( sourceNeuron );
                 sourceValues.append( sourceValue );
             }
-
-            if ( counter == 0 )
-            {
-                indexA = sourceIndex._neuron;
-            }
-            else
-            {
-                indexB = sourceIndex._neuron;
-            }
-            ++counter;
         }
 
         if ( allConstant )
@@ -3106,47 +3092,75 @@ void Layer::computeSymbolicBoundsForBilinear()
         // Symbolic upper bound:
         // out <= alpha * x + beta * y + gamma
         // where alpha = ub_y, beta = lb_x, gamma = -lb_x * ub_y
+        double aLower = sourceLbs[1];
+        double aUpper = sourceUbs[1];
+        double bLower = sourceLbs[0];
+        double bUpper = sourceLbs[0];
+        _symbolicLowerBias[i] = -sourceLbs[0] * sourceLbs[1];
+        _symbolicUpperBias[i] = -sourceLbs[0] * sourceUbs[1];
+
         for ( unsigned j = 0; j < _inputLayerSize; ++j )
         {
-            if ( sourceLbs[1] >= 0 )
+            if ( aLower >= 0 )
             {
                 _symbolicLb[j * _size + i] +=
-                    sourceLbs[1] * sourceSymbolicLb[j * sourceLayerSize + indexA];
+                    aLower * ( sourceLayers[0]->getSymbolicLb() )[j * sourceLayerSizes[0] + sourceNeurons[0]];
+                _symbolicLowerBias[i] += aLower * ( sourceLayers[0]->getSymbolicLowerBias() )[0];
             }
             else
             {
                 _symbolicLb[j * _size + i] +=
-                    sourceLbs[1] * sourceSymbolicUb[j * sourceLayerSize + indexA];
+                    aLower * ( sourceLayers[0]->getSymbolicUb() )[j * sourceLayerSizes[0] + sourceNeurons[0]];
+                _symbolicLowerBias[i] += aLower * ( sourceLayers[0]->getSymbolicUpperBias() )[0];
             }
 
-            if ( sourceUbs[1] >= 0 )
+            if ( aUpper >= 0 )
             {
                 _symbolicUb[j * _size + i] +=
-                    sourceUbs[1] * sourceSymbolicUb[j * sourceLayerSize + indexA];
+                    aUpper * ( sourceLayers[0]->getSymbolicUb() )[j * sourceLayerSizes[0] + sourceNeurons[0]];
+                _symbolicUpperBias[i] += aUpper * ( sourceLayers[0]->getSymbolicUpperBias() )[0];
             }
             else
             {
-                _symbolicLb[j * _size + i] +=
-                    sourceUbs[1] * sourceSymbolicLb[j * sourceLayerSize + indexA];
+                _symbolicUb[j * _size + i] +=
+                    aUpper * ( sourceLayers[0]->getSymbolicLb() )[j * sourceLayerSizes[0] + sourceNeurons[0]];
+                _symbolicUpperBias[i] += aUpper * ( sourceLayers[0]->getSymbolicLowerBias() )[0];
             }
 
-            if ( sourceLbs[0] >= 0 )
+            if ( bLower >= 0 )
            {
                 _symbolicLb[j * _size + i] +=
-                    sourceLbs[0] * sourceSymbolicLb[j * sourceLayerSize + indexB];
-                _symbolicUb[j * _size + i] +=
-                    sourceLbs[0] * sourceSymbolicUb[j * sourceLayerSize + indexB];
+                    bLower * ( sourceLayers[1]->getSymbolicLb() )[j * sourceLayerSizes[1] + sourceNeurons[1]];
+                _symbolicUpperBias[i] += bLower * ( sourceLayers[1]->getSymbolicLowerBias() )[1];
             }
             else
             {
                 _symbolicLb[j * _size + i] +=
-                    sourceLbs[0] * sourceSymbolicUb[j * sourceLayerSize + indexB];
+                    bLower * ( sourceLayers[1]->getSymbolicUb() )[j * sourceLayerSizes[1] + sourceNeurons[1]];
+                _symbolicUpperBias[i] += bLower * ( sourceLayers[1]->getSymbolicUpperBias() )[1];
+            }
+
+            if ( bUpper >= 0 )
+            {
                 _symbolicUb[j * _size + i] +=
-                    sourceLbs[0] * sourceSymbolicLb[j * sourceLayerSize + indexB];
+                    bUpper * ( sourceLayers[1]->getSymbolicUb() )[j * sourceLayerSizes[1] + sourceNeurons[1]];
+                _symbolicUpperBias[i] += bUpper * ( sourceLayers[1]->getSymbolicUpperBias() )[1];
+            }
+            else
+            {
+                _symbolicUb[j * _size + i] +=
+                    bUpper * ( sourceLayers[1]->getSymbolicLb() )[j * sourceLayerSizes[1] + sourceNeurons[1]];
+                _symbolicUpperBias[i] += bUpper * ( sourceLayers[1]->getSymbolicLowerBias() )[1];
             }
         }
-        _symbolicLowerBias[i] = -sourceLbs[0] * sourceLbs[1];
-        _symbolicUpperBias[i] = -sourceLbs[0] * sourceUbs[1];
 
         double lb = FloatUtils::infinity();
         double ub = FloatUtils::negativeInfinity();

From 066b1d30adf8a9a0c42bb16cb6d5431fba4b0a2b Mon Sep 17 00:00:00 2001
From: ido-shm-uel
Date: Wed, 17 Sep 2025 20:46:20 +0300
Subject: [PATCH 2/4] Correcting NLR unit tests.

Fixed typos in the documentation of some tests and corrected one Softmax
SBT test.
---
 src/nlr/tests/Test_NetworkLevelReasoner.h | 22 +++++++++++-----------
 1 file changed, 11 insertions(+), 11 deletions(-)

diff --git a/src/nlr/tests/Test_NetworkLevelReasoner.h b/src/nlr/tests/Test_NetworkLevelReasoner.h
index b74cd3931c..a6ec8a49ee 100644
--- a/src/nlr/tests/Test_NetworkLevelReasoner.h
+++ b/src/nlr/tests/Test_NetworkLevelReasoner.h
@@ -1641,7 +1641,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite
 
           x6 x7 x8 = softmax(x3, x4, x5)
 
           x9 = x6 + x7 + x8
-          x10 = x6 + x7 + x8
+          x10 = - x6 - x7 - x8
 
         */
@@ -6041,7 +6041,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite
 
         Layer 2 (with residual from x0):
 
-        x3.lb = -1( 0.5x0 + 0.5 ) -x0 + 1 = -1.5x0 + 0.5 : [-1, 1]
+        x3.lb = -1( 0.5x0 + 0.5 ) -x0 + 1 = -1.5x0 + 0.5 : [-1, 2]
         x3.ub = -1( 0.5x0 ) -1x0 + 1 = -1.5x0 + 1 : [-0.5, 2.5]
         x3 range: [-1, 2.5]
@@ -6057,7 +6057,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite
         x5.lb = 3 ( 0 ) + 3 ( x0 ) + 1 = 3x0 + 1 : [-2, 4]
         x5.ub = 3 ( -15/14 x0 + 20/14 ) + 3 ( x0 ) + 1 = -3/14 x0 + 74/14 : [71/14, 77/14 = 5.5]
-        x5 range: [-2, 4]
+        x5 range: [-2, 5.5]
 
         */
         List<Tightening> expectedBounds( {
@@ -6113,7 +6113,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite
 
         Layer 2 (with residual from x0):
 
-        x3.lb = -1( 0.5x0 + 0.5 ) -x0 + 1 = -1.5x0 + 0.5 : [-1, 1]
+        x3.lb = -1( 0.5x0 + 0.5 ) -x0 + 1 = -1.5x0 + 0.5 : [-1, 2]
         x3.ub = -1( 0.5x0 ) -1x0 + 1 = -1.5x0 + 1 : [-0.5, 2.5]
         x3 range: [-1, 2.5]
@@ -7471,10 +7471,10 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite
             Tightening( 10, 0.1173, Tightening::LB ), Tightening( 10, 0.1173, Tightening::UB ),
             Tightening( 11, 0.0179, Tightening::LB ), Tightening( 11, 0.0179, Tightening::UB ),
             Tightening( 12, 0.0159, Tightening::LB ), Tightening( 12, 0.0159, Tightening::UB ),
-            Tightening( 13, 0.9470, Tightening::LB ), Tightening( 13, 0.9470, Tightening::UB ),
-            Tightening( 14, -0.9470, Tightening::LB ), Tightening( 14, -0.9470, Tightening::UB ),
-            Tightening( 15, 1.0253, Tightening::LB ), Tightening( 15, 1.0253, Tightening::UB ),
-            Tightening( 16, -1.0253, Tightening::LB ), Tightening( 16, -1.0253, Tightening::UB )
+            Tightening( 13, 1, Tightening::LB ), Tightening( 13, 1, Tightening::UB ),
+            Tightening( 14, -1, Tightening::LB ), Tightening( 14, -1, Tightening::UB ),
+            Tightening( 15, 1, Tightening::LB ), Tightening( 15, 1, Tightening::UB ),
+            Tightening( 16, -1, Tightening::LB ), Tightening( 16, -1, Tightening::UB )
         } );
@@ -7577,9 +7577,9 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite
 
         Layer 3:
 
-        x7.lb = -1 ( 2x0 - 5x1 + 3 ) = -2x0 + 7x1 - 3 : [-21, 0]
-        x7.ub = -1 ( -2x0 + 3x1 - 1 ) = 2x0 + x1 + 1 : [2, 7]
-        x4 range: [-21, 5]
+        x5.lb = -1 ( 2x0 - 5x1 + 3 ) = -2x0 + 7x1 - 3 : [-21, 0]
+        x5.ub = -1 ( -2x0 + 3x1 - 1 ) = 2x0 + x1 + 1 : [2, 7]
+        x5 range: [-21, 7]
 
         */
         List<Tightening> expectedBounds( { Tightening( 2, -1, Tightening::LB ),
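
The corrected ranges in the test comments above follow from evaluating each
affine bound at the endpoints of the input interval: for x0 in [-1, 1], the
bound -1.5x0 + 0.5 attains its maximum 2 at x0 = -1, so [-1, 2] is right and
the old [-1, 1] was a typo. A sketch of that endpoint evaluation (an
illustrative helper, not part of the test suite):

    // Illustrative only: range of a * x + b over x in [lo, hi]; the extremes
    // sit at the interval endpoints, chosen by the sign of the slope a.
    #include <cstdio>

    void affineRange( double a, double b, double lo, double hi, double &min, double &max )
    {
        min = ( a >= 0 ? a * lo : a * hi ) + b;
        max = ( a >= 0 ? a * hi : a * lo ) + b;
    }

    int main()
    {
        double min, max;
        affineRange( -1.5, 0.5, -1, 1, min, max ); // x3.lb over x0 in [-1, 1]
        std::printf( "[%g, %g]\n", min, max );     // prints [-1, 2]
        return 0;
    }
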
From dd2d42d69fb1dd684db06d214b6b8bf6a2f1bb20 Mon Sep 17 00:00:00 2001
From: ido-shm-uel
Date: Thu, 18 Sep 2025 20:50:39 +0300
Subject: [PATCH 3/4] Further fixes for Max, Softmax, Bilinear IA/SBT

Interval Arithmetic and Symbolic Bound Tightening for Max, Softmax and
Bilinear mistakenly assumed that:
1. All source neurons come from the same layer.
2. The size of the current layer equals the number of source neurons of
   any softmax neuron.
Fixed both errors.
---
 src/nlr/LPFormulator.cpp |  4 ++--
 src/nlr/Layer.cpp        | 27 +++++++++++----------------
 2 files changed, 13 insertions(+), 18 deletions(-)

diff --git a/src/nlr/LPFormulator.cpp b/src/nlr/LPFormulator.cpp
index 07190df8d6..7a4af0ae74 100644
--- a/src/nlr/LPFormulator.cpp
+++ b/src/nlr/LPFormulator.cpp
@@ -977,9 +977,9 @@ void LPFormulator::addAbsoluteValueLayerToLpRelaxation( GurobiWrapper &gurobi,
         double lb = std::max( 0.0, layer->getLb( i ) );
         gurobi.addVariable( Stringf( "x%u", targetVariable ), lb, ub );
-
+
         // The phase of this AbsoluteValue is not yet fixed, 0 <= y <= max(-lb, ub).
-
+
         // y >= 0
         List<GurobiWrapper::Term> terms;
         terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) );
diff --git a/src/nlr/Layer.cpp b/src/nlr/Layer.cpp
index 131ba77739..dc7869dd6e 100644
--- a/src/nlr/Layer.cpp
+++ b/src/nlr/Layer.cpp
@@ -1104,7 +1104,6 @@ void Layer::computeIntervalArithmeticBoundsForMax()
         ASSERT( _neuronToActivationSources.exists( i ) );
         List<NeuronIndex> sources = getActivationSources( i );
-        const Layer *sourceLayer = _layerOwner->getLayer( sources.begin()->_layer );
 
         NeuronIndex indexOfMaxLowerBound = *( sources.begin() );
         double maxLowerBound = FloatUtils::negativeInfinity();
@@ -1115,6 +1114,7 @@ void Layer::computeIntervalArithmeticBoundsForMax()
         unsigned counter = 0;
         for ( const auto &sourceIndex : sources )
         {
+            const Layer *sourceLayer = _layerOwner->getLayer( sourceIndex._layer );
             unsigned sourceNeuron = sourceIndex._neuron;
             double sourceLb = sourceLayer->getLb( sourceNeuron );
             double sourceUb = sourceLayer->getUb( sourceNeuron );
@@ -1196,14 +1196,12 @@ void Layer::computeIntervalArithmeticBoundsForSoftmax()
         ASSERT( _neuronToActivationSources.exists( i ) );
         List<NeuronIndex> sources = getActivationSources( i );
-        const Layer *sourceLayer = _layerOwner->getLayer( sources.begin()->_layer );
-
-        ASSERT( sourceLayer->getSize() == _size );
 
         Vector<double> sourceLbs;
         Vector<double> sourceUbs;
         for ( const auto &sourceIndex : sources )
         {
+            const Layer *sourceLayer = _layerOwner->getLayer( sourceIndex._layer );
             unsigned sourceNeuron = sourceIndex._neuron;
             double sourceLb = sourceLayer->getLb( sourceNeuron );
             double sourceUb = sourceLayer->getUb( sourceNeuron );
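
For the bilinear hunk that follows, interval bounds of a product are the
minimum and maximum of the four corner products of the source intervals; the
fix makes each interval come from that source's own layer. A standalone sketch
of the corner rule (illustrative values, not the Layer API):

    // Illustrative only: interval bounds of z = x * y from the four corner
    // products of x in [lx, ux] and y in [ly, uy]. After the fix, the two
    // intervals may come from two different source layers.
    #include <algorithm>
    #include <cstdio>

    int main()
    {
        double lx = -1, ux = 2, ly = 1, uy = 3;
        double c[4] = { lx * ly, lx * uy, ux * ly, ux * uy };
        double lb = *std::min_element( c, c + 4 );
        double ub = *std::max_element( c, c + 4 );
        std::printf( "z in [%g, %g]\n", lb, ub ); // z in [-3, 6]
        return 0;
    }
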
@@ -1253,14 +1251,13 @@ void Layer::computeIntervalArithmeticBoundsForBilinear()
         List<NeuronIndex> sources = getActivationSources( i );
         ASSERT( sources.size() == 2 );
 
-        const Layer *sourceLayer = _layerOwner->getLayer( sources.begin()->_layer );
-
         Vector<double> sourceLbs;
         Vector<double> sourceUbs;
         Vector<double> sourceValues;
         bool allConstant = true;
         for ( const auto &sourceIndex : sources )
         {
+            const Layer *sourceLayer = _layerOwner->getLayer( sourceIndex._layer );
             unsigned sourceNeuron = sourceIndex._neuron;
             double sourceLb = sourceLayer->getLb( sourceNeuron );
             double sourceUb = sourceLayer->getUb( sourceNeuron );
@@ -2548,11 +2545,6 @@ void Layer::computeSymbolicBoundsForMax()
         ASSERT( _neuronToActivationSources.exists( i ) );
         List<NeuronIndex> sources = getActivationSources( i );
-        const Layer *sourceLayer = _layerOwner->getLayer( sources.begin()->_layer );
-
-        unsigned sourceLayerSize = sourceLayer->getSize();
-        const double *sourceSymbolicLb = sourceLayer->getSymbolicLb();
-        const double *sourceSymbolicUb = sourceLayer->getSymbolicUb();
 
         NeuronIndex indexOfMaxLowerBound = *( sources.begin() );
         double maxLowerBound = FloatUtils::negativeInfinity();
@@ -2562,6 +2554,7 @@ void Layer::computeSymbolicBoundsForMax()
         Map<NeuronIndex, double> sourceUbs;
         for ( const auto &sourceIndex : sources )
         {
+            const Layer *sourceLayer = _layerOwner->getLayer( sourceIndex._layer );
             unsigned sourceNeuron = sourceIndex._neuron;
             double sourceLb = sourceLayer->getLb( sourceNeuron );
             double sourceUb = sourceLayer->getUb( sourceNeuron );
@@ -2593,6 +2586,11 @@ void Layer::computeSymbolicBoundsForMax()
             }
         }
 
+        const Layer *sourceLayer = _layerOwner->getLayer( indexOfMaxLowerBound._layer );
+        unsigned sourceLayerSize = sourceLayer->getSize();
+        const double *sourceSymbolicLb = sourceLayer->getSymbolicLb();
+        const double *sourceSymbolicUb = sourceLayer->getSymbolicUb();
+
         if ( phaseFixed )
         {
             // Phase fixed
@@ -2672,7 +2670,6 @@ void Layer::computeSymbolicBoundsForSoftmax()
     std::fill_n( _work, _size * _size, 0 );
 
     Set<unsigned> handledInputNeurons;
-    unsigned sourceLayerSize = _size;
     SoftmaxBoundType boundType = Options::get()->getSoftmaxBoundType();
 
     for ( unsigned i = 0; i < _size; ++i )
@@ -2696,10 +2693,6 @@ void Layer::computeSymbolicBoundsForSoftmax()
         ASSERT( _neuronToActivationSources.exists( i ) );
         List<NeuronIndex> sources = getActivationSources( i );
-        const Layer *sourceLayer = _layerOwner->getLayer( sources.begin()->_layer );
-
-        sourceLayerSize = sourceLayer->getSize();
-        ASSERT( sourceLayerSize == _size );
 
         Vector<double> sourceLbs;
         Vector<double> sourceUbs;
         Vector<double> sourceMids;
         Vector<double> targetLbs;
         Vector<double> targetUbs;
         for ( const auto &sourceIndex : sources )
         {
+            const Layer *sourceLayer = _layerOwner->getLayer( sourceIndex._layer );
             unsigned sourceNeuron = sourceIndex._neuron;
             double sourceLb = sourceLayer->getLb( sourceNeuron );
             double sourceUb = sourceLayer->getUb( sourceNeuron );
@@ -2842,6 +2836,7 @@ void Layer::computeSymbolicBoundsForSoftmax()
     for ( const auto &sourceLayerEntry : _sourceLayers )
     {
         const Layer *sourceLayer = _layerOwner->getLayer( sourceLayerEntry.first );
+        unsigned sourceLayerSize = sourceLayer->getSize();
 
         /*
           Perform the multiplication

From 28cf80e710cb183d685d9eeb25ebf833dc78e358 Mon Sep 17 00:00:00 2001
From: ido-shm-uel
Date: Thu, 18 Sep 2025 20:53:43 +0300
Subject: [PATCH 4/4] Clang-Format
---
 src/nlr/tests/Test_NetworkLevelReasoner.h | 28 +++++++++++++--------------
 1 file changed, 14 insertions(+), 14 deletions(-)

diff --git a/src/nlr/tests/Test_NetworkLevelReasoner.h b/src/nlr/tests/Test_NetworkLevelReasoner.h
index a6ec8a49ee..bcd4e58db2 100644
--- a/src/nlr/tests/Test_NetworkLevelReasoner.h
+++ b/src/nlr/tests/Test_NetworkLevelReasoner.h
@@ -7461,20 +7461,20 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite
         */
 
         List<Tightening> expectedBounds(
-            { Tightening( 3, 2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ),
-            Tightening( 4, 3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ),
-            Tightening( 5, 0, Tightening::LB ), Tightening( 5, 0, Tightening::UB ),
-            Tightening( 6, -1, Tightening::LB ), Tightening( 6, -1, Tightening::UB ),
-            Tightening( 7, -2, Tightening::LB ), Tightening( 7, -2, Tightening::UB ),
-            Tightening( 8, 0.8668, Tightening::LB ), Tightening( 8, 0.8668, Tightening::UB ),
-            Tightening( 9, 0.9820, Tightening::LB ), Tightening( 9, 0.9820, Tightening::UB ),
-            Tightening( 10, 0.1173, Tightening::LB ), Tightening( 10, 0.1173, Tightening::UB ),
-            Tightening( 11, 0.0179, Tightening::LB ), Tightening( 11, 0.0179, Tightening::UB ),
-            Tightening( 12, 0.0159, Tightening::LB ), Tightening( 12, 0.0159, Tightening::UB ),
-            Tightening( 13, 1, Tightening::LB ), Tightening( 13, 1, Tightening::UB ),
-            Tightening( 14, -1, Tightening::LB ), Tightening( 14, -1, Tightening::UB ),
-            Tightening( 15, 1, Tightening::LB ), Tightening( 15, 1, Tightening::UB ),
-            Tightening( 16, -1, Tightening::LB ), Tightening( 16, -1, Tightening::UB )
+            { Tightening( 3, 2, Tightening::LB ),       Tightening( 3, 2, Tightening::UB ),
+              Tightening( 4, 3, Tightening::LB ),       Tightening( 4, 3, Tightening::UB ),
+              Tightening( 5, 0, Tightening::LB ),       Tightening( 5, 0, Tightening::UB ),
+              Tightening( 6, -1, Tightening::LB ),      Tightening( 6, -1, Tightening::UB ),
+              Tightening( 7, -2, Tightening::LB ),      Tightening( 7, -2, Tightening::UB ),
+              Tightening( 8, 0.8668, Tightening::LB ),  Tightening( 8, 0.8668, Tightening::UB ),
+              Tightening( 9, 0.9820, Tightening::LB ),  Tightening( 9, 0.9820, Tightening::UB ),
+              Tightening( 10, 0.1173, Tightening::LB ), Tightening( 10, 0.1173, Tightening::UB ),
+              Tightening( 11, 0.0179, Tightening::LB ), Tightening( 11, 0.0179, Tightening::UB ),
+              Tightening( 12, 0.0159, Tightening::LB ), Tightening( 12, 0.0159, Tightening::UB ),
+              Tightening( 13, 1, Tightening::LB ),      Tightening( 13, 1, Tightening::UB ),
+              Tightening( 14, -1, Tightening::LB ),     Tightening( 14, -1, Tightening::UB ),
+              Tightening( 15, 1, Tightening::LB ),      Tightening( 15, 1, Tightening::UB ),
+              Tightening( 16, -1, Tightening::LB ),     Tightening( 16, -1, Tightening::UB ) } );